Dec 10 10:44:53 crc systemd[1]: Starting Kubernetes Kubelet... Dec 10 10:44:54 crc restorecon[4677]: Relabeled /var/lib/kubelet/config.json from system_u:object_r:unlabeled_t:s0 to system_u:object_r:container_var_lib_t:s0 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/device-plugins not reset as customized by admin to system_u:object_r:container_file_t:s0 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/device-plugins/kubelet.sock not reset as customized by admin to system_u:object_r:container_file_t:s0 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/volumes/kubernetes.io~configmap/nginx-conf/..2025_02_23_05_40_35.4114275528/nginx.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/containers/networking-console-plugin/22e96971 not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/containers/networking-console-plugin/21c98286 not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/containers/networking-console-plugin/0f1869e1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c215,c682 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/setup/46889d52 not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/setup/5b6a5969 not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c963 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/setup/6c7921f5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c215,c682 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/4804f443 not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/2a46b283 not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/a6b5573e not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/4f88ee5b not reset as customized 
by admin to system_u:object_r:container_file_t:s0:c225,c458 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/5a4eee4b not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c963 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/cd87c521 not reset as customized by admin to system_u:object_r:container_file_t:s0:c215,c682 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_33_42.2574241751 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_33_42.2574241751/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/38602af4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/1483b002 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/0346718b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/d3ed4ada not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/3bb473a5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/8cd075a9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/00ab4760 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/54a21c09 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c4,c24 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c589,c726 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/70478888 not reset as customized by admin to system_u:object_r:container_file_t:s0:c176,c499 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/43802770 not reset as customized by admin to system_u:object_r:container_file_t:s0:c176,c499 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/955a0edc not reset as customized by admin to system_u:object_r:container_file_t:s0:c176,c499 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/bca2d009 not reset as customized by admin to system_u:object_r:container_file_t:s0:c140,c1009 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/b295f9bd not reset as customized by admin to system_u:object_r:container_file_t:s0:c589,c726 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/..2025_02_23_05_21_22.3617465230 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/..2025_02_23_05_21_22.3617465230/cnibincopy.sh not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/cnibincopy.sh not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/..2025_02_23_05_21_22.2050650026 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/..2025_02_23_05_21_22.2050650026/allowlist.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/..data not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c574,c582 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/allowlist.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/egress-router-binary-copy/bc46ea27 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/egress-router-binary-copy/5731fc1b not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/egress-router-binary-copy/5e1b2a3c not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/cni-plugins/943f0936 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/cni-plugins/3f764ee4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/cni-plugins/8695e3f9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/bond-cni-plugin/aed7aa86 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/bond-cni-plugin/c64d7448 not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/bond-cni-plugin/0ba16bd2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/routeoverride-cni/207a939f not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/routeoverride-cni/54aa8cdb not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/routeoverride-cni/1f5fa595 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni-bincopy/bf9c8153 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni-bincopy/47fba4ea not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c138,c778 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni-bincopy/7ae55ce9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni/7906a268 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni/ce43fa69 not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni/7fc7ea3a not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/kube-multus-additional-cni-plugins/d8c38b7d not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/kube-multus-additional-cni-plugins/9ef015fb not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/kube-multus-additional-cni-plugins/b9db6a41 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c432,c991 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/network-metrics-daemon/b1733d79 not reset as customized by admin to system_u:object_r:container_file_t:s0:c476,c820 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/network-metrics-daemon/afccd338 not reset as customized by admin to system_u:object_r:container_file_t:s0:c272,c818 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/network-metrics-daemon/9df0a185 not reset as customized by admin to system_u:object_r:container_file_t:s0:c432,c991 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/kube-rbac-proxy/18938cf8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c476,c820 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/kube-rbac-proxy/7ab4eb23 not reset as customized by admin to system_u:object_r:container_file_t:s0:c272,c818 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/kube-rbac-proxy/56930be6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c432,c991 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/env-overrides not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/env-overrides/..2025_02_23_05_21_35.630010865 not 
reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/env-overrides/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/..2025_02_23_05_21_35.1088506337 not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/..2025_02_23_05_21_35.1088506337/ovnkube.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/ovnkube.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/kube-rbac-proxy/0d8e3722 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/kube-rbac-proxy/d22b2e76 not reset as customized by admin to system_u:object_r:container_file_t:s0:c382,c850 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/kube-rbac-proxy/e036759f not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/2734c483 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/57878fe7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/3f3c2e58 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/375bec3e not reset as customized by admin to system_u:object_r:container_file_t:s0:c382,c850 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/7bc41e08 not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975 Dec 10 10:44:54 crc restorecon[4677]: 
/var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/containers/download-server/48c7a72d not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/containers/download-server/4b66701f not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/containers/download-server/a5a1c202 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..2025_02_23_05_21_40.3350632666 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..2025_02_23_05_21_40.3350632666/additional-cert-acceptance-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..2025_02_23_05_21_40.3350632666/additional-pod-admission-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/additional-cert-acceptance-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/additional-pod-admission-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/env-overrides not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/env-overrides/..2025_02_23_05_21_40.1388695756 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/env-overrides/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 
Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/webhook/26f3df5b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/webhook/6d8fb21d not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/webhook/50e94777 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/208473b3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/ec9e08ba not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/3b787c39 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/208eaed5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/93aa3a2b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/3c697968 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/containers/network-check-target-container/ba950ec9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/containers/network-check-target-container/cb5cdb37 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/containers/network-check-target-container/f2df9827 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/..2025_02_23_05_22_30.473230615 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/..2025_02_23_05_22_30.473230615/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Dec 10 10:44:54 crc restorecon[4677]: 
/var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_24_06_22_02.1904938450 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_24_06_22_02.1904938450/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/machine-config-operator/fedaa673 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/machine-config-operator/9ca2df95 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/machine-config-operator/b2d7460e not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/kube-rbac-proxy/2207853c not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/kube-rbac-proxy/241c1c29 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/kube-rbac-proxy/2d910eaf not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/..data not reset as customized by admin 
to system_u:object_r:container_file_t:s0:c84,c419 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/..2025_02_23_05_23_49.3726007728 not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/..2025_02_23_05_23_49.3726007728/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/..2025_02_23_05_23_49.841175008 not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/..2025_02_23_05_23_49.841175008/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.843437178 not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.843437178/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Dec 10 10:44:54 crc restorecon[4677]: 
/var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/c6c0f2e7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/399edc97 not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/8049f7cc not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/0cec5484 not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/312446d0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c406,c828 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/8e56a35d not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.133159589 not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.133159589/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/2d30ddb9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c380,c909 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/eca8053d not reset as customized by admin to system_u:object_r:container_file_t:s0:c380,c909 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/c3a25c9a not reset as customized by admin to system_u:object_r:container_file_t:s0:c168,c522 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/b9609c22 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c108,c511 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c968,c969 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/dns-operator/e8b0eca9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c106,c418 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/dns-operator/b36a9c3f not reset as customized by admin to system_u:object_r:container_file_t:s0:c529,c711 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/dns-operator/38af7b07 not reset as customized by admin to system_u:object_r:container_file_t:s0:c968,c969 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/kube-rbac-proxy/ae821620 not reset as customized by admin to system_u:object_r:container_file_t:s0:c106,c418 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/kube-rbac-proxy/baa23338 not reset as customized by admin to system_u:object_r:container_file_t:s0:c529,c711 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/kube-rbac-proxy/2c534809 not reset as customized by admin to system_u:object_r:container_file_t:s0:c968,c969 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3532625537 not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3532625537/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/59b29eae not reset as customized by admin to system_u:object_r:container_file_t:s0:c338,c381 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/c91a8e4f not reset as customized by admin to system_u:object_r:container_file_t:s0:c338,c381 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/4d87494a not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c442,c857 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/1e33ca63 not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/kube-rbac-proxy/8dea7be2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/kube-rbac-proxy/d0b04a99 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/kube-rbac-proxy/d84f01e7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/package-server-manager/4109059b not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/package-server-manager/a7258a3e not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/package-server-manager/05bdf2b6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/f3261b51 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/315d045e not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/5fdcf278 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/d053f757 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/c2850dc7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: 
/var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/..2025_02_23_05_22_30.2390596521 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/..2025_02_23_05_22_30.2390596521/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/fcfb0b2b not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/c7ac9b7d not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/fa0c0d52 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/c609b6ba not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/2be6c296 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/89a32653 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/4eb9afeb not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/13af6efa not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/containers/olm-operator/b03f9724 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/containers/olm-operator/e3d105cc not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c12,c18 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/containers/olm-operator/3aed4d83 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1906041176 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1906041176/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/0765fa6e not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/2cefc627 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/3dcc6345 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/365af391 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-SelfManagedHA-Default.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-SelfManagedHA-TechPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Dec 10 10:44:54 crc restorecon[4677]: 
/var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-SelfManagedHA-DevPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-Hypershift-TechPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-Hypershift-DevPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-Hypershift-Default.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-api/b1130c0f not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-api/236a5913 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-api/b9432e26 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/5ddb0e3f not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/986dc4fd not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/8a23ff9a not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/9728ae68 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/665f31d0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1255385357 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Dec 10 10:44:54 crc restorecon[4677]: 
/var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1255385357/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_23_57.573792656 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_23_57.573792656/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_23_05_22_30.3254245399 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_23_05_22_30.3254245399/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/136c9b42 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c0,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/98a1575b not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/cac69136 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/5deb77a7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/2ae53400 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3608339744 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3608339744/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/e46f2326 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/dc688d3c not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/3497c3cd not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/177eb008 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13 Dec 10 10:44:54 crc restorecon[4677]: 
/var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3819292994 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3819292994/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/af5a2afa not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/d780cb1f not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/49b0f374 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/26fbb125 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.3244779536 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.3244779536/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Dec 10 10:44:54 crc restorecon[4677]: 
/var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/cf14125a not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/b7f86972 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/e51d739c not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/88ba6a69 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/669a9acf not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/5cd51231 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/75349ec7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/15c26839 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/45023dcd not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/2bb66a50 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/kube-rbac-proxy/64d03bdd not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/kube-rbac-proxy/ab8e7ca0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/kube-rbac-proxy/bb9be25f not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.2034221258 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.2034221258/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: 
/var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/containers/cluster-image-registry-operator/9a0b61d3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/containers/cluster-image-registry-operator/d471b9d2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/containers/cluster-image-registry-operator/8cb76b8e not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/containers/catalog-operator/11a00840 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/containers/catalog-operator/ec355a92 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/containers/catalog-operator/992f735e not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1782968797 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1782968797/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Dec 10 10:44:54 crc 
restorecon[4677]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/d59cdbbc not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/72133ff0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/c56c834c not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/d13724c7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/0a498258 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/containers/machine-config-server/fa471982 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/containers/machine-config-server/fc900d92 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/containers/machine-config-server/fa7d68da not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/migrator/4bacf9b4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/migrator/424021b1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/migrator/fc2e31a3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/graceful-termination/f51eefac not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/graceful-termination/c8997f2f not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/graceful-termination/7481f599 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Dec 10 10:44:54 crc restorecon[4677]: 
/var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/..2025_02_23_05_22_49.2255460704 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/..2025_02_23_05_22_49.2255460704/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/fdafea19 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/d0e1c571 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/ee398915 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/682bb6b8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/setup/a3e67855 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/setup/a989f289 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/setup/915431bd not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-ensure-env-vars/7796fdab not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-ensure-env-vars/dcdb5f19 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Dec 10 10:44:54 crc restorecon[4677]: 
/var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-ensure-env-vars/a3aaa88c not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-resources-copy/5508e3e6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-resources-copy/160585de not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-resources-copy/e99f8da3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcdctl/8bc85570 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcdctl/a5861c91 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcdctl/84db1135 not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd/9e1a6043 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd/c1aba1c2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd/d55ccd6d not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-metrics/971cc9f6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-metrics/8f2e3dcf not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-metrics/ceb35e9c not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-readyz/1c192745 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-readyz/5209e501 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-readyz/f83de4df not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-rev/e7b978ac not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Dec 10 10:44:54 crc 
restorecon[4677]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-rev/c64304a1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-rev/5384386b not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c268,c620 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/multus-admission-controller/cce3e3ff not reset as customized by admin to system_u:object_r:container_file_t:s0:c435,c756 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/multus-admission-controller/8fb75465 not reset as customized by admin to system_u:object_r:container_file_t:s0:c268,c620 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/kube-rbac-proxy/740f573e not reset as customized by admin to system_u:object_r:container_file_t:s0:c435,c756 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/kube-rbac-proxy/32fd1134 not reset as customized by admin to system_u:object_r:container_file_t:s0:c268,c620 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/containers/serve-healthcheck-canary/0a861bd3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/containers/serve-healthcheck-canary/80363026 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/containers/serve-healthcheck-canary/bfa952a8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_23_05_33_31.2122464563 not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_23_05_33_31.2122464563/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/config-file.yaml not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c129,c158 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/config/..2025_02_23_05_33_31.333075221 not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/kube-rbac-proxy/793bf43d not reset as customized by admin to system_u:object_r:container_file_t:s0:c381,c387 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/kube-rbac-proxy/7db1bb6e not reset as customized by admin to system_u:object_r:container_file_t:s0:c142,c438 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/kube-rbac-proxy/4f6a0368 not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/c12c7d86 not reset as customized by admin to system_u:object_r:container_file_t:s0:c381,c387 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/36c4a773 not reset as customized by admin to system_u:object_r:container_file_t:s0:c142,c438 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/4c1e98ae not reset as customized by admin to system_u:object_r:container_file_t:s0:c142,c438 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/a4c8115c not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/setup/7db1802e not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver/a008a7ab not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-cert-syncer/2c836bac not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-cert-regeneration-controller/0ce62299 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c97,c980 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-insecure-readyz/945d2457 not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-check-endpoints/7d5c1dd8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/advanced-cluster-management not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/advanced-cluster-management/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-broker-rhel8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-broker-rhel8/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-online not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-online/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams-console not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams-console/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq7-interconnect-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq7-interconnect-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-automation-platform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-automation-platform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-cloud-addons-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-cloud-addons-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry-3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry-3/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-load-balancer-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-load-balancer-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-businessautomation-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-businessautomation-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-kogito-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-kogito-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator/index.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/businessautomation-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/businessautomation-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cephcsi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cephcsi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cincinnati-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cincinnati-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-kube-descheduler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-kube-descheduler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-logging not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-logging/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-observability-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-observability-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/compliance-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/compliance-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/container-security-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/container-security-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/costmanagement-metrics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/costmanagement-metrics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cryostat-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cryostat-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datagrid not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datagrid/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devspaces not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devspaces/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devworkspace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devworkspace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dpu-network-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dpu-network-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eap not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eap/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-dns-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-dns-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/file-integrity-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/file-integrity-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-apicurito not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-apicurito/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-console not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-console/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-online not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-online/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gatekeeper-operator-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gatekeeper-operator-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jws-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jws-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management-hub not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management-hub/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kiali-ossm not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kiali-ossm/catalog.json not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubevirt-hyperconverged not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubevirt-hyperconverged/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logic-operator-rhel8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logic-operator-rhel8/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lvms-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lvms-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mcg-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mcg-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mta-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mta-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtr-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtr-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-engine not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-engine/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-observability-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-observability-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-client-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-client-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-csi-addons-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-csi-addons-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-multicluster-orchestrator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-multicluster-orchestrator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-prometheus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-prometheus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-cluster-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-cluster-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-hub-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-hub-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator/bundle-v1.15.0.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator/channel.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator/package.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-custom-metrics-autoscaler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-custom-metrics-autoscaler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-gitops-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-gitops-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-pipelines-operator-rh not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-pipelines-operator-rh/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-secondary-scheduler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-secondary-scheduler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-bridge-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-bridge-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/recipe not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/recipe/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-camel-k not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-camel-k/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-hawtio-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-hawtio-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redhat-oadp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redhat-oadp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rh-service-binding-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rh-service-binding-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhacs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhacs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhbk-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhbk-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhdh not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhdh/catalog.json not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-prometheus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-prometheus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhpam-kogito-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhpam-kogito-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhsso-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhsso-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rook-ceph-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rook-ceph-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/run-once-duration-override-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/run-once-duration-override-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sandboxed-containers-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sandboxed-containers-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/security-profiles-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/security-profiles-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/serverless-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/serverless-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-registry-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-registry-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator3/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/submariner not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/submariner/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tang-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tang-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustee-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustee-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volsync-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volsync-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/web-terminal not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/web-terminal/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc 
restorecon[4677]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-utilities/bc8d0691 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-utilities/6b76097a not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-utilities/34d1af30 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-content/312ba61c not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc 
restorecon[4677]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-content/645d5dd1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-content/16e825f0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/registry-server/4cf51fc9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/registry-server/2a23d348 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/registry-server/075dbd49 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Dec 10 10:44:54 
crc restorecon[4677]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/containers/node-ca/dd585ddd not reset as customized by admin to system_u:object_r:container_file_t:s0:c377,c642 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/containers/node-ca/17ebd0ab not reset as customized by admin to system_u:object_r:container_file_t:s0:c338,c343 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/containers/node-ca/005579f4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_23_05_23_11.449897510 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_23_05_23_11.449897510/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_23_05_23_11.1287037894 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/..2025_02_23_05_23_11.1301053334 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/..2025_02_23_05_23_11.1301053334/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Dec 10 
10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/fix-audit-permissions/bf5f3b9c not reset as customized by admin to system_u:object_r:container_file_t:s0:c49,c263 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/fix-audit-permissions/af276eb7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c701 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/fix-audit-permissions/ea28e322 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/oauth-apiserver/692e6683 not reset as customized by admin to system_u:object_r:container_file_t:s0:c49,c263 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/oauth-apiserver/871746a7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c701 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/oauth-apiserver/4eb2e958 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/..2025_02_24_06_09_06.2875086261 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/..2025_02_24_06_09_06.2875086261/console-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/console-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Dec 10 10:44:54 crc restorecon[4677]: 
/var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_09_06.286118152 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_09_06.286118152/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/..2025_02_24_06_09_06.3865795478 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/..2025_02_24_06_09_06.3865795478/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/..2025_02_24_06_09_06.584414814 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/..2025_02_24_06_09_06.584414814/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/etc-hosts not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/containers/console/ca9b62da not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/containers/console/0edd6fce not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837 not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/openshift-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/openshift-controller-manager.openshift-global-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/openshift-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/openshift-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/openshift-controller-manager.openshift-global-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/openshift-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca not reset as customized by 
admin to system_u:object_r:container_file_t:s0:c14,c22 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.1071801880 not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.1071801880/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/..2025_02_24_06_20_07.2494444877 not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/..2025_02_24_06_20_07.2494444877/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/containers/controller-manager/89b4555f not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/..2025_02_23_05_23_22.4071100442 not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/..2025_02_23_05_23_22.4071100442/Corefile not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Dec 10 10:44:54 crc restorecon[4677]: 
/var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/Corefile not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/dns/655fcd71 not reset as customized by admin to system_u:object_r:container_file_t:s0:c457,c841 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/dns/0d43c002 not reset as customized by admin to system_u:object_r:container_file_t:s0:c55,c1022 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/dns/e68efd17 not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/kube-rbac-proxy/9acf9b65 not reset as customized by admin to system_u:object_r:container_file_t:s0:c457,c841 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/kube-rbac-proxy/5ae3ff11 not reset as customized by admin to system_u:object_r:container_file_t:s0:c55,c1022 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/kube-rbac-proxy/1e59206a not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/containers/dns-node-resolver/27af16d1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c304,c1017 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/containers/dns-node-resolver/7918e729 not reset as customized by admin to system_u:object_r:container_file_t:s0:c853,c893 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/containers/dns-node-resolver/5d976d0e not reset as customized by admin to system_u:object_r:container_file_t:s0:c585,c981 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/..2025_02_23_05_38_56.1112187283 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/..2025_02_23_05_38_56.1112187283/controller-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Dec 10 10:44:54 crc restorecon[4677]: 
/var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/controller-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_38_56.2839772658 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_38_56.2839772658/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/d7f55cbb not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/f0812073 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/1a56cbeb not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/7fdd437e not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/cdfb5652 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_24_06_17_29.3844392896 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_24_06_17_29.3844392896/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Dec 10 10:44:54 crc restorecon[4677]: 
/var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/..2025_02_24_06_17_29.848549803 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/..2025_02_24_06_17_29.848549803/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/..2025_02_24_06_17_29.780046231 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/..2025_02_24_06_17_29.780046231/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347/image-registry.openshift-image-registry.svc..5000 not reset 
as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_17_29.2729721485 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_17_29.2729721485/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/containers/fix-audit-permissions/fb93119e not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Dec 10 10:44:54 crc restorecon[4677]: 
/var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/containers/openshift-apiserver/f1e8fc0e not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/containers/openshift-apiserver-check-endpoints/218511f3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes/kubernetes.io~empty-dir/tmpfs not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes/kubernetes.io~empty-dir/tmpfs/k8s-webhook-server not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes/kubernetes.io~empty-dir/tmpfs/k8s-webhook-server/serving-certs not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/containers/packageserver/ca8af7b3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/containers/packageserver/72cc8a75 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/containers/packageserver/6e8a3760 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/..2025_02_23_05_27_30.557428972 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/..2025_02_23_05_27_30.557428972/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/4c3455c0 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c5,c6 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/2278acb0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/4b453e4f not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/3ec09bda not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_24_06_25_03.422633132 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_24_06_25_03.422633132/anchors not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_24_06_25_03.422633132/anchors/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/anchors not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/edk2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/edk2/cacerts.bin not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/java not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/java/cacerts not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/openssl not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/openssl/ca-bundle.trust.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/tls-ca-bundle.pem not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/email-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/objsign-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2ae6433e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fde84897.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/75680d2e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/openshift-service-serving-signer_1740288168.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/facfc4fa.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8f5a969c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CFCA_EV_ROOT.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9ef4a08a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ingress-operator_1740288202.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2f332aed.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/248c8271.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d10a21f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ACCVRAIZ1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a94d09e5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3c9a4d3b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/40193066.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AC_RAIZ_FNMT-RCM.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cd8c0d63.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b936d1c6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CA_Disig_Root_R2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4fd49c6c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AC_RAIZ_FNMT-RCM_SERVIDORES_SEGUROS.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b81b93f0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f9a69fa.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certigna.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b30d5fda.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ANF_Secure_Server_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b433981b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/93851c9e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9282e51c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e7dd1bc4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Actalis_Authentication_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/930ac5d2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f47b495.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e113c810.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5931b5bc.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Commercial.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2b349938.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e48193cf.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/302904dd.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a716d4ed.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Networking.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/93bc0acc.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/86212b19.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certigna_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Premium.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b727005e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dbc54cab.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f51bb24c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c28a8a30.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Premium_ECC.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9c8dfbd4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ccc52f49.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cb1c3204.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ce5e74ef.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fd08c599.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_Trusted_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6d41d539.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fb5fa911.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e35234b1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8cb5ee0f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a7c655d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f8fc53da.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/de6d66f3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d41b5e2a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/41a3f684.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1df5a75f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Atos_TrustedRoot_2011.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e36a6752.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b872f2b4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9576d26b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/228f89db.0 
not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Atos_TrustedRoot_Root_CA_ECC_TLS_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fb717492.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2d21b73c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0b1b94ef.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/595e996b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Atos_TrustedRoot_Root_CA_RSA_TLS_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9b46e03d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/128f4b91.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Buypass_Class_3_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/81f2d2b1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Autoridad_de_Certificacion_Firmaprofesional_CIF_A62634068.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3bde41ac.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d16a5865.0 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_EC-384_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/BJCA_Global_Root_CA1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0179095f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ffa7f1eb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9482e63a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d4dae3dd.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/BJCA_Global_Root_CA2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3e359ba6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7e067d03.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/95aff9e3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d7746a63.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Baltimore_CyberTrust_Root.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/653b494a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3ad48a91.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_Trusted_Network_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Buypass_Class_2_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/54657681.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/82223c44.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e8de2f56.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2d9dafe4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d96b65e2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ee64a828.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/COMODO_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/40547a79.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5a3f0ff8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a780d93.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/34d996fb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/COMODO_ECC_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/eed8c118.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/89c02a45.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certainly_Root_R1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b1159c4c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/COMODO_RSA_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d6325660.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d4c339cb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8312c4c1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certainly_Root_E1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8508e720.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5fdd185d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/48bec511.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/69105f4f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0b9bc432.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_Trusted_Network_CA_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/32888f65.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_ECC_Root-01.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6b03dec0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/219d9499.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_ECC_Root-02.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5acf816d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cbf06781.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_RSA_Root-01.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dc99f41e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_RSA_Root-02.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AAA_Certificate_Services.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/985c1f52.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8794b4e3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_BR_Root_CA_1_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e7c037b4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ef954a4e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_EV_Root_CA_1_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2add47b6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/90c5a3c8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_Root_Class_3_CA_2_2009.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b0f3e76e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/53a1b57a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_Root_Class_3_CA_2_EV_2009.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Assured_ID_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5ad8a5d6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/68dd7389.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Assured_ID_Root_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9d04f354.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d6437c3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/062cdee6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bd43e1dd.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Assured_ID_Root_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7f3d5d1d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c491639e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign_Root_E46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Global_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3513523f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/399e7759.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/feffd413.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d18e9066.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Global_Root_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/607986c7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c90bc37d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1b0f7e5c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e08bfd1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Global_Root_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dd8e9d41.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ed39abd0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a3418fda.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bc3f2570.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_High_Assurance_EV_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/244b5494.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/81b9768f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4be590e0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_TLS_ECC_P384_Root_G5.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9846683b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/252252d2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e8e7201.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ISRG_Root_X1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_TLS_RSA4096_Root_G5.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d52c538d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c44cc0c0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign_Root_R46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Trusted_Root_G4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/75d1b2ed.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a2c66da8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ecccd8db.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust.net_Certification_Authority__2048_.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/aee5f10d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3e7271e8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b0e59380.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4c3982f2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6b99d060.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bf64f35b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0a775a30.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/002c0b4f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cc450945.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority_-_EC1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/106f3e4d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b3fb433b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4042bcee.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/02265526.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/455f1b52.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0d69c7e1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9f727ac7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority_-_G4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5e98733a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f0cd152c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dc4d6a89.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6187b673.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/FIRMAPROFESIONAL_CA_ROOT-A_WEB.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ba8887ce.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/068570d1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f081611a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/48a195d8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GDCA_TrustAUTH_R5_ROOT.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0f6fa695.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ab59055e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b92fd57f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GLOBALTRUST_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fa5da96b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1ec40989.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7719f463.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1001acf7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f013ecaf.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/626dceaf.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c559d742.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1d3472b9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9479c8c3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a81e292b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4bfab552.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Go_Daddy_Class_2_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Sectigo_Public_Server_Authentication_Root_E46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Go_Daddy_Root_Certificate_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e071171e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/57bcb2da.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/HARICA_TLS_ECC_Root_CA_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ab5346f4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5046c355.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/HARICA_TLS_RSA_Root_CA_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/865fbdf9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/da0cfd1d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/85cde254.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Hellenic_Academic_and_Research_Institutions_ECC_RootCA_2015.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cbb3f32b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SecureSign_RootCA11.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Hellenic_Academic_and_Research_Institutions_RootCA_2015.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5860aaa6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/31188b5e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/HiPKI_Root_CA_-_G1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c7f1359b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f15c80c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Hongkong_Post_Root_CA_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/09789157.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ISRG_Root_X2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/18856ac4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e09d511.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/IdenTrust_Commercial_Root_CA_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cf701eeb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d06393bb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/IdenTrust_Public_Sector_Root_CA_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/10531352.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Izenpe.com.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SecureTrust_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b0ed035a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Microsec_e-Szigno_Root_CA_2009.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8160b96c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e8651083.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2c63f966.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Security_Communication_RootCA2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Microsoft_ECC_Root_Certificate_Authority_2017.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d89cda1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/01419da9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_TLS_RSA_Root_CA_2022.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b7a5b843.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Microsoft_RSA_Root_Certificate_Authority_2017.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bf53fb88.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9591a472.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3afde786.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SwissSign_Gold_CA_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/NAVER_Global_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3fb36b73.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d39b0a2c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a89d74c2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cd58d51e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b7db1890.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/NetLock_Arany__Class_Gold__F__tan__s__tv__ny.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/988a38cb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/60afe812.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f39fc864.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5443e9e3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/OISTE_WISeKey_Global_Root_GB_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e73d606e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dfc0fe80.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b66938e9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e1eab7c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/OISTE_WISeKey_Global_Root_GC_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/773e07ad.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3c899c73.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d59297b8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ddcda989.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_1_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/749e9e03.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/52b525c7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Security_Communication_RootCA3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d7e8dc79.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a819ef2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/08063a00.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6b483515.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_2_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/064e0aa9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1f58a078.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6f7454b3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7fa05551.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/76faf6c0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9339512a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f387163d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ee37c333.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_3_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e18bfb83.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e442e424.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fe8a2cd8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/23f4c490.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5cd81ad7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_EV_Root_Certification_Authority_ECC.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f0c70a8d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7892ad52.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SZAFIR_ROOT_CA2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4f316efb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_EV_Root_Certification_Authority_RSA_R2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/06dc52d5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/583d0756.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Sectigo_Public_Server_Authentication_Root_R46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_Root_Certification_Authority_ECC.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0bf05006.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/88950faa.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9046744a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3c860d51.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_Root_Certification_Authority_RSA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6fa5da56.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/33ee480d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Secure_Global_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/63a2c897.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_TLS_ECC_Root_CA_2022.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bdacca6f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ff34af3f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dbff3a01.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Security_Communication_ECC_RootCA1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_Root_CA_-_C1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Starfield_Class_2_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/406c9bb1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Starfield_Root_Certificate_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_ECC_Root_CA_-_C3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Starfield_Services_Root_Certificate_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SwissSign_Silver_CA_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/99e1b953.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/T-TeleSec_GlobalRoot_Class_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/vTrus_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/T-TeleSec_GlobalRoot_Class_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/14bc7599.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TUBITAK_Kamu_SM_SSL_Kok_Sertifikasi_-_Surum_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TWCA_Global_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a3adc42.0 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TWCA_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f459871d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Telekom_Security_TLS_ECC_Root_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_Root_CA_-_G1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Telekom_Security_TLS_RSA_Root_2023.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TeliaSonera_Root_CA_v1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Telia_Root_CA_v2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8f103249.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f058632f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ca-certificates.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TrustAsia_Global_Root_CA_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9bf03295.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/98aaf404.0 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TrustAsia_Global_Root_CA_G4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1cef98f5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/073bfcc5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2923b3f9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Trustwave_Global_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f249de83.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/edcbddb5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_ECC_Root_CA_-_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Trustwave_Global_ECC_P256_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9b5697b0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1ae85e5e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b74d2bd5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 
10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Trustwave_Global_ECC_P384_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d887a5bb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9aef356c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TunTrust_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fd64f3fc.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e13665f9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/UCA_Extended_Validation_Root.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0f5dc4f3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/da7377f6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/UCA_Global_G2_Root.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c01eb047.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/304d27c3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ed858448.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/USERTrust_ECC_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f30dd6ad.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/04f60c28.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/vTrus_ECC_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/USERTrust_RSA_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fc5a8f99.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/35105088.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ee532fd5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/XRamp_Global_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/706f604c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/76579174.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/certSIGN_ROOT_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d86cdd1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/882de061.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/certSIGN_ROOT_CA_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f618aec.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a9d40e02.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e-Szigno_Root_CA_2017.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e868b802.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/83e9984f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ePKI_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ca6e4ad9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9d6523ce.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4b718d9b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/869fbf79.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/containers/registry/f8d22bdb not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:44:54 crc 
restorecon[4677]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator/6e8bbfac not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator/54dd7996 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator/a4f1bb05 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator-watch/207129da not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator-watch/c1df39e1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator-watch/15b8f1cd not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3523263858 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3523263858/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/..2025_02_23_05_27_49.3256605594 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/..2025_02_23_05_27_49.3256605594/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Dec 10 10:44:54 crc restorecon[4677]: 
/var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/kube-rbac-proxy/77bd6913 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/kube-rbac-proxy/2382c1b1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/kube-rbac-proxy/704ce128 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/machine-api-operator/70d16fe0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/machine-api-operator/bfb95535 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/machine-api-operator/57a8e8e2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3413793711 not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3413793711/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/containers/kube-apiserver-operator/1b9d3e5e not reset as customized by admin to system_u:object_r:container_file_t:s0:c107,c917 Dec 10 10:44:54 crc restorecon[4677]: 
/var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/containers/kube-apiserver-operator/fddb173c not reset as customized by admin to system_u:object_r:container_file_t:s0:c202,c983 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/containers/kube-apiserver-operator/95d3c6c4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/containers/check-endpoints/bfb5fff5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/containers/check-endpoints/2aef40aa not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/containers/check-endpoints/c0391cad not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager/1119e69d not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager/660608b4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager/8220bd53 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/cluster-policy-controller/85f99d5c not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/cluster-policy-controller/4b0225f6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-cert-syncer/9c2a3394 not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-cert-syncer/e820b243 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-recovery-controller/1ca52ea0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-recovery-controller/e6988e45 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Dec 10 10:44:54 crc restorecon[4677]: 
/var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/..2025_02_24_06_09_21.2517297950 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/..2025_02_24_06_09_21.2517297950/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/machine-config-controller/6655f00b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/machine-config-controller/98bc3986 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/machine-config-controller/08e3458a not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/kube-rbac-proxy/2a191cb0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/kube-rbac-proxy/6c4eeefb not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/kube-rbac-proxy/f61a549c not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/hostpath-provisioner/24891863 not reset as customized by admin to system_u:object_r:container_file_t:s0:c37,c572 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/hostpath-provisioner/fbdfd89c not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/liveness-probe/9b63b3bc not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c37,c572 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/liveness-probe/8acde6d6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/node-driver-registrar/59ecbba3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/csi-provisioner/685d4be3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300/openshift-route-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300/openshift-route-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/openshift-route-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/openshift-route-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.2950937851 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Dec 10 10:44:54 crc restorecon[4677]: 
/var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.2950937851/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/containers/route-controller-manager/feaea55e not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abinitio-runtime-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abinitio-runtime-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/accuknox-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/accuknox-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aci-containers-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aci-containers-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airlock-microgateway not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airlock-microgateway/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ako-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ako-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloy not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloy/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anchore-engine not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anchore-engine/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 
10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-cloud-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-cloud-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-dcap-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-dcap-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cfm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cfm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium-enterprise not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium-enterprise/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloud-native-postgresql not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloud-native-postgresql/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudera-streams-messaging-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudera-streams-messaging-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudnative-pg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudnative-pg/catalog.json not reset as customized by admin 
to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cnfv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cnfv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/conjur-follower-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/conjur-follower-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/coroot-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/coroot-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cte-k8s-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cte-k8s-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-deploy-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-deploy-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-release-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-release-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edb-hcp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edb-hcp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-eck-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-eck-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/federatorai-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/federatorai-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fujitsu-enterprise-postgres-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fujitsu-enterprise-postgres-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/function-mesh not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/function-mesh/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/harness-gitops-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/harness-gitops-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hcp-terraform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hcp-terraform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hpe-ezmeral-csi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hpe-ezmeral-csi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-application-gateway-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-application-gateway-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-directory-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-directory-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-dr-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-dr-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-licensing-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-licensing-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-sds-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-sds-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infrastructure-asset-orchestrator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infrastructure-asset-orchestrator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-device-plugins-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-device-plugins-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-kubernetes-power-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-kubernetes-power-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-openshift-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-openshift-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8s-triliovault not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8s-triliovault/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-ati-updates not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-ati-updates/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-framework not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-framework/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-ingress not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-ingress/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-licensing not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-licensing/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-sso not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-sso/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-keycloak-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-keycloak-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-load-core not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-load-core/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-loadcore-agents not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-loadcore-agents/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nats-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nats-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nimbusmosaic-dusim not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nimbusmosaic-dusim/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-rest-api-browser-v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-rest-api-browser-v1/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-appsec not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-appsec/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-core not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-core/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-db/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-diagnostics not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-diagnostics/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-logging not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-logging/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-migration not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-migration/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-msg-broker not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-msg-broker/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-notifications not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-notifications/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-stats-dashboards not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-stats-dashboards/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-storage not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-storage/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-test-core not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-test-core/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-ui not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-ui/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-websocket-service not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-websocket-service/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kong-gateway-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kong-gateway-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubearmor-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubearmor-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lenovo-locd-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lenovo-locd-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memcached-operator-ogaye not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memcached-operator-ogaye/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memory-machine-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memory-machine-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-enterprise not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-enterprise/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netapp-spark-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netapp-spark-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-adm-agent-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-adm-agent-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-repository-ha-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-repository-ha-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nginx-ingress-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nginx-ingress-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nim-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nim-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxiq-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxiq-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxrm-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxrm-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odigos-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odigos-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/open-liberty-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/open-liberty-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftartifactoryha-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftartifactoryha-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftxray-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftxray-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/operator-certification-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/operator-certification-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pmem-csi-operator-os not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pmem-csi-operator-os/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-component-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-component-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-fabric-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-fabric-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sanstoragecsi-operator-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sanstoragecsi-operator-bundle/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/smilecdr-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/smilecdr-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sriov-fec not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sriov-fec/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-commons-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-commons-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-zookeeper-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-zookeeper-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-tsc-client-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-tsc-client-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tawon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tawon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tigera-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tigera-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-secrets-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-secrets-operator/catalog.json not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vcp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vcp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/webotx-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/webotx-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-utilities/63709497 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-utilities/d966b7fd not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-utilities/f5773757 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-content/81c9edb9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-content/57bf57ee not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-content/86f5e6aa not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/registry-server/0aabe31d not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/registry-server/d2af85c2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/registry-server/09d157d9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acm-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acm-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acmpca-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acmpca-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigateway-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigateway-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigatewayv2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigatewayv2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-applicationautoscaling-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-applicationautoscaling-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-athena-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-athena-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudfront-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudfront-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudtrail-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudtrail-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatch-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatch-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatchlogs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatchlogs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-documentdb-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-documentdb-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-dynamodb-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-dynamodb-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ec2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ec2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecr-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecr-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-efs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-efs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eks-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eks-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elasticache-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elasticache-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elbv2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elbv2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-emrcontainers-controller not reset 
as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-emrcontainers-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eventbridge-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eventbridge-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-iam-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-iam-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kafka-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kafka-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-keyspaces-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-keyspaces-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kinesis-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kinesis-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kms-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kms-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-lambda-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-lambda-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-memorydb-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-memorydb-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-mq-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-mq-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-networkfirewall-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-networkfirewall-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-opensearchservice-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-opensearchservice-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-organizations-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-organizations-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-pipes-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-pipes-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-prometheusservice-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-prometheusservice-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-rds-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-rds-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-recyclebin-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-recyclebin-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53resolver-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53resolver-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-s3-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-s3-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sagemaker-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sagemaker-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-secretsmanager-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-secretsmanager-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ses-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ses-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sfn-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sfn-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sns-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sns-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sqs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sqs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ssm-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ssm-controller/catalog.json not 
reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-wafv2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-wafv2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airflow-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airflow-helm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloydb-omni-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloydb-omni-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alvearie-imaging-ingestion not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alvearie-imaging-ingestion/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amd-gpu-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amd-gpu-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/analytics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/analytics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/annotationlab not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/annotationlab/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-api-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-api-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurito not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurito/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apimatic-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apimatic-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/application-services-metering-operator not 
reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/application-services-metering-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/argocd-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/argocd-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/assisted-service-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/assisted-service-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/automotive-infra not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/automotive-infra/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-efs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-efs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/awss3-operator-registry not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/awss3-operator-registry/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/azure-service-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/azure-service-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/beegfs-csi-driver-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/beegfs-csi-driver-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-k not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-k/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-karavan-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-karavan-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator-community not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator-community/catalog.json not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-utils-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-utils-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-aas-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-aas-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-impairment-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-impairment-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/codeflare-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/codeflare-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-kubevirt-hyperconverged not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-kubevirt-hyperconverged/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-trivy-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-trivy-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-windows-machine-config-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-windows-machine-config-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/customized-user-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/customized-user-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cxl-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cxl-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dapr-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dapr-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datatrucker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datatrucker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dbaas-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dbaas-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/debezium-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/debezium-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/deployment-validation-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/deployment-validation-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devopsinabox not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devopsinabox/catalog.json not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dns-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dns-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-amlen-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-amlen-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-che not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-che/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ecr-secret-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ecr-secret-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edp-keycloak-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edp-keycloak-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/egressip-ipam-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/egressip-ipam-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ember-csi-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ember-csi-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/etcd not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/etcd/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eventing-kogito not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eventing-kogito/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-secrets-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-secrets-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation not reset as customized by 
admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flink-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flink-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8gb not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8gb/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fossul-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fossul-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/github-arc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/github-arc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitops-primer not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitops-primer/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitwebhook-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitwebhook-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/global-load-balancer-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/global-load-balancer-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/grafana-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/grafana-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/group-sync-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/group-sync-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hawtio-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hawtio-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hedvig-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hedvig-operator/catalog.json not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hive-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hive-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/horreum-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/horreum-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hyperfoil-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hyperfoil-bundle/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator-community not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator-community/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-spectrum-scale-csi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-spectrum-scale-csi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibmcloud-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibmcloud-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infinispan not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infinispan/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/integrity-shield-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/integrity-shield-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ipfs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ipfs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/istio-workspace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/istio-workspace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kaoto-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kaoto-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keda not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 
10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keda/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keepalived-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keepalived-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-permissions-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-permissions-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/klusterlet not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/klusterlet/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kogito-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kogito-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/koku-metrics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/koku-metrics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/konveyor-operator not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/konveyor-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/korrel8r not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/korrel8r/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kuadrant-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kuadrant-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kube-green not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kube-green/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubernetes-imagepuller-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubernetes-imagepuller-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/l5-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/l5-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/layer7-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/layer7-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lbconfig-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lbconfig-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lib-bucket-provisioner not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lib-bucket-provisioner/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/limitador-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/limitador-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logging-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logging-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-helm-operator/catalog.json not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mariadb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mariadb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marin3r not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marin3r/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mercury-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mercury-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/microcks not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/microcks/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/move2kube-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/move2kube-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multi-nic-cni-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multi-nic-cni-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-global-hub-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-global-hub-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-operators-subscription not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-operators-subscription/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/must-gather-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/must-gather-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/namespace-configuration-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/namespace-configuration-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ncn-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ncn-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ndmspc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ndmspc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator-m88i not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator-m88i/catalog.json not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nfs-provisioner-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nfs-provisioner-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nlp-server not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nlp-server/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-discovery-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-discovery-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nsm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nsm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oadp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oadp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/observability-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/observability-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oci-ccm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oci-ccm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odoo-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odoo-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opendatahub-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opendatahub-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openebs not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openebs/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-nfd-operator not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-nfd-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-node-upgrade-mutex-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-node-upgrade-mutex-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-qiskit-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-qiskit-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patch-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patch-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patterns-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patterns-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pelorus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pelorus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/percona-xtradb-cluster-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/percona-xtradb-cluster-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-essentials not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-essentials/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/postgresql not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/postgresql/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/proactive-node-scaling-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/proactive-node-scaling-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/project-quay not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/project-quay/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus/catalog.json not reset as customized by admin 
to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus-exporter-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus-exporter-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pulp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pulp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-cluster-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-cluster-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-messaging-topology-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-messaging-topology-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/reportportal-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/reportportal-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/resource-locker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/resource-locker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhoas-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhoas-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ripsaw not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ripsaw/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sailoperator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sailoperator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-commerce-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-commerce-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-data-intelligence-observer-operator not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-data-intelligence-observer-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-hana-express-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-hana-express-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-binding-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-binding-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/shipwright-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/shipwright-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sigstore-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sigstore-helm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snapscheduler not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snapscheduler/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snyk-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snyk-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/socmmd not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/socmmd/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonar-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonar-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosivio not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosivio/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc 
restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonataflow-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonataflow-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosreport-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosreport-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/spark-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/spark-helm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/special-resource-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/special-resource-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron-engine not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron-engine/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/strimzi-kafka-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/strimzi-kafka-operator/catalog.json not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/syndesis not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/syndesis/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tagger not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tagger/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tf-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tf-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tidb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tidb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trident-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trident-operator/catalog.json not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustify-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustify-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ucs-ci-solutions-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ucs-ci-solutions-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/universal-crossplane not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/universal-crossplane/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/varnish-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/varnish-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-config-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-config-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/verticadb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/verticadb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volume-expander-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volume-expander-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/wandb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/wandb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/windup-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/windup-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yaks not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yaks/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-utilities/c0fe7256 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-utilities/c30319e4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-utilities/e6b1dd45 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-content/2bb643f0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-content/920de426 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-content/70fa1e87 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/registry-server/a1c12a2f not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/registry-server/9442e6c7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/registry-server/5b45ec72 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abot-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abot-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/entando-k8s-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/entando-k8s-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-paygo-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-paygo-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-term-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-term-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/linstor-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/linstor-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-deploy-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-deploy-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-paygo-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-paygo-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vfunction-server-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vfunction-server-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yugabyte-platform-operator-bundle-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yugabyte-platform-operator-bundle-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-utilities/3c9f3a59 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-utilities/1091c11b not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-utilities/9a6821c6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-content/ec0c35e2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-content/517f37e7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-content/6214fe78 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/registry-server/ba189c8b not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/registry-server/351e4f31 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/registry-server/c0f219ff not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/wait-for-host-port/8069f607 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/wait-for-host-port/559c3d82 not reset as customized by admin to system_u:object_r:container_file_t:s0:c133,c223 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/wait-for-host-port/605ad488 not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler/148df488 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler/3bf6dcb4 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c133,c223 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler/022a2feb not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-cert-syncer/938c3924 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-cert-syncer/729fe23e not reset as customized by admin to system_u:object_r:container_file_t:s0:c133,c223 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-cert-syncer/1fd5cbd4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-recovery-controller/a96697e1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-recovery-controller/e155ddca not reset as customized by admin to system_u:object_r:container_file_t:s0:c133,c223 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-recovery-controller/10dd0e0f not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/..2025_02_24_06_09_35.3018472960 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/..2025_02_24_06_09_35.3018472960/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/..2025_02_24_06_09_35.4262376737 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Dec 10 10:44:54 crc restorecon[4677]: 
/var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/..2025_02_24_06_09_35.4262376737/audit.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/audit.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/..2025_02_24_06_09_35.2630275752 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/..2025_02_24_06_09_35.2630275752/v4-0-config-system-cliconfig not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/v4-0-config-system-cliconfig not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/..2025_02_24_06_09_35.2376963788 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/..2025_02_24_06_09_35.2376963788/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/etc-hosts not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c682,c947 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/containers/oauth-openshift/6f2c8392 not reset as customized by admin to system_u:object_r:container_file_t:s0:c267,c588 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/containers/oauth-openshift/bd241ad9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/plugins not reset as customized by admin to system_u:object_r:container_file_t:s0 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/plugins/csi-hostpath not reset as customized by admin to system_u:object_r:container_file_t:s0 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/plugins/csi-hostpath/csi.sock not reset as customized by admin to system_u:object_r:container_file_t:s0 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/plugins/kubernetes.io not reset as customized by admin to system_u:object_r:container_file_t:s0 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/plugins/kubernetes.io/csi not reset as customized by admin to system_u:object_r:container_file_t:s0 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner not reset as customized by admin to system_u:object_r:container_file_t:s0 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983 not reset as customized by admin to system_u:object_r:container_file_t:s0 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/globalmount not reset as customized by admin to system_u:object_r:container_file_t:s0 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/vol_data.json not reset as customized by admin to system_u:object_r:container_file_t:s0 Dec 10 10:44:54 crc restorecon[4677]: /var/lib/kubelet/plugins_registry not reset as customized by admin to system_u:object_r:container_file_t:s0 Dec 10 10:44:54 crc restorecon[4677]: Relabeled /var/usrlocal/bin/kubenswrapper from system_u:object_r:bin_t:s0 to system_u:object_r:kubelet_exec_t:s0 Dec 10 10:44:55 crc kubenswrapper[4780]: Flag --container-runtime-endpoint has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information. Dec 10 10:44:55 crc kubenswrapper[4780]: Flag --minimum-container-ttl-duration has been deprecated, Use --eviction-hard or --eviction-soft instead. Will be removed in a future version. Dec 10 10:44:55 crc kubenswrapper[4780]: Flag --volume-plugin-dir has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information. Dec 10 10:44:55 crc kubenswrapper[4780]: Flag --register-with-taints has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information. 
Dec 10 10:44:55 crc kubenswrapper[4780]: Flag --pod-infra-container-image has been deprecated, will be removed in a future release. Image garbage collector will get sandbox image information from CRI. Dec 10 10:44:55 crc kubenswrapper[4780]: Flag --system-reserved has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information. Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.537326 4780 server.go:211] "--pod-infra-container-image will not be pruned by the image garbage collector in kubelet and should also be set in the remote runtime" Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.540510 4780 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.540536 4780 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.540542 4780 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.540547 4780 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.540551 4780 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.540555 4780 feature_gate.go:330] unrecognized feature gate: InsightsConfig Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.540560 4780 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.540565 4780 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.540568 4780 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.540573 4780 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.540578 4780 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.540582 4780 feature_gate.go:330] unrecognized feature gate: GatewayAPI Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.540586 4780 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.540592 4780 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.540597 4780 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.540608 4780 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.540613 4780 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.540618 4780 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.540622 4780 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.540627 4780 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release. 
Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.540633 4780 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.540637 4780 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.540641 4780 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.540644 4780 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.540648 4780 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.540652 4780 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.540655 4780 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.540659 4780 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.540662 4780 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.540666 4780 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.540669 4780 feature_gate.go:330] unrecognized feature gate: OVNObservability Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.540673 4780 feature_gate.go:330] unrecognized feature gate: UpgradeStatus Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.540678 4780 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release. Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.540683 4780 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.540687 4780 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.540693 4780 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release. 
Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.540698 4780 feature_gate.go:330] unrecognized feature gate: PinnedImages Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.540702 4780 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.540706 4780 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.540710 4780 feature_gate.go:330] unrecognized feature gate: DNSNameResolver Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.540714 4780 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.540717 4780 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.540721 4780 feature_gate.go:330] unrecognized feature gate: SignatureStores Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.540725 4780 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.540728 4780 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.540732 4780 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.540736 4780 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.540742 4780 feature_gate.go:330] unrecognized feature gate: OnClusterBuild Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.540746 4780 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.540759 4780 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.540763 4780 feature_gate.go:330] unrecognized feature gate: PlatformOperators Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.540767 4780 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.540772 4780 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release. 
Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.540777 4780 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.540781 4780 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.540784 4780 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.540789 4780 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.540793 4780 feature_gate.go:330] unrecognized feature gate: NewOLM Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.540796 4780 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.540800 4780 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.540803 4780 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.540807 4780 feature_gate.go:330] unrecognized feature gate: HardwareSpeed Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.540811 4780 feature_gate.go:330] unrecognized feature gate: ExternalOIDC Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.540814 4780 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.540818 4780 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.540821 4780 feature_gate.go:330] unrecognized feature gate: Example Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.540825 4780 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.540829 4780 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.540832 4780 feature_gate.go:330] unrecognized feature gate: ManagedBootImages Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.540836 4780 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.540839 4780 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.541120 4780 flags.go:64] FLAG: --address="0.0.0.0" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.541134 4780 flags.go:64] FLAG: --allowed-unsafe-sysctls="[]" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.541142 4780 flags.go:64] FLAG: --anonymous-auth="true" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.541149 4780 flags.go:64] FLAG: --application-metrics-count-limit="100" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.541158 4780 flags.go:64] FLAG: --authentication-token-webhook="false" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.541164 4780 flags.go:64] FLAG: --authentication-token-webhook-cache-ttl="2m0s" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.541170 4780 flags.go:64] FLAG: --authorization-mode="AlwaysAllow" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.541176 4780 flags.go:64] FLAG: --authorization-webhook-cache-authorized-ttl="5m0s" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.541181 4780 
flags.go:64] FLAG: --authorization-webhook-cache-unauthorized-ttl="30s" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.541185 4780 flags.go:64] FLAG: --boot-id-file="/proc/sys/kernel/random/boot_id" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.541190 4780 flags.go:64] FLAG: --bootstrap-kubeconfig="/etc/kubernetes/kubeconfig" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.541194 4780 flags.go:64] FLAG: --cert-dir="/var/lib/kubelet/pki" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.541199 4780 flags.go:64] FLAG: --cgroup-driver="cgroupfs" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.541203 4780 flags.go:64] FLAG: --cgroup-root="" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.541208 4780 flags.go:64] FLAG: --cgroups-per-qos="true" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.541212 4780 flags.go:64] FLAG: --client-ca-file="" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.541217 4780 flags.go:64] FLAG: --cloud-config="" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.541221 4780 flags.go:64] FLAG: --cloud-provider="" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.541225 4780 flags.go:64] FLAG: --cluster-dns="[]" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.541229 4780 flags.go:64] FLAG: --cluster-domain="" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.541234 4780 flags.go:64] FLAG: --config="/etc/kubernetes/kubelet.conf" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.541238 4780 flags.go:64] FLAG: --config-dir="" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.541242 4780 flags.go:64] FLAG: --container-hints="/etc/cadvisor/container_hints.json" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.541247 4780 flags.go:64] FLAG: --container-log-max-files="5" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.541253 4780 flags.go:64] FLAG: --container-log-max-size="10Mi" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.541258 4780 flags.go:64] FLAG: --container-runtime-endpoint="/var/run/crio/crio.sock" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.541262 4780 flags.go:64] FLAG: --containerd="/run/containerd/containerd.sock" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.541266 4780 flags.go:64] FLAG: --containerd-namespace="k8s.io" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.541271 4780 flags.go:64] FLAG: --contention-profiling="false" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.541275 4780 flags.go:64] FLAG: --cpu-cfs-quota="true" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.541279 4780 flags.go:64] FLAG: --cpu-cfs-quota-period="100ms" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.541283 4780 flags.go:64] FLAG: --cpu-manager-policy="none" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.541288 4780 flags.go:64] FLAG: --cpu-manager-policy-options="" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.541293 4780 flags.go:64] FLAG: --cpu-manager-reconcile-period="10s" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.541297 4780 flags.go:64] FLAG: --enable-controller-attach-detach="true" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.541301 4780 flags.go:64] FLAG: --enable-debugging-handlers="true" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.541305 4780 flags.go:64] FLAG: --enable-load-reader="false" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.541310 4780 flags.go:64] FLAG: --enable-server="true" Dec 10 10:44:55 crc kubenswrapper[4780]: 
I1210 10:44:55.541314 4780 flags.go:64] FLAG: --enforce-node-allocatable="[pods]" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.541328 4780 flags.go:64] FLAG: --event-burst="100" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.541333 4780 flags.go:64] FLAG: --event-qps="50" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.541337 4780 flags.go:64] FLAG: --event-storage-age-limit="default=0" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.541341 4780 flags.go:64] FLAG: --event-storage-event-limit="default=0" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.541346 4780 flags.go:64] FLAG: --eviction-hard="" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.541351 4780 flags.go:64] FLAG: --eviction-max-pod-grace-period="0" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.541355 4780 flags.go:64] FLAG: --eviction-minimum-reclaim="" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.541359 4780 flags.go:64] FLAG: --eviction-pressure-transition-period="5m0s" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.541364 4780 flags.go:64] FLAG: --eviction-soft="" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.541368 4780 flags.go:64] FLAG: --eviction-soft-grace-period="" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.541373 4780 flags.go:64] FLAG: --exit-on-lock-contention="false" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.541377 4780 flags.go:64] FLAG: --experimental-allocatable-ignore-eviction="false" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.541381 4780 flags.go:64] FLAG: --experimental-mounter-path="" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.541386 4780 flags.go:64] FLAG: --fail-cgroupv1="false" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.541390 4780 flags.go:64] FLAG: --fail-swap-on="true" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.541395 4780 flags.go:64] FLAG: --feature-gates="" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.541400 4780 flags.go:64] FLAG: --file-check-frequency="20s" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.541405 4780 flags.go:64] FLAG: --global-housekeeping-interval="1m0s" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.541409 4780 flags.go:64] FLAG: --hairpin-mode="promiscuous-bridge" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.541414 4780 flags.go:64] FLAG: --healthz-bind-address="127.0.0.1" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.541419 4780 flags.go:64] FLAG: --healthz-port="10248" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.541423 4780 flags.go:64] FLAG: --help="false" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.541428 4780 flags.go:64] FLAG: --hostname-override="" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.541432 4780 flags.go:64] FLAG: --housekeeping-interval="10s" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.541436 4780 flags.go:64] FLAG: --http-check-frequency="20s" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.541441 4780 flags.go:64] FLAG: --image-credential-provider-bin-dir="" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.541445 4780 flags.go:64] FLAG: --image-credential-provider-config="" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.541449 4780 flags.go:64] FLAG: --image-gc-high-threshold="85" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.541453 4780 flags.go:64] FLAG: --image-gc-low-threshold="80" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.541457 4780 
flags.go:64] FLAG: --image-service-endpoint="" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.541461 4780 flags.go:64] FLAG: --kernel-memcg-notification="false" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.541466 4780 flags.go:64] FLAG: --kube-api-burst="100" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.541492 4780 flags.go:64] FLAG: --kube-api-content-type="application/vnd.kubernetes.protobuf" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.541497 4780 flags.go:64] FLAG: --kube-api-qps="50" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.541501 4780 flags.go:64] FLAG: --kube-reserved="" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.541505 4780 flags.go:64] FLAG: --kube-reserved-cgroup="" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.541540 4780 flags.go:64] FLAG: --kubeconfig="/var/lib/kubelet/kubeconfig" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.541545 4780 flags.go:64] FLAG: --kubelet-cgroups="" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.541549 4780 flags.go:64] FLAG: --local-storage-capacity-isolation="true" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.541554 4780 flags.go:64] FLAG: --lock-file="" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.541558 4780 flags.go:64] FLAG: --log-cadvisor-usage="false" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.541563 4780 flags.go:64] FLAG: --log-flush-frequency="5s" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.541567 4780 flags.go:64] FLAG: --log-json-info-buffer-size="0" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.541574 4780 flags.go:64] FLAG: --log-json-split-stream="false" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.541578 4780 flags.go:64] FLAG: --log-text-info-buffer-size="0" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.541582 4780 flags.go:64] FLAG: --log-text-split-stream="false" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.541587 4780 flags.go:64] FLAG: --logging-format="text" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.541592 4780 flags.go:64] FLAG: --machine-id-file="/etc/machine-id,/var/lib/dbus/machine-id" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.541598 4780 flags.go:64] FLAG: --make-iptables-util-chains="true" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.541604 4780 flags.go:64] FLAG: --manifest-url="" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.541610 4780 flags.go:64] FLAG: --manifest-url-header="" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.541617 4780 flags.go:64] FLAG: --max-housekeeping-interval="15s" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.541623 4780 flags.go:64] FLAG: --max-open-files="1000000" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.541628 4780 flags.go:64] FLAG: --max-pods="110" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.541632 4780 flags.go:64] FLAG: --maximum-dead-containers="-1" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.541636 4780 flags.go:64] FLAG: --maximum-dead-containers-per-container="1" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.541641 4780 flags.go:64] FLAG: --memory-manager-policy="None" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.541645 4780 flags.go:64] FLAG: --minimum-container-ttl-duration="6m0s" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.541651 4780 flags.go:64] FLAG: --minimum-image-ttl-duration="2m0s" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.541655 4780 
flags.go:64] FLAG: --node-ip="192.168.126.11" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.541659 4780 flags.go:64] FLAG: --node-labels="node-role.kubernetes.io/control-plane=,node-role.kubernetes.io/master=,node.openshift.io/os_id=rhcos" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.541671 4780 flags.go:64] FLAG: --node-status-max-images="50" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.541676 4780 flags.go:64] FLAG: --node-status-update-frequency="10s" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.541681 4780 flags.go:64] FLAG: --oom-score-adj="-999" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.541707 4780 flags.go:64] FLAG: --pod-cidr="" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.541712 4780 flags.go:64] FLAG: --pod-infra-container-image="quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:33549946e22a9ffa738fd94b1345f90921bc8f92fa6137784cb33c77ad806f9d" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.541719 4780 flags.go:64] FLAG: --pod-manifest-path="" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.541724 4780 flags.go:64] FLAG: --pod-max-pids="-1" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.541728 4780 flags.go:64] FLAG: --pods-per-core="0" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.541732 4780 flags.go:64] FLAG: --port="10250" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.541736 4780 flags.go:64] FLAG: --protect-kernel-defaults="false" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.541741 4780 flags.go:64] FLAG: --provider-id="" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.541745 4780 flags.go:64] FLAG: --qos-reserved="" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.541749 4780 flags.go:64] FLAG: --read-only-port="10255" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.541754 4780 flags.go:64] FLAG: --register-node="true" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.541758 4780 flags.go:64] FLAG: --register-schedulable="true" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.541762 4780 flags.go:64] FLAG: --register-with-taints="node-role.kubernetes.io/master=:NoSchedule" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.541769 4780 flags.go:64] FLAG: --registry-burst="10" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.541774 4780 flags.go:64] FLAG: --registry-qps="5" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.541778 4780 flags.go:64] FLAG: --reserved-cpus="" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.541782 4780 flags.go:64] FLAG: --reserved-memory="" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.541787 4780 flags.go:64] FLAG: --resolv-conf="/etc/resolv.conf" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.541793 4780 flags.go:64] FLAG: --root-dir="/var/lib/kubelet" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.541797 4780 flags.go:64] FLAG: --rotate-certificates="false" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.541801 4780 flags.go:64] FLAG: --rotate-server-certificates="false" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.541806 4780 flags.go:64] FLAG: --runonce="false" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.541811 4780 flags.go:64] FLAG: --runtime-cgroups="/system.slice/crio.service" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.541816 4780 flags.go:64] FLAG: --runtime-request-timeout="2m0s" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.541820 4780 flags.go:64] FLAG: 
--seccomp-default="false" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.541824 4780 flags.go:64] FLAG: --serialize-image-pulls="true" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.541828 4780 flags.go:64] FLAG: --storage-driver-buffer-duration="1m0s" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.541833 4780 flags.go:64] FLAG: --storage-driver-db="cadvisor" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.541837 4780 flags.go:64] FLAG: --storage-driver-host="localhost:8086" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.541842 4780 flags.go:64] FLAG: --storage-driver-password="root" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.541846 4780 flags.go:64] FLAG: --storage-driver-secure="false" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.541850 4780 flags.go:64] FLAG: --storage-driver-table="stats" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.541854 4780 flags.go:64] FLAG: --storage-driver-user="root" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.541962 4780 flags.go:64] FLAG: --streaming-connection-idle-timeout="4h0m0s" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.541968 4780 flags.go:64] FLAG: --sync-frequency="1m0s" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.541973 4780 flags.go:64] FLAG: --system-cgroups="" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.541977 4780 flags.go:64] FLAG: --system-reserved="cpu=200m,ephemeral-storage=350Mi,memory=350Mi" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.541984 4780 flags.go:64] FLAG: --system-reserved-cgroup="" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.541988 4780 flags.go:64] FLAG: --tls-cert-file="" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.541992 4780 flags.go:64] FLAG: --tls-cipher-suites="[]" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.541997 4780 flags.go:64] FLAG: --tls-min-version="" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.542001 4780 flags.go:64] FLAG: --tls-private-key-file="" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.542005 4780 flags.go:64] FLAG: --topology-manager-policy="none" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.542009 4780 flags.go:64] FLAG: --topology-manager-policy-options="" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.542014 4780 flags.go:64] FLAG: --topology-manager-scope="container" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.542018 4780 flags.go:64] FLAG: --v="2" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.542024 4780 flags.go:64] FLAG: --version="false" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.542030 4780 flags.go:64] FLAG: --vmodule="" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.542035 4780 flags.go:64] FLAG: --volume-plugin-dir="/etc/kubernetes/kubelet-plugins/volume/exec" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.542040 4780 flags.go:64] FLAG: --volume-stats-agg-period="1m0s" Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.542138 4780 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.542143 4780 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.542147 4780 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.542152 4780 feature_gate.go:330] unrecognized feature gate: Example Dec 10 10:44:55 crc 
kubenswrapper[4780]: W1210 10:44:55.542157 4780 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release. Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.542162 4780 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.542166 4780 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.542171 4780 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.542176 4780 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.542180 4780 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release. Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.542185 4780 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.542189 4780 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.542192 4780 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.542196 4780 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.542200 4780 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.542223 4780 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.542227 4780 feature_gate.go:330] unrecognized feature gate: PinnedImages Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.542230 4780 feature_gate.go:330] unrecognized feature gate: SignatureStores Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.542234 4780 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.542237 4780 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.542241 4780 feature_gate.go:330] unrecognized feature gate: PlatformOperators Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.542245 4780 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.542248 4780 feature_gate.go:330] unrecognized feature gate: UpgradeStatus Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.542252 4780 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.542256 4780 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.542260 4780 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.542263 4780 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.542267 4780 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.542270 4780 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager Dec 10 10:44:55 crc 
kubenswrapper[4780]: W1210 10:44:55.542275 4780 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release. Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.542280 4780 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.542284 4780 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.542288 4780 feature_gate.go:330] unrecognized feature gate: InsightsConfig Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.542292 4780 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.542296 4780 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.549627 4780 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.549686 4780 feature_gate.go:330] unrecognized feature gate: HardwareSpeed Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.549699 4780 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.549708 4780 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.549717 4780 feature_gate.go:330] unrecognized feature gate: NewOLM Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.549725 4780 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.549733 4780 feature_gate.go:330] unrecognized feature gate: OVNObservability Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.549742 4780 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.549757 4780 feature_gate.go:330] unrecognized feature gate: DNSNameResolver Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.549765 4780 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.549773 4780 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.549781 4780 feature_gate.go:330] unrecognized feature gate: GatewayAPI Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.549793 4780 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.549801 4780 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.549810 4780 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.549818 4780 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.549827 4780 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.549837 4780 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.549845 4780 feature_gate.go:330] unrecognized feature gate: ManagedBootImages Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.549853 4780 feature_gate.go:330] unrecognized feature gate: 
MachineConfigNodes Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.549861 4780 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.549869 4780 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.549878 4780 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.549886 4780 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.549894 4780 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.549902 4780 feature_gate.go:330] unrecognized feature gate: ExternalOIDC Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.549915 4780 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release. Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.549953 4780 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.549964 4780 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.549973 4780 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.549983 4780 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.549991 4780 feature_gate.go:330] unrecognized feature gate: OnClusterBuild Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.549999 4780 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.550009 4780 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.550018 4780 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.550026 4780 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.550068 4780 feature_gate.go:386] feature gates: {map[CloudDualStackNodeIPs:true DisableKubeletCloudCredentialProviders:true DynamicResourceAllocation:false EventedPLEG:false KMSv1:true MaxUnavailableStatefulSet:false NodeSwap:false ProcMountType:false RouteExternalCertificate:false ServiceAccountTokenNodeBinding:false TranslateStreamCloseWebsocketRequests:false UserNamespacesPodSecurityStandards:false UserNamespacesSupport:false ValidatingAdmissionPolicy:true VolumeAttributesClass:false]} Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.563474 4780 server.go:491] "Kubelet version" kubeletVersion="v1.31.5" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.563540 4780 server.go:493] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK="" Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.563662 4780 feature_gate.go:330] unrecognized feature gate: PinnedImages Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.563679 4780 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.563685 4780 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 
10:44:55.563691 4780 feature_gate.go:330] unrecognized feature gate: PlatformOperators Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.563696 4780 feature_gate.go:330] unrecognized feature gate: OVNObservability Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.563701 4780 feature_gate.go:330] unrecognized feature gate: HardwareSpeed Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.563710 4780 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.563718 4780 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.563725 4780 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release. Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.563732 4780 feature_gate.go:330] unrecognized feature gate: DNSNameResolver Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.563737 4780 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.563742 4780 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.563748 4780 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.563753 4780 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.563758 4780 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.563763 4780 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.563768 4780 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.563774 4780 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release. Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.563784 4780 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.563789 4780 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.563796 4780 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release. Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.563802 4780 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.563807 4780 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.563813 4780 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release. 
Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.563818 4780 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.563823 4780 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.563828 4780 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.563833 4780 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.563837 4780 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.563859 4780 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.563870 4780 feature_gate.go:330] unrecognized feature gate: ExternalOIDC Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.563875 4780 feature_gate.go:330] unrecognized feature gate: Example Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.563880 4780 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.563892 4780 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.563901 4780 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.563907 4780 feature_gate.go:330] unrecognized feature gate: UpgradeStatus Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.563941 4780 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.563954 4780 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.563960 4780 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.563965 4780 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.563970 4780 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.563975 4780 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.563980 4780 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.563985 4780 feature_gate.go:330] unrecognized feature gate: InsightsConfig Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.563990 4780 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.563994 4780 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.563999 4780 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.564004 4780 feature_gate.go:330] unrecognized feature gate: NewOLM Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.564009 4780 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.564014 4780 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota Dec 10 
10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.564020 4780 feature_gate.go:330] unrecognized feature gate: GatewayAPI Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.564029 4780 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.564038 4780 feature_gate.go:330] unrecognized feature gate: OnClusterBuild Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.564043 4780 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.564048 4780 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.564053 4780 feature_gate.go:330] unrecognized feature gate: ManagedBootImages Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.564058 4780 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.564062 4780 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.564067 4780 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.564071 4780 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.564075 4780 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.564091 4780 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.564096 4780 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.564106 4780 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.564111 4780 feature_gate.go:330] unrecognized feature gate: SignatureStores Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.564117 4780 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.564121 4780 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.564124 4780 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.564128 4780 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.564132 4780 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.564136 4780 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.564144 4780 feature_gate.go:386] feature gates: {map[CloudDualStackNodeIPs:true DisableKubeletCloudCredentialProviders:true DynamicResourceAllocation:false EventedPLEG:false KMSv1:true MaxUnavailableStatefulSet:false NodeSwap:false ProcMountType:false RouteExternalCertificate:false ServiceAccountTokenNodeBinding:false TranslateStreamCloseWebsocketRequests:false UserNamespacesPodSecurityStandards:false UserNamespacesSupport:false ValidatingAdmissionPolicy:true VolumeAttributesClass:false]} Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 
10:44:55.564273 4780 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.564283 4780 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.564288 4780 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.564292 4780 feature_gate.go:330] unrecognized feature gate: OnClusterBuild Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.564296 4780 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.564300 4780 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.564304 4780 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.564308 4780 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.564312 4780 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.564315 4780 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.564319 4780 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.564323 4780 feature_gate.go:330] unrecognized feature gate: GatewayAPI Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.564326 4780 feature_gate.go:330] unrecognized feature gate: HardwareSpeed Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.564330 4780 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.564333 4780 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.564337 4780 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.564341 4780 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.564346 4780 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release. Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.564351 4780 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.564355 4780 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.564360 4780 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release. 
Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.564367 4780 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.564371 4780 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.564375 4780 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.564379 4780 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.564383 4780 feature_gate.go:330] unrecognized feature gate: ExternalOIDC Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.564387 4780 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.564391 4780 feature_gate.go:330] unrecognized feature gate: OVNObservability Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.564395 4780 feature_gate.go:330] unrecognized feature gate: NewOLM Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.564399 4780 feature_gate.go:330] unrecognized feature gate: ManagedBootImages Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.564402 4780 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.564406 4780 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.564409 4780 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.564413 4780 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.564416 4780 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.564420 4780 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.564423 4780 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.564428 4780 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.564432 4780 feature_gate.go:330] unrecognized feature gate: InsightsConfig Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.564436 4780 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.564439 4780 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.564443 4780 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.564447 4780 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release. 
Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.564452 4780 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.564456 4780 feature_gate.go:330] unrecognized feature gate: SignatureStores Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.564460 4780 feature_gate.go:330] unrecognized feature gate: PinnedImages Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.564463 4780 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.564467 4780 feature_gate.go:330] unrecognized feature gate: DNSNameResolver Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.564471 4780 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.564475 4780 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.564480 4780 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release. Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.564485 4780 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.564489 4780 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.564493 4780 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.564497 4780 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.564501 4780 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.564505 4780 feature_gate.go:330] unrecognized feature gate: PlatformOperators Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.564510 4780 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.564515 4780 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.564519 4780 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.564523 4780 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.564527 4780 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.564531 4780 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.564535 4780 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.564539 4780 feature_gate.go:330] unrecognized feature gate: UpgradeStatus Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.564542 4780 feature_gate.go:330] unrecognized feature gate: Example Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.564546 4780 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.564552 4780 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement Dec 10 10:44:55 crc 
kubenswrapper[4780]: W1210 10:44:55.564556 4780 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.564559 4780 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.564563 4780 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.564570 4780 feature_gate.go:386] feature gates: {map[CloudDualStackNodeIPs:true DisableKubeletCloudCredentialProviders:true DynamicResourceAllocation:false EventedPLEG:false KMSv1:true MaxUnavailableStatefulSet:false NodeSwap:false ProcMountType:false RouteExternalCertificate:false ServiceAccountTokenNodeBinding:false TranslateStreamCloseWebsocketRequests:false UserNamespacesPodSecurityStandards:false UserNamespacesSupport:false ValidatingAdmissionPolicy:true VolumeAttributesClass:false]} Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.564793 4780 server.go:940] "Client rotation is on, will bootstrap in background" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.569459 4780 bootstrap.go:85] "Current kubeconfig file contents are still valid, no bootstrap necessary" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.569606 4780 certificate_store.go:130] Loading cert/key pair from "/var/lib/kubelet/pki/kubelet-client-current.pem". Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.570303 4780 server.go:997] "Starting client certificate rotation" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.570329 4780 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Certificate rotation is enabled Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.668905 4780 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Certificate expiration is 2026-02-24 05:52:08 +0000 UTC, rotation deadline is 2025-12-22 20:32:19.117764455 +0000 UTC Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.669182 4780 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Waiting 297h47m23.44859374s for next certificate rotation Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.680240 4780 dynamic_cafile_content.go:123] "Loaded a new CA Bundle and Verifier" name="client-ca-bundle::/etc/kubernetes/kubelet-ca.crt" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.682788 4780 dynamic_cafile_content.go:161] "Starting controller" name="client-ca-bundle::/etc/kubernetes/kubelet-ca.crt" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.698288 4780 log.go:25] "Validated CRI v1 runtime API" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.733896 4780 log.go:25] "Validated CRI v1 image API" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.735889 4780 server.go:1437] "Using cgroup driver setting received from the CRI runtime" cgroupDriver="systemd" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.739980 4780 fs.go:133] Filesystem UUIDs: map[0b076daa-c26a-46d2-b3a6-72a8dbc6e257:/dev/vda4 2025-12-10-10-40-14-00:/dev/sr0 7B77-95E7:/dev/vda2 de0497b0-db1b-465a-b278-03db02455c71:/dev/vda3] Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.740019 4780 fs.go:134] Filesystem partitions: map[/dev/shm:{mountpoint:/dev/shm major:0 minor:22 fsType:tmpfs blockSize:0} /dev/vda3:{mountpoint:/boot major:252 minor:3 fsType:ext4 blockSize:0} /dev/vda4:{mountpoint:/var major:252 minor:4 fsType:xfs blockSize:0} /run:{mountpoint:/run major:0 minor:24 fsType:tmpfs blockSize:0} 
/run/user/1000:{mountpoint:/run/user/1000 major:0 minor:42 fsType:tmpfs blockSize:0} /tmp:{mountpoint:/tmp major:0 minor:30 fsType:tmpfs blockSize:0} /var/lib/etcd:{mountpoint:/var/lib/etcd major:0 minor:43 fsType:tmpfs blockSize:0}] Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.767262 4780 manager.go:217] Machine: {Timestamp:2025-12-10 10:44:55.765314899 +0000 UTC m=+0.618708342 CPUVendorID:AuthenticAMD NumCores:12 NumPhysicalCores:1 NumSockets:12 CpuFrequency:2800000 MemoryCapacity:33654124544 SwapCapacity:0 MemoryByType:map[] NVMInfo:{MemoryModeCapacity:0 AppDirectModeCapacity:0 AvgPowerBudget:0} HugePages:[{PageSize:1048576 NumPages:0} {PageSize:2048 NumPages:0}] MachineID:21801e6708c44f15b81395eb736a7cec SystemUUID:0182e509-70c5-4f26-9ad3-610230bb601e BootID:90085e8a-5ea9-4564-85e4-5635b00d094d Filesystems:[{Device:/tmp DeviceMajor:0 DeviceMinor:30 Capacity:16827064320 Type:vfs Inodes:1048576 HasInodes:true} {Device:/dev/vda3 DeviceMajor:252 DeviceMinor:3 Capacity:366869504 Type:vfs Inodes:98304 HasInodes:true} {Device:/run/user/1000 DeviceMajor:0 DeviceMinor:42 Capacity:3365408768 Type:vfs Inodes:821633 HasInodes:true} {Device:/var/lib/etcd DeviceMajor:0 DeviceMinor:43 Capacity:1073741824 Type:vfs Inodes:4108169 HasInodes:true} {Device:/dev/shm DeviceMajor:0 DeviceMinor:22 Capacity:16827060224 Type:vfs Inodes:4108169 HasInodes:true} {Device:/run DeviceMajor:0 DeviceMinor:24 Capacity:6730825728 Type:vfs Inodes:819200 HasInodes:true} {Device:/dev/vda4 DeviceMajor:252 DeviceMinor:4 Capacity:85292941312 Type:vfs Inodes:41679680 HasInodes:true}] DiskMap:map[252:0:{Name:vda Major:252 Minor:0 Size:214748364800 Scheduler:none}] NetworkDevices:[{Name:br-ex MacAddress:fa:16:3e:d1:1d:f3 Speed:0 Mtu:1500} {Name:br-int MacAddress:d6:39:55:2e:22:71 Speed:0 Mtu:1400} {Name:ens3 MacAddress:fa:16:3e:d1:1d:f3 Speed:-1 Mtu:1500} {Name:ens7 MacAddress:fa:16:3e:b9:99:4c Speed:-1 Mtu:1500} {Name:ens7.20 MacAddress:52:54:00:87:02:70 Speed:-1 Mtu:1496} {Name:ens7.21 MacAddress:52:54:00:f5:75:11 Speed:-1 Mtu:1496} {Name:ens7.22 MacAddress:52:54:00:a3:5f:94 Speed:-1 Mtu:1496} {Name:eth10 MacAddress:26:2a:2d:6d:0e:57 Speed:0 Mtu:1500} {Name:ovn-k8s-mp0 MacAddress:0a:58:0a:d9:00:02 Speed:0 Mtu:1400} {Name:ovs-system MacAddress:2a:ac:25:f5:aa:38 Speed:0 Mtu:1500}] Topology:[{Id:0 Memory:33654124544 HugePages:[{PageSize:1048576 NumPages:0} {PageSize:2048 NumPages:0}] Cores:[{Id:0 Threads:[0] Caches:[{Id:0 Size:32768 Type:Data Level:1} {Id:0 Size:32768 Type:Instruction Level:1} {Id:0 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:0 Size:16777216 Type:Unified Level:3}] SocketID:0 BookID: DrawerID:} {Id:0 Threads:[1] Caches:[{Id:1 Size:32768 Type:Data Level:1} {Id:1 Size:32768 Type:Instruction Level:1} {Id:1 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:1 Size:16777216 Type:Unified Level:3}] SocketID:1 BookID: DrawerID:} {Id:0 Threads:[10] Caches:[{Id:10 Size:32768 Type:Data Level:1} {Id:10 Size:32768 Type:Instruction Level:1} {Id:10 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:10 Size:16777216 Type:Unified Level:3}] SocketID:10 BookID: DrawerID:} {Id:0 Threads:[11] Caches:[{Id:11 Size:32768 Type:Data Level:1} {Id:11 Size:32768 Type:Instruction Level:1} {Id:11 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:11 Size:16777216 Type:Unified Level:3}] SocketID:11 BookID: DrawerID:} {Id:0 Threads:[2] Caches:[{Id:2 Size:32768 Type:Data Level:1} {Id:2 Size:32768 Type:Instruction Level:1} {Id:2 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:2 Size:16777216 Type:Unified Level:3}] 
SocketID:2 BookID: DrawerID:} {Id:0 Threads:[3] Caches:[{Id:3 Size:32768 Type:Data Level:1} {Id:3 Size:32768 Type:Instruction Level:1} {Id:3 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:3 Size:16777216 Type:Unified Level:3}] SocketID:3 BookID: DrawerID:} {Id:0 Threads:[4] Caches:[{Id:4 Size:32768 Type:Data Level:1} {Id:4 Size:32768 Type:Instruction Level:1} {Id:4 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:4 Size:16777216 Type:Unified Level:3}] SocketID:4 BookID: DrawerID:} {Id:0 Threads:[5] Caches:[{Id:5 Size:32768 Type:Data Level:1} {Id:5 Size:32768 Type:Instruction Level:1} {Id:5 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:5 Size:16777216 Type:Unified Level:3}] SocketID:5 BookID: DrawerID:} {Id:0 Threads:[6] Caches:[{Id:6 Size:32768 Type:Data Level:1} {Id:6 Size:32768 Type:Instruction Level:1} {Id:6 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:6 Size:16777216 Type:Unified Level:3}] SocketID:6 BookID: DrawerID:} {Id:0 Threads:[7] Caches:[{Id:7 Size:32768 Type:Data Level:1} {Id:7 Size:32768 Type:Instruction Level:1} {Id:7 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:7 Size:16777216 Type:Unified Level:3}] SocketID:7 BookID: DrawerID:} {Id:0 Threads:[8] Caches:[{Id:8 Size:32768 Type:Data Level:1} {Id:8 Size:32768 Type:Instruction Level:1} {Id:8 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:8 Size:16777216 Type:Unified Level:3}] SocketID:8 BookID: DrawerID:} {Id:0 Threads:[9] Caches:[{Id:9 Size:32768 Type:Data Level:1} {Id:9 Size:32768 Type:Instruction Level:1} {Id:9 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:9 Size:16777216 Type:Unified Level:3}] SocketID:9 BookID: DrawerID:}] Caches:[] Distances:[10]}] CloudProvider:Unknown InstanceType:Unknown InstanceID:None} Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.767572 4780 manager_no_libpfm.go:29] cAdvisor is build without cgo and/or libpfm support. Perf event counters are not available. 
Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.767749 4780 manager.go:233] Version: {KernelVersion:5.14.0-427.50.2.el9_4.x86_64 ContainerOsVersion:Red Hat Enterprise Linux CoreOS 418.94.202502100215-0 DockerVersion: DockerAPIVersion: CadvisorVersion: CadvisorRevision:}
Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.768357 4780 swap_util.go:113] "Swap is on" /proc/swaps contents="Filename\t\t\t\tType\t\tSize\t\tUsed\t\tPriority"
Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.768629 4780 container_manager_linux.go:267] "Container manager verified user specified cgroup-root exists" cgroupRoot=[]
Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.768703 4780 container_manager_linux.go:272] "Creating Container Manager object based on Node Config" nodeConfig={"NodeName":"crc","RuntimeCgroupsName":"/system.slice/crio.service","SystemCgroupsName":"/system.slice","KubeletCgroupsName":"","KubeletOOMScoreAdj":-999,"ContainerRuntime":"","CgroupsPerQOS":true,"CgroupRoot":"/","CgroupDriver":"systemd","KubeletRootDir":"/var/lib/kubelet","ProtectKernelDefaults":true,"KubeReservedCgroupName":"","SystemReservedCgroupName":"","ReservedSystemCPUs":{},"EnforceNodeAllocatable":{"pods":{}},"KubeReserved":null,"SystemReserved":{"cpu":"200m","ephemeral-storage":"350Mi","memory":"350Mi"},"HardEvictionThresholds":[{"Signal":"memory.available","Operator":"LessThan","Value":{"Quantity":"100Mi","Percentage":0},"GracePeriod":0,"MinReclaim":null},{"Signal":"nodefs.available","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.1},"GracePeriod":0,"MinReclaim":null},{"Signal":"nodefs.inodesFree","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.05},"GracePeriod":0,"MinReclaim":null},{"Signal":"imagefs.available","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.15},"GracePeriod":0,"MinReclaim":null},{"Signal":"imagefs.inodesFree","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.05},"GracePeriod":0,"MinReclaim":null}],"QOSReserved":{},"CPUManagerPolicy":"none","CPUManagerPolicyOptions":null,"TopologyManagerScope":"container","CPUManagerReconcilePeriod":10000000000,"ExperimentalMemoryManagerPolicy":"None","ExperimentalMemoryManagerReservedMemory":null,"PodPidsLimit":4096,"EnforceCPULimits":true,"CPUCFSQuotaPeriod":100000000,"TopologyManagerPolicy":"none","TopologyManagerPolicyOptions":null,"CgroupVersion":2}
Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.769121 4780 topology_manager.go:138] "Creating topology manager with none policy"
Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.769137 4780 container_manager_linux.go:303] "Creating device plugin manager"
Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.769612 4780 manager.go:142] "Creating Device Plugin manager" path="/var/lib/kubelet/device-plugins/kubelet.sock"
Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.769731 4780 server.go:66] "Creating device plugin registration server" version="v1beta1" socket="/var/lib/kubelet/device-plugins/kubelet.sock"
Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.770374 4780 state_mem.go:36] "Initialized new in-memory state store"
Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.770560 4780 server.go:1245] "Using root directory" path="/var/lib/kubelet"
Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.773131 4780 kubelet.go:418] "Attempting to sync node with API server"
Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.773168 4780 kubelet.go:313] "Adding static pod path" path="/etc/kubernetes/manifests"
Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.773209 4780 file.go:69] "Watching path" path="/etc/kubernetes/manifests" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.773232 4780 kubelet.go:324] "Adding apiserver pod source" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.773304 4780 apiserver.go:42] "Waiting for node sync before watching apiserver pods" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.842166 4780 kuberuntime_manager.go:262] "Container runtime initialized" containerRuntime="cri-o" version="1.31.5-4.rhaos4.18.gitdad78d5.el9" apiVersion="v1" Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.842643 4780 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0": dial tcp 38.102.83.51:6443: connect: connection refused Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.842790 4780 certificate_store.go:130] Loading cert/key pair from "/var/lib/kubelet/pki/kubelet-server-current.pem". Dec 10 10:44:55 crc kubenswrapper[4780]: E1210 10:44:55.842787 4780 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Service: failed to list *v1.Service: Get \"https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0\": dial tcp 38.102.83.51:6443: connect: connection refused" logger="UnhandledError" Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.842646 4780 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": dial tcp 38.102.83.51:6443: connect: connection refused Dec 10 10:44:55 crc kubenswrapper[4780]: E1210 10:44:55.842859 4780 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Node: failed to list *v1.Node: Get \"https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0\": dial tcp 38.102.83.51:6443: connect: connection refused" logger="UnhandledError" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.844563 4780 kubelet.go:854] "Not starting ClusterTrustBundle informer because we are in static kubelet mode" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.846042 4780 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/portworx-volume" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.846095 4780 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/empty-dir" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.846113 4780 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/git-repo" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.846137 4780 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/host-path" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.846155 4780 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/nfs" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.846165 4780 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/secret" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.846173 4780 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/iscsi" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.846193 4780 plugins.go:603] "Loaded volume plugin" 
pluginName="kubernetes.io/downward-api" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.846220 4780 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/fc" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.846230 4780 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/configmap" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.846247 4780 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/projected" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.846258 4780 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/local-volume" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.846653 4780 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/csi" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.847504 4780 server.go:1280] "Started kubelet" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.848107 4780 server.go:163] "Starting to listen" address="0.0.0.0" port=10250 Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.848178 4780 ratelimit.go:55] "Setting rate limiting for endpoint" service="podresources" qps=100 burstTokens=10 Dec 10 10:44:55 crc systemd[1]: Started Kubernetes Kubelet. Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.850215 4780 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.51:6443: connect: connection refused Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.850537 4780 server.go:236] "Starting to serve the podresources API" endpoint="unix:/var/lib/kubelet/pod-resources/kubelet.sock" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.853080 4780 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate rotation is enabled Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.853172 4780 server.go:460] "Adding debug handlers to kubelet server" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.853186 4780 fs_resource_analyzer.go:67] "Starting FS ResourceAnalyzer" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.854110 4780 volume_manager.go:287] "The desired_state_of_world populator starts" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.854138 4780 volume_manager.go:289] "Starting Kubelet Volume Manager" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.853360 4780 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-22 20:38:58.686659376 +0000 UTC Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.854286 4780 certificate_manager.go:356] kubernetes.io/kubelet-serving: Waiting 297h54m2.832382772s for next certificate rotation Dec 10 10:44:55 crc kubenswrapper[4780]: E1210 10:44:55.853526 4780 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/default/events\": dial tcp 38.102.83.51:6443: connect: connection refused" event="&Event{ObjectMeta:{crc.187fd4bf056ddb32 default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:Starting,Message:Starting kubelet.,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2025-12-10 10:44:55.847451442 +0000 UTC m=+0.700844875,LastTimestamp:2025-12-10 10:44:55.847451442 +0000 UTC 
m=+0.700844875,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.854385 4780 desired_state_of_world_populator.go:146] "Desired state populator starts to run" Dec 10 10:44:55 crc kubenswrapper[4780]: E1210 10:44:55.854639 4780 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.855455 4780 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": dial tcp 38.102.83.51:6443: connect: connection refused Dec 10 10:44:55 crc kubenswrapper[4780]: E1210 10:44:55.855577 4780 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get \"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0\": dial tcp 38.102.83.51:6443: connect: connection refused" logger="UnhandledError" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.856983 4780 factory.go:55] Registering systemd factory Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.857029 4780 factory.go:221] Registration of the systemd container factory successfully Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.857385 4780 factory.go:153] Registering CRI-O factory Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.857401 4780 factory.go:221] Registration of the crio container factory successfully Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.857475 4780 factory.go:219] Registration of the containerd container factory failed: unable to create containerd client: containerd: cannot unix dial containerd api service: dial unix /run/containerd/containerd.sock: connect: no such file or directory Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.857526 4780 factory.go:103] Registering Raw factory Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.857548 4780 manager.go:1196] Started watching for new ooms in manager Dec 10 10:44:55 crc kubenswrapper[4780]: E1210 10:44:55.857743 4780 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.51:6443: connect: connection refused" interval="200ms" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.860251 4780 manager.go:319] Starting recovery of all containers Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.868654 4780 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/secret/ef543e1b-8068-4ea3-b32a-61027b32e95d-webhook-cert" seLinuxMountContext="" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.868727 4780 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b574797-001e-440a-8f4e-c0be86edad0f" volumeName="kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config" seLinuxMountContext="" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.868739 4780 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" 
volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle" seLinuxMountContext="" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.868757 4780 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig" seLinuxMountContext="" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.868775 4780 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls" seLinuxMountContext="" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.868786 4780 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert" seLinuxMountContext="" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.868798 4780 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs" seLinuxMountContext="" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.868810 4780 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-ovnkube-identity-cm" seLinuxMountContext="" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.868822 4780 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca" seLinuxMountContext="" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.868834 4780 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf" seLinuxMountContext="" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.868844 4780 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert" seLinuxMountContext="" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.868855 4780 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies" seLinuxMountContext="" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.868866 4780 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6731426b-95fe-49ff-bb5f-40441049fde2" volumeName="kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls" seLinuxMountContext="" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.868880 4780 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" 
podName="d75a4c96-2883-4a0b-bab2-0fab2b6c0b49" volumeName="kubernetes.io/projected/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-kube-api-access-rczfb" seLinuxMountContext="" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.868892 4780 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images" seLinuxMountContext="" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.868902 4780 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data" seLinuxMountContext="" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.868932 4780 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles" seLinuxMountContext="" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.868943 4780 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token" seLinuxMountContext="" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.868953 4780 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" volumeName="kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content" seLinuxMountContext="" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.868965 4780 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca" seLinuxMountContext="" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.868976 4780 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7bb08738-c794-4ee8-9972-3a62ca171029" volumeName="kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb" seLinuxMountContext="" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.868987 4780 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a0128f3a-b052-44ed-a84e-c4c8aaf17c13" volumeName="kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m" seLinuxMountContext="" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.868998 4780 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/projected/ef543e1b-8068-4ea3-b32a-61027b32e95d-kube-api-access-s2kz5" seLinuxMountContext="" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.869010 4780 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="4bb40260-dbaa-4fb0-84df-5e680505d512" volumeName="kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh" seLinuxMountContext="" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.869023 4780 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" 
podName="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" volumeName="kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf" seLinuxMountContext="" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.869037 4780 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle" seLinuxMountContext="" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.869055 4780 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="01ab3dd5-8196-46d0-ad33-122e2ca51def" volumeName="kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert" seLinuxMountContext="" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.869071 4780 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1d611f23-29be-4491-8495-bee1670e935f" volumeName="kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities" seLinuxMountContext="" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.869084 4780 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs" seLinuxMountContext="" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.869102 4780 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3b6479f0-333b-4a96-9adf-2099afdc2447" volumeName="kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr" seLinuxMountContext="" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.869112 4780 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="496e6271-fb68-4057-954e-a0d97a4afa3f" volumeName="kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access" seLinuxMountContext="" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.869122 4780 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7539238d-5fe0-46ed-884e-1c3b566537ec" volumeName="kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config" seLinuxMountContext="" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.869135 4780 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b574797-001e-440a-8f4e-c0be86edad0f" volumeName="kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88" seLinuxMountContext="" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.869148 4780 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1386a44e-36a2-460c-96d0-0359d2b6f0f5" volumeName="kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access" seLinuxMountContext="" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.869194 4780 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8" seLinuxMountContext="" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.869208 4780 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" 
volumeName="kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca" seLinuxMountContext="" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.869219 4780 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52" seLinuxMountContext="" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.869230 4780 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1d611f23-29be-4491-8495-bee1670e935f" volumeName="kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content" seLinuxMountContext="" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.869241 4780 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" volumeName="kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert" seLinuxMountContext="" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.869254 4780 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" volumeName="kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782" seLinuxMountContext="" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.869267 4780 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="efdd0498-1daa-4136-9a4a-3b948c2293fc" volumeName="kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs" seLinuxMountContext="" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.869279 4780 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" volumeName="kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config" seLinuxMountContext="" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.869291 4780 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config" seLinuxMountContext="" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.869304 4780 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" volumeName="kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert" seLinuxMountContext="" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.869317 4780 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca" seLinuxMountContext="" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.869328 4780 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca" seLinuxMountContext="" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.869340 4780 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" 
volumeName="kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert" seLinuxMountContext="" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.869350 4780 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert" seLinuxMountContext="" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.869363 4780 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" volumeName="kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca" seLinuxMountContext="" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.869375 4780 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle" seLinuxMountContext="" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.869385 4780 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config" seLinuxMountContext="" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.869398 4780 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config" seLinuxMountContext="" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.869413 4780 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="87cf06ed-a83f-41a7-828d-70653580a8cb" volumeName="kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls" seLinuxMountContext="" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.869425 4780 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" volumeName="kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates" seLinuxMountContext="" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.869436 4780 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate" seLinuxMountContext="" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.869447 4780 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b78653f-4ff9-4508-8672-245ed9b561e3" volumeName="kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert" seLinuxMountContext="" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.869459 4780 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle" seLinuxMountContext="" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.869469 4780 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" 
volumeName="kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config" seLinuxMountContext="" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.869480 4780 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" volumeName="kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8" seLinuxMountContext="" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.869543 4780 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config" seLinuxMountContext="" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.869555 4780 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" volumeName="kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert" seLinuxMountContext="" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.869564 4780 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls" seLinuxMountContext="" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.869574 4780 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config" seLinuxMountContext="" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.869584 4780 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="37a5e44f-9a88-4405-be8a-b645485e7312" volumeName="kubernetes.io/projected/37a5e44f-9a88-4405-be8a-b645485e7312-kube-api-access-rdwmf" seLinuxMountContext="" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.869609 4780 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates" seLinuxMountContext="" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.869624 4780 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides" seLinuxMountContext="" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.869641 4780 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv" seLinuxMountContext="" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.869651 4780 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca" seLinuxMountContext="" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.869667 4780 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe579f8-e8a6-4643-bce5-a661393c4dde" 
volumeName="kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token" seLinuxMountContext="" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.869677 4780 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb" seLinuxMountContext="" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.869688 4780 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" volumeName="kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics" seLinuxMountContext="" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.869705 4780 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5" seLinuxMountContext="" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.869718 4780 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="44663579-783b-4372-86d6-acf235a62d72" volumeName="kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc" seLinuxMountContext="" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.869729 4780 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp" seLinuxMountContext="" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.869742 4780 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca" seLinuxMountContext="" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.869754 4780 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit" seLinuxMountContext="" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.869766 4780 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="20b0d48f-5fd6-431c-a545-e3c800c7b866" volumeName="kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds" seLinuxMountContext="" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.869776 4780 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" volumeName="kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca" seLinuxMountContext="" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.869787 4780 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config" seLinuxMountContext="" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.869797 4780 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" 
volumeName="kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle" seLinuxMountContext="" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.869807 4780 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config" seLinuxMountContext="" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.869817 4780 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7" seLinuxMountContext="" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.869826 4780 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-env-overrides" seLinuxMountContext="" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.869845 4780 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" volumeName="kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert" seLinuxMountContext="" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.869860 4780 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert" seLinuxMountContext="" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.869878 4780 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="efdd0498-1daa-4136-9a4a-3b948c2293fc" volumeName="kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt" seLinuxMountContext="" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.869892 4780 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="fda69060-fa79-4696-b1a6-7980f124bf7c" volumeName="kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config" seLinuxMountContext="" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.869906 4780 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies" seLinuxMountContext="" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.869934 4780 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1386a44e-36a2-460c-96d0-0359d2b6f0f5" volumeName="kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config" seLinuxMountContext="" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.869944 4780 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert" seLinuxMountContext="" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.869953 4780 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="57a731c4-ef35-47a8-b875-bfb08a7f8011" 
volumeName="kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct" seLinuxMountContext="" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.869969 4780 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5b88f790-22fa-440e-b583-365168c0b23d" volumeName="kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs" seLinuxMountContext="" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.870000 4780 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth" seLinuxMountContext="" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.870011 4780 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config" seLinuxMountContext="" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.870022 4780 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection" seLinuxMountContext="" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.870042 4780 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="4bb40260-dbaa-4fb0-84df-5e680505d512" volumeName="kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config" seLinuxMountContext="" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.870060 4780 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6731426b-95fe-49ff-bb5f-40441049fde2" volumeName="kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh" seLinuxMountContext="" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.870069 4780 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="87cf06ed-a83f-41a7-828d-70653580a8cb" volumeName="kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume" seLinuxMountContext="" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.870079 4780 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" volumeName="kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp" seLinuxMountContext="" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.870090 4780 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="57a731c4-ef35-47a8-b875-bfb08a7f8011" volumeName="kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities" seLinuxMountContext="" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.870101 4780 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides" seLinuxMountContext="" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.870112 4780 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" 
volumeName="kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd" seLinuxMountContext="" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.870122 4780 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="d75a4c96-2883-4a0b-bab2-0fab2b6c0b49" volumeName="kubernetes.io/configmap/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-iptables-alerter-script" seLinuxMountContext="" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.870136 4780 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="96b93a3a-6083-4aea-8eab-fe1aa8245ad9" volumeName="kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls" seLinuxMountContext="" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.870185 4780 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert" seLinuxMountContext="" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.870199 4780 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6312bbd-5731-4ea0-a20f-81d5a57df44a" volumeName="kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert" seLinuxMountContext="" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.870210 4780 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client" seLinuxMountContext="" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.870221 4780 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="37a5e44f-9a88-4405-be8a-b645485e7312" volumeName="kubernetes.io/secret/37a5e44f-9a88-4405-be8a-b645485e7312-metrics-tls" seLinuxMountContext="" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.870232 4780 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config" seLinuxMountContext="" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.870242 4780 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5225d0e4-402f-4861-b410-819f433b1803" volumeName="kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities" seLinuxMountContext="" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.870253 4780 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib" seLinuxMountContext="" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.870266 4780 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5225d0e4-402f-4861-b410-819f433b1803" volumeName="kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content" seLinuxMountContext="" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.870279 4780 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5b88f790-22fa-440e-b583-365168c0b23d" 
volumeName="kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn" seLinuxMountContext="" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.870288 4780 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images" seLinuxMountContext="" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.870299 4780 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="01ab3dd5-8196-46d0-ad33-122e2ca51def" volumeName="kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j" seLinuxMountContext="" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.870309 4780 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="20b0d48f-5fd6-431c-a545-e3c800c7b866" volumeName="kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert" seLinuxMountContext="" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.870319 4780 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="496e6271-fb68-4057-954e-a0d97a4afa3f" volumeName="kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config" seLinuxMountContext="" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.870328 4780 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="496e6271-fb68-4057-954e-a0d97a4afa3f" volumeName="kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert" seLinuxMountContext="" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.870337 4780 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template" seLinuxMountContext="" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.870348 4780 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config" seLinuxMountContext="" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.870360 4780 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca" seLinuxMountContext="" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.870370 4780 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d" volumeName="kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85" seLinuxMountContext="" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.870380 4780 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config" seLinuxMountContext="" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.870390 4780 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe579f8-e8a6-4643-bce5-a661393c4dde" 
volumeName="kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs" seLinuxMountContext="" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.870399 4780 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config" seLinuxMountContext="" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.870407 4780 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="e7e6199b-1264-4501-8953-767f51328d08" volumeName="kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert" seLinuxMountContext="" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.870418 4780 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="fda69060-fa79-4696-b1a6-7980f124bf7c" volumeName="kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh" seLinuxMountContext="" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.870429 4780 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca" seLinuxMountContext="" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.870500 4780 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="4bb40260-dbaa-4fb0-84df-5e680505d512" volumeName="kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy" seLinuxMountContext="" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.870512 4780 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz" seLinuxMountContext="" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.870522 4780 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d751cbb-f2e2-430d-9754-c882a5e924a5" volumeName="kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl" seLinuxMountContext="" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.870531 4780 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn" seLinuxMountContext="" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.870542 4780 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7bb08738-c794-4ee8-9972-3a62ca171029" volumeName="kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist" seLinuxMountContext="" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.870551 4780 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" volumeName="kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4" seLinuxMountContext="" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.870561 4780 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" 
volumeName="kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh" seLinuxMountContext="" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.870572 4780 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz" seLinuxMountContext="" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.870582 4780 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert" seLinuxMountContext="" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.870590 4780 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca" seLinuxMountContext="" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.870600 4780 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login" seLinuxMountContext="" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.870609 4780 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6" seLinuxMountContext="" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.870618 4780 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf" seLinuxMountContext="" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.870627 4780 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b78653f-4ff9-4508-8672-245ed9b561e3" volumeName="kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access" seLinuxMountContext="" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.870637 4780 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" volumeName="kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert" seLinuxMountContext="" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.870646 4780 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="25e176fe-21b4-4974-b1ed-c8b94f112a7f" volumeName="kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv" seLinuxMountContext="" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.870656 4780 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error" seLinuxMountContext="" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.870666 4780 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" 
podName="b6312bbd-5731-4ea0-a20f-81d5a57df44a" volumeName="kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert" seLinuxMountContext="" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.870676 4780 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" volumeName="kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert" seLinuxMountContext="" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.870687 4780 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls" seLinuxMountContext="" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.870697 4780 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" volumeName="kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities" seLinuxMountContext="" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.870709 4780 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config" seLinuxMountContext="" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.870720 4780 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert" seLinuxMountContext="" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.870731 4780 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7" seLinuxMountContext="" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.870741 4780 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1d611f23-29be-4491-8495-bee1670e935f" volumeName="kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz" seLinuxMountContext="" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.870752 4780 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert" seLinuxMountContext="" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.870763 4780 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token" seLinuxMountContext="" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.870772 4780 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="e7e6199b-1264-4501-8953-767f51328d08" volumeName="kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access" seLinuxMountContext="" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.870782 4780 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" 
podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token" seLinuxMountContext="" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.870792 4780 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6312bbd-5731-4ea0-a20f-81d5a57df44a" volumeName="kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr" seLinuxMountContext="" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.870805 4780 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="e7e6199b-1264-4501-8953-767f51328d08" volumeName="kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config" seLinuxMountContext="" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.870816 4780 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b574797-001e-440a-8f4e-c0be86edad0f" volumeName="kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls" seLinuxMountContext="" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.870828 4780 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client" seLinuxMountContext="" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.870839 4780 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs" seLinuxMountContext="" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.870849 4780 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" volumeName="kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config" seLinuxMountContext="" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.870861 4780 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs" seLinuxMountContext="" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.870879 4780 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca" seLinuxMountContext="" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.870894 4780 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls" seLinuxMountContext="" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.870904 4780 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config" seLinuxMountContext="" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.870934 4780 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" 
volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert" seLinuxMountContext="" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.870945 4780 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7539238d-5fe0-46ed-884e-1c3b566537ec" volumeName="kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c" seLinuxMountContext="" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.870956 4780 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl" seLinuxMountContext="" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.870966 4780 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7bb08738-c794-4ee8-9972-3a62ca171029" volumeName="kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy" seLinuxMountContext="" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.870978 4780 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="57a731c4-ef35-47a8-b875-bfb08a7f8011" volumeName="kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content" seLinuxMountContext="" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.870989 4780 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="96b93a3a-6083-4aea-8eab-fe1aa8245ad9" volumeName="kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7" seLinuxMountContext="" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.871000 4780 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert" seLinuxMountContext="" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.871015 4780 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client" seLinuxMountContext="" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.871028 4780 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="25e176fe-21b4-4974-b1ed-c8b94f112a7f" volumeName="kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key" seLinuxMountContext="" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.871042 4780 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session" seLinuxMountContext="" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.871054 4780 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert" seLinuxMountContext="" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.871065 4780 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3ab1a177-2de0-46d9-b765-d0d0649bb42e" 
volumeName="kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert" seLinuxMountContext="" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.871075 4780 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz" seLinuxMountContext="" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.871087 4780 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk" seLinuxMountContext="" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.871097 4780 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="fda69060-fa79-4696-b1a6-7980f124bf7c" volumeName="kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls" seLinuxMountContext="" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.871115 4780 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca" seLinuxMountContext="" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.871125 4780 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a0128f3a-b052-44ed-a84e-c4c8aaf17c13" volumeName="kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls" seLinuxMountContext="" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.871136 4780 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bd23aa5c-e532-4e53-bccf-e79f130c5ae8" volumeName="kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2" seLinuxMountContext="" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.871147 4780 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca" seLinuxMountContext="" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.871158 4780 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b78653f-4ff9-4508-8672-245ed9b561e3" volumeName="kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca" seLinuxMountContext="" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.871173 4780 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3ab1a177-2de0-46d9-b765-d0d0649bb42e" volumeName="kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj" seLinuxMountContext="" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.871183 4780 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49ef4625-1d3a-4a9f-b595-c2433d32326d" volumeName="kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v" seLinuxMountContext="" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.871195 4780 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5225d0e4-402f-4861-b410-819f433b1803" 
volumeName="kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7" seLinuxMountContext="" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.871206 4780 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" volumeName="kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg" seLinuxMountContext="" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.871218 4780 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle" seLinuxMountContext="" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.871228 4780 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle" seLinuxMountContext="" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.871248 4780 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7539238d-5fe0-46ed-884e-1c3b566537ec" volumeName="kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert" seLinuxMountContext="" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.871260 4780 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" seLinuxMountContext="" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.874366 4780 reconstruct.go:144] "Volume is marked device as uncertain and added into the actual state" volumeName="kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" deviceMountPath="/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/globalmount" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.874491 4780 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted" seLinuxMountContext="" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.874525 4780 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls" seLinuxMountContext="" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.874557 4780 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe579f8-e8a6-4643-bce5-a661393c4dde" volumeName="kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp" seLinuxMountContext="" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.874578 4780 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls" seLinuxMountContext="" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.874598 4780 reconstruct.go:130] "Volume is 
marked as uncertain and added into the actual state" pod="" podName="87cf06ed-a83f-41a7-828d-70653580a8cb" volumeName="kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx" seLinuxMountContext="" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.874617 4780 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets" seLinuxMountContext="" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.874636 4780 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config" seLinuxMountContext="" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.874659 4780 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="01ab3dd5-8196-46d0-ad33-122e2ca51def" volumeName="kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config" seLinuxMountContext="" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.874678 4780 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1386a44e-36a2-460c-96d0-0359d2b6f0f5" volumeName="kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert" seLinuxMountContext="" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.874697 4780 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert" seLinuxMountContext="" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.874713 4780 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="25e176fe-21b4-4974-b1ed-c8b94f112a7f" volumeName="kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle" seLinuxMountContext="" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.874731 4780 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5" seLinuxMountContext="" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.874755 4780 reconstruct.go:97] "Volume reconstruction finished" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.874769 4780 reconciler.go:26] "Reconciler: start to sync state" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.881784 4780 manager.go:324] Recovery completed Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.901031 4780 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.903959 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.904035 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.904050 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.910057 4780 
cpu_manager.go:225] "Starting CPU manager" policy="none" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.910089 4780 cpu_manager.go:226] "Reconciling" reconcilePeriod="10s" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.910124 4780 state_mem.go:36] "Initialized new in-memory state store" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.927065 4780 policy_none.go:49] "None policy: Start" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.928632 4780 memory_manager.go:170] "Starting memorymanager" policy="None" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.928679 4780 state_mem.go:35] "Initializing new in-memory state store" Dec 10 10:44:55 crc kubenswrapper[4780]: E1210 10:44:55.954994 4780 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.955339 4780 kubelet_network_linux.go:50] "Initialized iptables rules." protocol="IPv4" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.957239 4780 kubelet_network_linux.go:50] "Initialized iptables rules." protocol="IPv6" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.957325 4780 status_manager.go:217] "Starting to sync pod status with apiserver" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.957374 4780 kubelet.go:2335] "Starting kubelet main sync loop" Dec 10 10:44:55 crc kubenswrapper[4780]: E1210 10:44:55.957439 4780 kubelet.go:2359] "Skipping pod synchronization" err="[container runtime status check may not have completed yet, PLEG is not healthy: pleg has yet to be successful]" Dec 10 10:44:55 crc kubenswrapper[4780]: W1210 10:44:55.959047 4780 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": dial tcp 38.102.83.51:6443: connect: connection refused Dec 10 10:44:55 crc kubenswrapper[4780]: E1210 10:44:55.959130 4780 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.RuntimeClass: failed to list *v1.RuntimeClass: Get \"https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0\": dial tcp 38.102.83.51:6443: connect: connection refused" logger="UnhandledError" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.991814 4780 manager.go:334] "Starting Device Plugin manager" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.991890 4780 manager.go:513] "Failed to read data from checkpoint" checkpoint="kubelet_internal_checkpoint" err="checkpoint is not found" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.991910 4780 server.go:79] "Starting device plugin registration server" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.992448 4780 eviction_manager.go:189] "Eviction manager: starting control loop" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.992467 4780 container_log_manager.go:189] "Initializing container log rotate workers" workers=1 monitorPeriod="10s" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.992626 4780 plugin_watcher.go:51] "Plugin Watcher Start" path="/var/lib/kubelet/plugins_registry" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.992790 4780 plugin_manager.go:116] "The desired_state_of_world populator (plugin watcher) starts" Dec 10 10:44:55 crc kubenswrapper[4780]: I1210 10:44:55.992801 4780 plugin_manager.go:118] "Starting Kubelet Plugin Manager" Dec 10 10:44:56 crc 
kubenswrapper[4780]: E1210 10:44:56.011698 4780 eviction_manager.go:285] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found" Dec 10 10:44:56 crc kubenswrapper[4780]: I1210 10:44:56.058212 4780 kubelet.go:2421] "SyncLoop ADD" source="file" pods=["openshift-machine-config-operator/kube-rbac-proxy-crio-crc","openshift-etcd/etcd-crc","openshift-kube-apiserver/kube-apiserver-crc","openshift-kube-controller-manager/kube-controller-manager-crc","openshift-kube-scheduler/openshift-kube-scheduler-crc"] Dec 10 10:44:56 crc kubenswrapper[4780]: I1210 10:44:56.058470 4780 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 10 10:44:56 crc kubenswrapper[4780]: E1210 10:44:56.058908 4780 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.51:6443: connect: connection refused" interval="400ms" Dec 10 10:44:56 crc kubenswrapper[4780]: I1210 10:44:56.060102 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:44:56 crc kubenswrapper[4780]: I1210 10:44:56.060137 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:44:56 crc kubenswrapper[4780]: I1210 10:44:56.060146 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:44:56 crc kubenswrapper[4780]: I1210 10:44:56.060292 4780 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 10 10:44:56 crc kubenswrapper[4780]: I1210 10:44:56.060501 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Dec 10 10:44:56 crc kubenswrapper[4780]: I1210 10:44:56.060557 4780 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 10 10:44:56 crc kubenswrapper[4780]: I1210 10:44:56.061219 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:44:56 crc kubenswrapper[4780]: I1210 10:44:56.061250 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:44:56 crc kubenswrapper[4780]: I1210 10:44:56.061259 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:44:56 crc kubenswrapper[4780]: I1210 10:44:56.061438 4780 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 10 10:44:56 crc kubenswrapper[4780]: I1210 10:44:56.061606 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-etcd/etcd-crc" Dec 10 10:44:56 crc kubenswrapper[4780]: I1210 10:44:56.061652 4780 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 10 10:44:56 crc kubenswrapper[4780]: I1210 10:44:56.062533 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:44:56 crc kubenswrapper[4780]: I1210 10:44:56.062557 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:44:56 crc kubenswrapper[4780]: I1210 10:44:56.062573 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:44:56 crc kubenswrapper[4780]: I1210 10:44:56.062591 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:44:56 crc kubenswrapper[4780]: I1210 10:44:56.062574 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:44:56 crc kubenswrapper[4780]: I1210 10:44:56.062627 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:44:56 crc kubenswrapper[4780]: I1210 10:44:56.062558 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:44:56 crc kubenswrapper[4780]: I1210 10:44:56.062766 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 10 10:44:56 crc kubenswrapper[4780]: I1210 10:44:56.062799 4780 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 10 10:44:56 crc kubenswrapper[4780]: I1210 10:44:56.062770 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:44:56 crc kubenswrapper[4780]: I1210 10:44:56.062859 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:44:56 crc kubenswrapper[4780]: I1210 10:44:56.062870 4780 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 10 10:44:56 crc kubenswrapper[4780]: I1210 10:44:56.063594 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:44:56 crc kubenswrapper[4780]: I1210 10:44:56.063617 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:44:56 crc kubenswrapper[4780]: I1210 10:44:56.063629 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:44:56 crc kubenswrapper[4780]: I1210 10:44:56.063840 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:44:56 crc kubenswrapper[4780]: I1210 10:44:56.063871 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:44:56 crc kubenswrapper[4780]: I1210 10:44:56.063885 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:44:56 crc kubenswrapper[4780]: I1210 10:44:56.064033 4780 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 10 10:44:56 crc kubenswrapper[4780]: I1210 10:44:56.064153 4780 
util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Dec 10 10:44:56 crc kubenswrapper[4780]: I1210 10:44:56.064190 4780 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 10 10:44:56 crc kubenswrapper[4780]: I1210 10:44:56.064584 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:44:56 crc kubenswrapper[4780]: I1210 10:44:56.064609 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:44:56 crc kubenswrapper[4780]: I1210 10:44:56.064622 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:44:56 crc kubenswrapper[4780]: I1210 10:44:56.064773 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Dec 10 10:44:56 crc kubenswrapper[4780]: I1210 10:44:56.064793 4780 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 10 10:44:56 crc kubenswrapper[4780]: I1210 10:44:56.065743 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:44:56 crc kubenswrapper[4780]: I1210 10:44:56.065775 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:44:56 crc kubenswrapper[4780]: I1210 10:44:56.065781 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:44:56 crc kubenswrapper[4780]: I1210 10:44:56.065787 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:44:56 crc kubenswrapper[4780]: I1210 10:44:56.065798 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:44:56 crc kubenswrapper[4780]: I1210 10:44:56.065808 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:44:56 crc kubenswrapper[4780]: I1210 10:44:56.093371 4780 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 10 10:44:56 crc kubenswrapper[4780]: I1210 10:44:56.095074 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:44:56 crc kubenswrapper[4780]: I1210 10:44:56.095116 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:44:56 crc kubenswrapper[4780]: I1210 10:44:56.095129 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:44:56 crc kubenswrapper[4780]: I1210 10:44:56.095157 4780 kubelet_node_status.go:76] "Attempting to register node" node="crc" Dec 10 10:44:56 crc kubenswrapper[4780]: E1210 10:44:56.095847 4780 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.51:6443: connect: connection refused" node="crc" Dec 10 10:44:56 crc kubenswrapper[4780]: I1210 10:44:56.177723 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: 
\"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 10 10:44:56 crc kubenswrapper[4780]: I1210 10:44:56.177808 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-kube\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-etc-kube\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Dec 10 10:44:56 crc kubenswrapper[4780]: I1210 10:44:56.177842 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-cert-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Dec 10 10:44:56 crc kubenswrapper[4780]: I1210 10:44:56.177864 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 10 10:44:56 crc kubenswrapper[4780]: I1210 10:44:56.177985 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 10 10:44:56 crc kubenswrapper[4780]: I1210 10:44:56.178061 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"static-pod-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-static-pod-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Dec 10 10:44:56 crc kubenswrapper[4780]: I1210 10:44:56.178085 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"data-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-data-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Dec 10 10:44:56 crc kubenswrapper[4780]: I1210 10:44:56.178128 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-cert-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Dec 10 10:44:56 crc kubenswrapper[4780]: I1210 10:44:56.178151 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-resource-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Dec 10 10:44:56 crc kubenswrapper[4780]: I1210 10:44:56.178171 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-cert-dir\") pod 
\"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Dec 10 10:44:56 crc kubenswrapper[4780]: I1210 10:44:56.178192 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-var-lib-kubelet\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Dec 10 10:44:56 crc kubenswrapper[4780]: I1210 10:44:56.178236 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-resource-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Dec 10 10:44:56 crc kubenswrapper[4780]: I1210 10:44:56.178256 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"usr-local-bin\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-usr-local-bin\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Dec 10 10:44:56 crc kubenswrapper[4780]: I1210 10:44:56.178333 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-log-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Dec 10 10:44:56 crc kubenswrapper[4780]: I1210 10:44:56.178355 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-resource-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Dec 10 10:44:56 crc kubenswrapper[4780]: I1210 10:44:56.279874 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-resource-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Dec 10 10:44:56 crc kubenswrapper[4780]: I1210 10:44:56.279960 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 10 10:44:56 crc kubenswrapper[4780]: I1210 10:44:56.279983 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-kube\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-etc-kube\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Dec 10 10:44:56 crc kubenswrapper[4780]: I1210 10:44:56.280002 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-cert-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Dec 10 
10:44:56 crc kubenswrapper[4780]: I1210 10:44:56.280020 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"data-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-data-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Dec 10 10:44:56 crc kubenswrapper[4780]: I1210 10:44:56.280100 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 10 10:44:56 crc kubenswrapper[4780]: I1210 10:44:56.280126 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 10 10:44:56 crc kubenswrapper[4780]: I1210 10:44:56.280121 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-resource-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Dec 10 10:44:56 crc kubenswrapper[4780]: I1210 10:44:56.280183 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 10 10:44:56 crc kubenswrapper[4780]: I1210 10:44:56.280183 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-kube\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-etc-kube\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Dec 10 10:44:56 crc kubenswrapper[4780]: I1210 10:44:56.280220 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 10 10:44:56 crc kubenswrapper[4780]: I1210 10:44:56.280199 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"data-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-data-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Dec 10 10:44:56 crc kubenswrapper[4780]: I1210 10:44:56.280167 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"static-pod-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-static-pod-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Dec 10 10:44:56 crc kubenswrapper[4780]: I1210 10:44:56.280133 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " 
pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 10 10:44:56 crc kubenswrapper[4780]: I1210 10:44:56.280144 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"static-pod-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-static-pod-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Dec 10 10:44:56 crc kubenswrapper[4780]: I1210 10:44:56.280431 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-var-lib-kubelet\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Dec 10 10:44:56 crc kubenswrapper[4780]: I1210 10:44:56.280464 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-resource-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Dec 10 10:44:56 crc kubenswrapper[4780]: I1210 10:44:56.280489 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-var-lib-kubelet\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Dec 10 10:44:56 crc kubenswrapper[4780]: I1210 10:44:56.280491 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"usr-local-bin\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-usr-local-bin\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Dec 10 10:44:56 crc kubenswrapper[4780]: I1210 10:44:56.280511 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"usr-local-bin\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-usr-local-bin\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Dec 10 10:44:56 crc kubenswrapper[4780]: I1210 10:44:56.280517 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-resource-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Dec 10 10:44:56 crc kubenswrapper[4780]: I1210 10:44:56.280527 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-log-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Dec 10 10:44:56 crc kubenswrapper[4780]: I1210 10:44:56.280201 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-cert-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Dec 10 10:44:56 crc kubenswrapper[4780]: I1210 10:44:56.280544 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-log-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Dec 10 10:44:56 crc 
kubenswrapper[4780]: I1210 10:44:56.280564 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-cert-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Dec 10 10:44:56 crc kubenswrapper[4780]: I1210 10:44:56.280603 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-resource-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Dec 10 10:44:56 crc kubenswrapper[4780]: I1210 10:44:56.280630 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-cert-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Dec 10 10:44:56 crc kubenswrapper[4780]: I1210 10:44:56.280659 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-cert-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Dec 10 10:44:56 crc kubenswrapper[4780]: I1210 10:44:56.280683 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-resource-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Dec 10 10:44:56 crc kubenswrapper[4780]: I1210 10:44:56.280793 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-cert-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Dec 10 10:44:56 crc kubenswrapper[4780]: I1210 10:44:56.296444 4780 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 10 10:44:56 crc kubenswrapper[4780]: I1210 10:44:56.298059 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:44:56 crc kubenswrapper[4780]: I1210 10:44:56.298100 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:44:56 crc kubenswrapper[4780]: I1210 10:44:56.298128 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:44:56 crc kubenswrapper[4780]: I1210 10:44:56.298156 4780 kubelet_node_status.go:76] "Attempting to register node" node="crc" Dec 10 10:44:56 crc kubenswrapper[4780]: E1210 10:44:56.298738 4780 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.51:6443: connect: connection refused" node="crc" Dec 10 10:44:56 crc kubenswrapper[4780]: I1210 10:44:56.399499 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Dec 10 10:44:56 crc kubenswrapper[4780]: I1210 10:44:56.410644 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd/etcd-crc" Dec 10 10:44:56 crc kubenswrapper[4780]: I1210 10:44:56.429486 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 10 10:44:56 crc kubenswrapper[4780]: I1210 10:44:56.444395 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Dec 10 10:44:56 crc kubenswrapper[4780]: W1210 10:44:56.446545 4780 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd1b160f5dda77d281dd8e69ec8d817f9.slice/crio-ccbd9edf20f0f3858fb3a56c812598c4a7d99014888758282cf45a6f09eea2dc WatchSource:0}: Error finding container ccbd9edf20f0f3858fb3a56c812598c4a7d99014888758282cf45a6f09eea2dc: Status 404 returned error can't find the container with id ccbd9edf20f0f3858fb3a56c812598c4a7d99014888758282cf45a6f09eea2dc Dec 10 10:44:56 crc kubenswrapper[4780]: W1210 10:44:56.448003 4780 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod2139d3e2895fc6797b9c76a1b4c9886d.slice/crio-82f6a22090aebf84b868b648481a62502feeaafda6c41c0e720d85c2e9bf785d WatchSource:0}: Error finding container 82f6a22090aebf84b868b648481a62502feeaafda6c41c0e720d85c2e9bf785d: Status 404 returned error can't find the container with id 82f6a22090aebf84b868b648481a62502feeaafda6c41c0e720d85c2e9bf785d Dec 10 10:44:56 crc kubenswrapper[4780]: I1210 10:44:56.451103 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Dec 10 10:44:56 crc kubenswrapper[4780]: W1210 10:44:56.453360 4780 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf4b27818a5e8e43d0dc095d08835c792.slice/crio-df448f564a7f02b06f5833f83997fdd59bf42518005b4b8ab179bcfe24a59351 WatchSource:0}: Error finding container df448f564a7f02b06f5833f83997fdd59bf42518005b4b8ab179bcfe24a59351: Status 404 returned error can't find the container with id df448f564a7f02b06f5833f83997fdd59bf42518005b4b8ab179bcfe24a59351 Dec 10 10:44:56 crc kubenswrapper[4780]: E1210 10:44:56.460055 4780 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.51:6443: connect: connection refused" interval="800ms" Dec 10 10:44:56 crc kubenswrapper[4780]: W1210 10:44:56.466719 4780 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3dcd261975c3d6b9a6ad6367fd4facd3.slice/crio-7e228b999988895f10472acd644ef77931a133694111adab4e01ac0f949d0a8e WatchSource:0}: Error finding container 7e228b999988895f10472acd644ef77931a133694111adab4e01ac0f949d0a8e: Status 404 returned error can't find the container with id 7e228b999988895f10472acd644ef77931a133694111adab4e01ac0f949d0a8e Dec 10 10:44:56 crc kubenswrapper[4780]: W1210 10:44:56.467589 4780 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf614b9022728cf315e60c057852e563e.slice/crio-2dab4eb9c2082f6698f65d359adae4c16437f5471082af5ab064c5c05d03c1ad WatchSource:0}: Error finding container 2dab4eb9c2082f6698f65d359adae4c16437f5471082af5ab064c5c05d03c1ad: Status 404 returned error can't find the container with id 2dab4eb9c2082f6698f65d359adae4c16437f5471082af5ab064c5c05d03c1ad Dec 10 10:44:56 crc kubenswrapper[4780]: I1210 10:44:56.699215 4780 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 10 10:44:56 crc kubenswrapper[4780]: I1210 10:44:56.701090 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:44:56 crc kubenswrapper[4780]: I1210 10:44:56.701143 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:44:56 crc kubenswrapper[4780]: I1210 10:44:56.701158 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:44:56 crc kubenswrapper[4780]: I1210 10:44:56.701190 4780 kubelet_node_status.go:76] "Attempting to register node" node="crc" Dec 10 10:44:56 crc kubenswrapper[4780]: E1210 10:44:56.701740 4780 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.51:6443: connect: connection refused" node="crc" Dec 10 10:44:56 crc kubenswrapper[4780]: I1210 10:44:56.851818 4780 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.51:6443: connect: connection refused Dec 10 10:44:56 crc kubenswrapper[4780]: W1210 10:44:56.938498 4780 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to 
list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": dial tcp 38.102.83.51:6443: connect: connection refused Dec 10 10:44:56 crc kubenswrapper[4780]: E1210 10:44:56.938601 4780 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get \"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0\": dial tcp 38.102.83.51:6443: connect: connection refused" logger="UnhandledError" Dec 10 10:44:56 crc kubenswrapper[4780]: I1210 10:44:56.963278 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"7e228b999988895f10472acd644ef77931a133694111adab4e01ac0f949d0a8e"} Dec 10 10:44:56 crc kubenswrapper[4780]: I1210 10:44:56.964482 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"df448f564a7f02b06f5833f83997fdd59bf42518005b4b8ab179bcfe24a59351"} Dec 10 10:44:56 crc kubenswrapper[4780]: I1210 10:44:56.965398 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"82f6a22090aebf84b868b648481a62502feeaafda6c41c0e720d85c2e9bf785d"} Dec 10 10:44:56 crc kubenswrapper[4780]: I1210 10:44:56.966162 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" event={"ID":"d1b160f5dda77d281dd8e69ec8d817f9","Type":"ContainerStarted","Data":"ccbd9edf20f0f3858fb3a56c812598c4a7d99014888758282cf45a6f09eea2dc"} Dec 10 10:44:56 crc kubenswrapper[4780]: I1210 10:44:56.967567 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"2dab4eb9c2082f6698f65d359adae4c16437f5471082af5ab064c5c05d03c1ad"} Dec 10 10:44:57 crc kubenswrapper[4780]: W1210 10:44:57.138254 4780 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": dial tcp 38.102.83.51:6443: connect: connection refused Dec 10 10:44:57 crc kubenswrapper[4780]: E1210 10:44:57.138661 4780 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Node: failed to list *v1.Node: Get \"https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0\": dial tcp 38.102.83.51:6443: connect: connection refused" logger="UnhandledError" Dec 10 10:44:57 crc kubenswrapper[4780]: E1210 10:44:57.261498 4780 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.51:6443: connect: connection refused" interval="1.6s" Dec 10 10:44:57 crc kubenswrapper[4780]: W1210 10:44:57.353442 4780 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": dial tcp 38.102.83.51:6443: connect: connection refused 
Dec 10 10:44:57 crc kubenswrapper[4780]: E1210 10:44:57.353600 4780 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.RuntimeClass: failed to list *v1.RuntimeClass: Get \"https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0\": dial tcp 38.102.83.51:6443: connect: connection refused" logger="UnhandledError" Dec 10 10:44:57 crc kubenswrapper[4780]: W1210 10:44:57.434610 4780 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0": dial tcp 38.102.83.51:6443: connect: connection refused Dec 10 10:44:57 crc kubenswrapper[4780]: E1210 10:44:57.434733 4780 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Service: failed to list *v1.Service: Get \"https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0\": dial tcp 38.102.83.51:6443: connect: connection refused" logger="UnhandledError" Dec 10 10:44:57 crc kubenswrapper[4780]: I1210 10:44:57.501990 4780 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 10 10:44:57 crc kubenswrapper[4780]: I1210 10:44:57.503971 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:44:57 crc kubenswrapper[4780]: I1210 10:44:57.504017 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:44:57 crc kubenswrapper[4780]: I1210 10:44:57.504030 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:44:57 crc kubenswrapper[4780]: I1210 10:44:57.504092 4780 kubelet_node_status.go:76] "Attempting to register node" node="crc" Dec 10 10:44:57 crc kubenswrapper[4780]: E1210 10:44:57.504764 4780 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.51:6443: connect: connection refused" node="crc" Dec 10 10:44:57 crc kubenswrapper[4780]: I1210 10:44:57.851713 4780 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.51:6443: connect: connection refused Dec 10 10:44:57 crc kubenswrapper[4780]: I1210 10:44:57.975057 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"55c85e62eda4733b5f1e264e6903e3f61bc4759bdf3f891c5b513a2195e0daab"} Dec 10 10:44:57 crc kubenswrapper[4780]: I1210 10:44:57.975132 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"8df4b9f29751cd2ebfbdf2b33f30fb6f519721f13d03173b45c5905e64524c88"} Dec 10 10:44:57 crc kubenswrapper[4780]: I1210 10:44:57.977524 4780 generic.go:334] "Generic (PLEG): container finished" podID="3dcd261975c3d6b9a6ad6367fd4facd3" containerID="3970fa6bd2aa3e93a4473d78218e9dd494aa5f7056efbcfb123160bb63162192" exitCode=0 Dec 10 10:44:57 crc kubenswrapper[4780]: I1210 10:44:57.977627 4780 kubelet.go:2453] 
"SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerDied","Data":"3970fa6bd2aa3e93a4473d78218e9dd494aa5f7056efbcfb123160bb63162192"} Dec 10 10:44:57 crc kubenswrapper[4780]: I1210 10:44:57.977681 4780 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 10 10:44:57 crc kubenswrapper[4780]: I1210 10:44:57.979104 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:44:57 crc kubenswrapper[4780]: I1210 10:44:57.979149 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:44:57 crc kubenswrapper[4780]: I1210 10:44:57.979163 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:44:57 crc kubenswrapper[4780]: I1210 10:44:57.981145 4780 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="6a7d241fa7fd864088c8bfa29a4457b38b3e78a6846872d6f52aeebb0de99c58" exitCode=0 Dec 10 10:44:57 crc kubenswrapper[4780]: I1210 10:44:57.981244 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerDied","Data":"6a7d241fa7fd864088c8bfa29a4457b38b3e78a6846872d6f52aeebb0de99c58"} Dec 10 10:44:57 crc kubenswrapper[4780]: I1210 10:44:57.981262 4780 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 10 10:44:57 crc kubenswrapper[4780]: I1210 10:44:57.982139 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:44:57 crc kubenswrapper[4780]: I1210 10:44:57.982171 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:44:57 crc kubenswrapper[4780]: I1210 10:44:57.982182 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:44:57 crc kubenswrapper[4780]: I1210 10:44:57.983743 4780 generic.go:334] "Generic (PLEG): container finished" podID="2139d3e2895fc6797b9c76a1b4c9886d" containerID="0fa41953f04ce503bed17439bbaf1f2e80eb1aa886cda19a5a27039f3015c5b7" exitCode=0 Dec 10 10:44:57 crc kubenswrapper[4780]: I1210 10:44:57.983826 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerDied","Data":"0fa41953f04ce503bed17439bbaf1f2e80eb1aa886cda19a5a27039f3015c5b7"} Dec 10 10:44:57 crc kubenswrapper[4780]: I1210 10:44:57.983852 4780 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 10 10:44:57 crc kubenswrapper[4780]: I1210 10:44:57.984304 4780 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 10 10:44:57 crc kubenswrapper[4780]: I1210 10:44:57.985183 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:44:57 crc kubenswrapper[4780]: I1210 10:44:57.985218 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:44:57 crc kubenswrapper[4780]: I1210 10:44:57.985230 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" 
Dec 10 10:44:57 crc kubenswrapper[4780]: I1210 10:44:57.986799 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:44:57 crc kubenswrapper[4780]: I1210 10:44:57.986826 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:44:57 crc kubenswrapper[4780]: I1210 10:44:57.986837 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:44:57 crc kubenswrapper[4780]: I1210 10:44:57.986835 4780 generic.go:334] "Generic (PLEG): container finished" podID="d1b160f5dda77d281dd8e69ec8d817f9" containerID="6136af77a31fde9c46edcf705caf249a029c3136befb7c0d84a26eab36d29a20" exitCode=0 Dec 10 10:44:57 crc kubenswrapper[4780]: I1210 10:44:57.986907 4780 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 10 10:44:57 crc kubenswrapper[4780]: I1210 10:44:57.986887 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" event={"ID":"d1b160f5dda77d281dd8e69ec8d817f9","Type":"ContainerDied","Data":"6136af77a31fde9c46edcf705caf249a029c3136befb7c0d84a26eab36d29a20"} Dec 10 10:44:57 crc kubenswrapper[4780]: I1210 10:44:57.987898 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:44:57 crc kubenswrapper[4780]: I1210 10:44:57.987996 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:44:57 crc kubenswrapper[4780]: I1210 10:44:57.988021 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:44:58 crc kubenswrapper[4780]: W1210 10:44:58.667672 4780 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": dial tcp 38.102.83.51:6443: connect: connection refused Dec 10 10:44:58 crc kubenswrapper[4780]: E1210 10:44:58.667784 4780 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get \"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0\": dial tcp 38.102.83.51:6443: connect: connection refused" logger="UnhandledError" Dec 10 10:44:58 crc kubenswrapper[4780]: I1210 10:44:58.852025 4780 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.51:6443: connect: connection refused Dec 10 10:44:58 crc kubenswrapper[4780]: E1210 10:44:58.862988 4780 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.51:6443: connect: connection refused" interval="3.2s" Dec 10 10:44:59 crc kubenswrapper[4780]: I1210 10:44:59.011414 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" event={"ID":"d1b160f5dda77d281dd8e69ec8d817f9","Type":"ContainerStarted","Data":"f9311d33a46721ffe228b91722ad2c6f36efb49b5e31cb703de657cc4a40dcbe"} Dec 10 10:44:59 crc kubenswrapper[4780]: I1210 10:44:59.011552 4780 kubelet_node_status.go:401] 
"Setting node annotation to enable volume controller attach/detach" Dec 10 10:44:59 crc kubenswrapper[4780]: I1210 10:44:59.017535 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:44:59 crc kubenswrapper[4780]: I1210 10:44:59.017581 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:44:59 crc kubenswrapper[4780]: I1210 10:44:59.017597 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:44:59 crc kubenswrapper[4780]: I1210 10:44:59.019953 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"cf105ae693d68f4e8d5eaae47d0827146c24312f507b378cf60de03a5034bfb7"} Dec 10 10:44:59 crc kubenswrapper[4780]: I1210 10:44:59.019992 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"325438d9694126427a6f6905508c0feb1ab3918532c7d6fd929e63b409574f5f"} Dec 10 10:44:59 crc kubenswrapper[4780]: I1210 10:44:59.020108 4780 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 10 10:44:59 crc kubenswrapper[4780]: I1210 10:44:59.024135 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:44:59 crc kubenswrapper[4780]: I1210 10:44:59.024183 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:44:59 crc kubenswrapper[4780]: I1210 10:44:59.024194 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:44:59 crc kubenswrapper[4780]: I1210 10:44:59.501123 4780 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 10 10:44:59 crc kubenswrapper[4780]: W1210 10:44:59.502791 4780 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": dial tcp 38.102.83.51:6443: connect: connection refused Dec 10 10:44:59 crc kubenswrapper[4780]: E1210 10:44:59.503078 4780 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Node: failed to list *v1.Node: Get \"https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0\": dial tcp 38.102.83.51:6443: connect: connection refused" logger="UnhandledError" Dec 10 10:44:59 crc kubenswrapper[4780]: I1210 10:44:59.503880 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:44:59 crc kubenswrapper[4780]: I1210 10:44:59.503944 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:44:59 crc kubenswrapper[4780]: I1210 10:44:59.503969 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:44:59 crc kubenswrapper[4780]: I1210 10:44:59.504004 4780 kubelet_node_status.go:76] "Attempting to register node" node="crc" Dec 10 10:44:59 crc kubenswrapper[4780]: E1210 10:44:59.504618 4780 
kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.51:6443: connect: connection refused" node="crc" Dec 10 10:44:59 crc kubenswrapper[4780]: I1210 10:44:59.507205 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"eccc5070135705c193e1020872bffc1b8aeb70dbe82f1bb520f36f012ca00703"} Dec 10 10:44:59 crc kubenswrapper[4780]: I1210 10:44:59.507260 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"b9592145faf65060693a3f1e14db253e10cd09c642ea0aa1a7682f5b06872f14"} Dec 10 10:44:59 crc kubenswrapper[4780]: I1210 10:44:59.509434 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"a352e1e0c2a6fcb27ed2d53b1952586d56239ac801471f41d319d4b9390f7334"} Dec 10 10:44:59 crc kubenswrapper[4780]: I1210 10:44:59.509478 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"7f0f1f88e79f81b145ad50cc2b0a6e66de12f33c6819aaa356c8e6f7808384f8"} Dec 10 10:44:59 crc kubenswrapper[4780]: I1210 10:44:59.511850 4780 generic.go:334] "Generic (PLEG): container finished" podID="2139d3e2895fc6797b9c76a1b4c9886d" containerID="9f16bee2af08ca98dad1fbbc5864b36f90851e2bcac54b37870c560109e1206f" exitCode=0 Dec 10 10:44:59 crc kubenswrapper[4780]: I1210 10:44:59.511983 4780 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 10 10:44:59 crc kubenswrapper[4780]: I1210 10:44:59.511955 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerDied","Data":"9f16bee2af08ca98dad1fbbc5864b36f90851e2bcac54b37870c560109e1206f"} Dec 10 10:44:59 crc kubenswrapper[4780]: I1210 10:44:59.512753 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:44:59 crc kubenswrapper[4780]: I1210 10:44:59.512779 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:44:59 crc kubenswrapper[4780]: I1210 10:44:59.512839 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:44:59 crc kubenswrapper[4780]: W1210 10:44:59.564423 4780 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": dial tcp 38.102.83.51:6443: connect: connection refused Dec 10 10:44:59 crc kubenswrapper[4780]: E1210 10:44:59.564558 4780 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.RuntimeClass: failed to list *v1.RuntimeClass: Get \"https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0\": dial tcp 38.102.83.51:6443: connect: connection refused" logger="UnhandledError" Dec 10 10:44:59 crc kubenswrapper[4780]: W1210 10:44:59.727878 4780 reflector.go:561] 
k8s.io/client-go/informers/factory.go:160: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0": dial tcp 38.102.83.51:6443: connect: connection refused Dec 10 10:44:59 crc kubenswrapper[4780]: E1210 10:44:59.727978 4780 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Service: failed to list *v1.Service: Get \"https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0\": dial tcp 38.102.83.51:6443: connect: connection refused" logger="UnhandledError" Dec 10 10:44:59 crc kubenswrapper[4780]: I1210 10:44:59.851652 4780 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.51:6443: connect: connection refused Dec 10 10:45:00 crc kubenswrapper[4780]: I1210 10:45:00.743298 4780 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Dec 10 10:45:00 crc kubenswrapper[4780]: I1210 10:45:00.760616 4780 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Dec 10 10:45:00 crc kubenswrapper[4780]: I1210 10:45:00.775459 4780 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 10 10:45:00 crc kubenswrapper[4780]: I1210 10:45:00.763462 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"72f3bd530fbcc66ed779ee9f6ef1c442f2697ca027aad5bd2473f42101d55528"} Dec 10 10:45:00 crc kubenswrapper[4780]: I1210 10:45:00.778234 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:00 crc kubenswrapper[4780]: I1210 10:45:00.778269 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:00 crc kubenswrapper[4780]: I1210 10:45:00.778281 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:00 crc kubenswrapper[4780]: I1210 10:45:00.781866 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"d6c4b099607f36efe13488a53c66715ba756493eaa11612cef18b8174b7b48ef"} Dec 10 10:45:00 crc kubenswrapper[4780]: I1210 10:45:00.812844 4780 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 10 10:45:00 crc kubenswrapper[4780]: I1210 10:45:00.813039 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"e2c2882ae9c017aadcf220f621d126e4b993c91d6fc35cc71ea7a37eadc9879a"} Dec 10 10:45:00 crc kubenswrapper[4780]: I1210 10:45:00.813298 4780 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 10 10:45:00 crc kubenswrapper[4780]: I1210 10:45:00.813570 4780 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 10 10:45:00 crc kubenswrapper[4780]: I1210 10:45:00.814008 
4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:00 crc kubenswrapper[4780]: I1210 10:45:00.814041 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:00 crc kubenswrapper[4780]: I1210 10:45:00.814051 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:00 crc kubenswrapper[4780]: I1210 10:45:00.815171 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:00 crc kubenswrapper[4780]: I1210 10:45:00.815217 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:00 crc kubenswrapper[4780]: I1210 10:45:00.815229 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:00 crc kubenswrapper[4780]: I1210 10:45:00.816324 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:00 crc kubenswrapper[4780]: I1210 10:45:00.816375 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:00 crc kubenswrapper[4780]: I1210 10:45:00.816405 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:00 crc kubenswrapper[4780]: I1210 10:45:00.850894 4780 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.51:6443: connect: connection refused Dec 10 10:45:01 crc kubenswrapper[4780]: E1210 10:45:01.065522 4780 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/default/events\": dial tcp 38.102.83.51:6443: connect: connection refused" event="&Event{ObjectMeta:{crc.187fd4bf056ddb32 default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:Starting,Message:Starting kubelet.,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2025-12-10 10:44:55.847451442 +0000 UTC m=+0.700844875,LastTimestamp:2025-12-10 10:44:55.847451442 +0000 UTC m=+0.700844875,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Dec 10 10:45:01 crc kubenswrapper[4780]: I1210 10:45:01.085777 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Dec 10 10:45:01 crc kubenswrapper[4780]: I1210 10:45:01.827573 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"8d89d3aee7e5a6ca9dd1a1d69a969e092718c3f8fe818c2bda1dba092136c8fe"} Dec 10 10:45:01 crc kubenswrapper[4780]: I1210 10:45:01.827776 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"531ca3c69106ee33211f11eba4a6fcbcf6cf5b8b6ee16193b5a118999537d2e0"} Dec 10 10:45:01 crc kubenswrapper[4780]: I1210 10:45:01.828329 4780 
kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 10 10:45:01 crc kubenswrapper[4780]: I1210 10:45:01.830800 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:01 crc kubenswrapper[4780]: I1210 10:45:01.830897 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:01 crc kubenswrapper[4780]: I1210 10:45:01.830941 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:01 crc kubenswrapper[4780]: I1210 10:45:01.831717 4780 generic.go:334] "Generic (PLEG): container finished" podID="2139d3e2895fc6797b9c76a1b4c9886d" containerID="e2c2882ae9c017aadcf220f621d126e4b993c91d6fc35cc71ea7a37eadc9879a" exitCode=0 Dec 10 10:45:01 crc kubenswrapper[4780]: I1210 10:45:01.831808 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerDied","Data":"e2c2882ae9c017aadcf220f621d126e4b993c91d6fc35cc71ea7a37eadc9879a"} Dec 10 10:45:01 crc kubenswrapper[4780]: I1210 10:45:01.831985 4780 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 10 10:45:01 crc kubenswrapper[4780]: I1210 10:45:01.832078 4780 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Dec 10 10:45:01 crc kubenswrapper[4780]: I1210 10:45:01.832175 4780 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 10 10:45:01 crc kubenswrapper[4780]: I1210 10:45:01.832078 4780 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Dec 10 10:45:01 crc kubenswrapper[4780]: I1210 10:45:01.832291 4780 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 10 10:45:01 crc kubenswrapper[4780]: I1210 10:45:01.833564 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:01 crc kubenswrapper[4780]: I1210 10:45:01.833618 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:01 crc kubenswrapper[4780]: I1210 10:45:01.833629 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:01 crc kubenswrapper[4780]: I1210 10:45:01.833746 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:01 crc kubenswrapper[4780]: I1210 10:45:01.833760 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:01 crc kubenswrapper[4780]: I1210 10:45:01.833783 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:01 crc kubenswrapper[4780]: I1210 10:45:01.833812 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:01 crc kubenswrapper[4780]: I1210 10:45:01.833790 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:01 crc kubenswrapper[4780]: I1210 10:45:01.833853 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:01 crc kubenswrapper[4780]: I1210 10:45:01.851867 4780 
csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.51:6443: connect: connection refused Dec 10 10:45:02 crc kubenswrapper[4780]: W1210 10:45:02.061562 4780 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": dial tcp 38.102.83.51:6443: connect: connection refused Dec 10 10:45:02 crc kubenswrapper[4780]: E1210 10:45:02.061999 4780 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get \"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0\": dial tcp 38.102.83.51:6443: connect: connection refused" logger="UnhandledError" Dec 10 10:45:02 crc kubenswrapper[4780]: E1210 10:45:02.064454 4780 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.51:6443: connect: connection refused" interval="6.4s" Dec 10 10:45:02 crc kubenswrapper[4780]: I1210 10:45:02.089707 4780 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 10 10:45:02 crc kubenswrapper[4780]: I1210 10:45:02.089966 4780 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver namespace/openshift-kube-apiserver: Startup probe status=failure output="Get \"https://192.168.126.11:6443/livez\": dial tcp 192.168.126.11:6443: connect: connection refused" start-of-body= Dec 10 10:45:02 crc kubenswrapper[4780]: I1210 10:45:02.090158 4780 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" probeResult="failure" output="Get \"https://192.168.126.11:6443/livez\": dial tcp 192.168.126.11:6443: connect: connection refused" Dec 10 10:45:02 crc kubenswrapper[4780]: I1210 10:45:02.705110 4780 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 10 10:45:02 crc kubenswrapper[4780]: I1210 10:45:02.707776 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:02 crc kubenswrapper[4780]: I1210 10:45:02.707864 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:02 crc kubenswrapper[4780]: I1210 10:45:02.707876 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:02 crc kubenswrapper[4780]: I1210 10:45:02.707964 4780 kubelet_node_status.go:76] "Attempting to register node" node="crc" Dec 10 10:45:02 crc kubenswrapper[4780]: E1210 10:45:02.708976 4780 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.51:6443: connect: connection refused" node="crc" Dec 10 10:45:02 crc kubenswrapper[4780]: I1210 10:45:02.745373 4780 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Dec 10 10:45:02 crc kubenswrapper[4780]: I1210 10:45:02.837967 4780 kubelet.go:2453] 
"SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"b36eaabc1d3267a1ecc43c1b45a77bb14b87f6b9d376062e48ceb3daca3729b3"} Dec 10 10:45:02 crc kubenswrapper[4780]: I1210 10:45:02.838053 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"e07cb0ca88a1b7a32f7c8ba948b0d5f90a08cb9f10c95285d6985d2daef5529b"} Dec 10 10:45:02 crc kubenswrapper[4780]: I1210 10:45:02.838108 4780 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Dec 10 10:45:02 crc kubenswrapper[4780]: I1210 10:45:02.838180 4780 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 10 10:45:02 crc kubenswrapper[4780]: I1210 10:45:02.838219 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 10 10:45:02 crc kubenswrapper[4780]: I1210 10:45:02.838181 4780 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 10 10:45:02 crc kubenswrapper[4780]: I1210 10:45:02.839552 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:02 crc kubenswrapper[4780]: I1210 10:45:02.839601 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:02 crc kubenswrapper[4780]: I1210 10:45:02.839605 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:02 crc kubenswrapper[4780]: I1210 10:45:02.839644 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:02 crc kubenswrapper[4780]: I1210 10:45:02.839616 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:02 crc kubenswrapper[4780]: I1210 10:45:02.839655 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:02 crc kubenswrapper[4780]: I1210 10:45:02.852123 4780 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.51:6443: connect: connection refused Dec 10 10:45:03 crc kubenswrapper[4780]: I1210 10:45:03.847048 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"41450938b04291094dea7c8bf9bf7a8a105fa00797e51a7589d4c6e8caf5eba5"} Dec 10 10:45:03 crc kubenswrapper[4780]: I1210 10:45:03.847130 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"ea1c2a440f85963adf3d061a880e537029c5c24a2291a46939e4ec9d939793ab"} Dec 10 10:45:03 crc kubenswrapper[4780]: I1210 10:45:03.847269 4780 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Dec 10 10:45:03 crc kubenswrapper[4780]: I1210 10:45:03.847311 4780 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 10 10:45:03 crc kubenswrapper[4780]: I1210 10:45:03.847348 4780 kubelet_node_status.go:401] "Setting node annotation to enable volume controller 
attach/detach" Dec 10 10:45:03 crc kubenswrapper[4780]: I1210 10:45:03.848798 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:03 crc kubenswrapper[4780]: I1210 10:45:03.848823 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:03 crc kubenswrapper[4780]: I1210 10:45:03.848855 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:03 crc kubenswrapper[4780]: I1210 10:45:03.848875 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:03 crc kubenswrapper[4780]: I1210 10:45:03.848894 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:03 crc kubenswrapper[4780]: I1210 10:45:03.848876 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:04 crc kubenswrapper[4780]: I1210 10:45:04.521515 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Dec 10 10:45:04 crc kubenswrapper[4780]: I1210 10:45:04.521846 4780 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 10 10:45:04 crc kubenswrapper[4780]: I1210 10:45:04.523585 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:04 crc kubenswrapper[4780]: I1210 10:45:04.523649 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:04 crc kubenswrapper[4780]: I1210 10:45:04.523661 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:04 crc kubenswrapper[4780]: I1210 10:45:04.859370 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"b332743ab49123e4673694feef994f5bb845661ca73dc8c0ac3be56dd6aa7e03"} Dec 10 10:45:04 crc kubenswrapper[4780]: I1210 10:45:04.859504 4780 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 10 10:45:04 crc kubenswrapper[4780]: I1210 10:45:04.860385 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:04 crc kubenswrapper[4780]: I1210 10:45:04.860423 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:04 crc kubenswrapper[4780]: I1210 10:45:04.860435 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:05 crc kubenswrapper[4780]: I1210 10:45:05.160495 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Dec 10 10:45:05 crc kubenswrapper[4780]: I1210 10:45:05.160709 4780 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 10 10:45:05 crc kubenswrapper[4780]: I1210 10:45:05.162101 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:05 crc kubenswrapper[4780]: I1210 10:45:05.162161 4780 kubelet_node_status.go:724] "Recording event 
message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:05 crc kubenswrapper[4780]: I1210 10:45:05.162171 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:05 crc kubenswrapper[4780]: I1210 10:45:05.746017 4780 patch_prober.go:28] interesting pod/kube-controller-manager-crc container/cluster-policy-controller namespace/openshift-kube-controller-manager: Startup probe status=failure output="Get \"https://192.168.126.11:10357/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body= Dec 10 10:45:05 crc kubenswrapper[4780]: I1210 10:45:05.746109 4780 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="f614b9022728cf315e60c057852e563e" containerName="cluster-policy-controller" probeResult="failure" output="Get \"https://192.168.126.11:10357/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Dec 10 10:45:05 crc kubenswrapper[4780]: I1210 10:45:05.861895 4780 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 10 10:45:05 crc kubenswrapper[4780]: I1210 10:45:05.862831 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:05 crc kubenswrapper[4780]: I1210 10:45:05.862867 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:05 crc kubenswrapper[4780]: I1210 10:45:05.862879 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:06 crc kubenswrapper[4780]: E1210 10:45:06.012880 4780 eviction_manager.go:285] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found" Dec 10 10:45:06 crc kubenswrapper[4780]: I1210 10:45:06.103951 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 10 10:45:06 crc kubenswrapper[4780]: I1210 10:45:06.104179 4780 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 10 10:45:06 crc kubenswrapper[4780]: I1210 10:45:06.105622 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:06 crc kubenswrapper[4780]: I1210 10:45:06.105683 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:06 crc kubenswrapper[4780]: I1210 10:45:06.105698 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:09 crc kubenswrapper[4780]: I1210 10:45:09.109341 4780 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 10 10:45:09 crc kubenswrapper[4780]: I1210 10:45:09.110960 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:09 crc kubenswrapper[4780]: I1210 10:45:09.111006 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:09 crc kubenswrapper[4780]: I1210 10:45:09.111020 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:09 crc kubenswrapper[4780]: I1210 10:45:09.111048 4780 kubelet_node_status.go:76] 
"Attempting to register node" node="crc" Dec 10 10:45:09 crc kubenswrapper[4780]: I1210 10:45:09.694478 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-etcd/etcd-crc" Dec 10 10:45:09 crc kubenswrapper[4780]: I1210 10:45:09.694821 4780 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 10 10:45:09 crc kubenswrapper[4780]: I1210 10:45:09.696647 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:09 crc kubenswrapper[4780]: I1210 10:45:09.696698 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:09 crc kubenswrapper[4780]: I1210 10:45:09.696711 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:12 crc kubenswrapper[4780]: I1210 10:45:12.115575 4780 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-etcd/etcd-crc" Dec 10 10:45:12 crc kubenswrapper[4780]: I1210 10:45:12.115864 4780 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 10 10:45:12 crc kubenswrapper[4780]: I1210 10:45:12.117409 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:12 crc kubenswrapper[4780]: I1210 10:45:12.117452 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:12 crc kubenswrapper[4780]: I1210 10:45:12.117468 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:12 crc kubenswrapper[4780]: I1210 10:45:12.749891 4780 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-etcd/etcd-crc" Dec 10 10:45:12 crc kubenswrapper[4780]: I1210 10:45:12.880236 4780 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 10 10:45:12 crc kubenswrapper[4780]: I1210 10:45:12.881124 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:12 crc kubenswrapper[4780]: I1210 10:45:12.881179 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:12 crc kubenswrapper[4780]: I1210 10:45:12.881191 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:12 crc kubenswrapper[4780]: I1210 10:45:12.988289 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-etcd/etcd-crc" Dec 10 10:45:13 crc kubenswrapper[4780]: I1210 10:45:13.028085 4780 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver-check-endpoints namespace/openshift-kube-apiserver: Liveness probe status=failure output="Get \"https://192.168.126.11:17697/healthz\": read tcp 192.168.126.11:48828->192.168.126.11:17697: read: connection reset by peer" start-of-body= Dec 10 10:45:13 crc kubenswrapper[4780]: I1210 10:45:13.028163 4780 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" probeResult="failure" output="Get \"https://192.168.126.11:17697/healthz\": read tcp 192.168.126.11:48828->192.168.126.11:17697: read: connection reset by peer" 
Dec 10 10:45:13 crc kubenswrapper[4780]: I1210 10:45:13.853406 4780 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": net/http: TLS handshake timeout Dec 10 10:45:13 crc kubenswrapper[4780]: W1210 10:45:13.865984 4780 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": net/http: TLS handshake timeout Dec 10 10:45:13 crc kubenswrapper[4780]: I1210 10:45:13.866303 4780 trace.go:236] Trace[386721577]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (10-Dec-2025 10:45:03.864) (total time: 10002ms): Dec 10 10:45:13 crc kubenswrapper[4780]: Trace[386721577]: ---"Objects listed" error:Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": net/http: TLS handshake timeout 10001ms (10:45:13.865) Dec 10 10:45:13 crc kubenswrapper[4780]: Trace[386721577]: [10.002091018s] [10.002091018s] END Dec 10 10:45:13 crc kubenswrapper[4780]: E1210 10:45:13.866346 4780 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.RuntimeClass: failed to list *v1.RuntimeClass: Get \"https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0\": net/http: TLS handshake timeout" logger="UnhandledError" Dec 10 10:45:14 crc kubenswrapper[4780]: I1210 10:45:14.050536 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/0.log" Dec 10 10:45:14 crc kubenswrapper[4780]: I1210 10:45:14.055146 4780 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="8d89d3aee7e5a6ca9dd1a1d69a969e092718c3f8fe818c2bda1dba092136c8fe" exitCode=255 Dec 10 10:45:14 crc kubenswrapper[4780]: I1210 10:45:14.055200 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerDied","Data":"8d89d3aee7e5a6ca9dd1a1d69a969e092718c3f8fe818c2bda1dba092136c8fe"} Dec 10 10:45:14 crc kubenswrapper[4780]: I1210 10:45:14.055350 4780 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 10 10:45:14 crc kubenswrapper[4780]: I1210 10:45:14.055456 4780 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 10 10:45:14 crc kubenswrapper[4780]: I1210 10:45:14.057732 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:14 crc kubenswrapper[4780]: I1210 10:45:14.057776 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:14 crc kubenswrapper[4780]: I1210 10:45:14.057801 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:14 crc kubenswrapper[4780]: I1210 10:45:14.057795 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:14 crc kubenswrapper[4780]: I1210 10:45:14.057847 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:14 crc 
kubenswrapper[4780]: I1210 10:45:14.057859 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:14 crc kubenswrapper[4780]: I1210 10:45:14.058575 4780 scope.go:117] "RemoveContainer" containerID="8d89d3aee7e5a6ca9dd1a1d69a969e092718c3f8fe818c2bda1dba092136c8fe" Dec 10 10:45:14 crc kubenswrapper[4780]: W1210 10:45:14.254920 4780 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": net/http: TLS handshake timeout Dec 10 10:45:14 crc kubenswrapper[4780]: I1210 10:45:14.255093 4780 trace.go:236] Trace[1106703545]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (10-Dec-2025 10:45:04.252) (total time: 10002ms): Dec 10 10:45:14 crc kubenswrapper[4780]: Trace[1106703545]: ---"Objects listed" error:Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": net/http: TLS handshake timeout 10002ms (10:45:14.254) Dec 10 10:45:14 crc kubenswrapper[4780]: Trace[1106703545]: [10.002277269s] [10.002277269s] END Dec 10 10:45:14 crc kubenswrapper[4780]: E1210 10:45:14.255139 4780 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Node: failed to list *v1.Node: Get \"https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0\": net/http: TLS handshake timeout" logger="UnhandledError" Dec 10 10:45:15 crc kubenswrapper[4780]: I1210 10:45:15.060873 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/0.log" Dec 10 10:45:15 crc kubenswrapper[4780]: I1210 10:45:15.062761 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"f71a2483b5040f737133b92baa6ea162f365c492a0d860a50ba8eabd7ca23519"} Dec 10 10:45:15 crc kubenswrapper[4780]: I1210 10:45:15.063007 4780 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 10 10:45:15 crc kubenswrapper[4780]: I1210 10:45:15.064088 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:15 crc kubenswrapper[4780]: I1210 10:45:15.064127 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:15 crc kubenswrapper[4780]: I1210 10:45:15.064149 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:15 crc kubenswrapper[4780]: I1210 10:45:15.166773 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Dec 10 10:45:15 crc kubenswrapper[4780]: I1210 10:45:15.166981 4780 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 10 10:45:15 crc kubenswrapper[4780]: I1210 10:45:15.169236 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:15 crc kubenswrapper[4780]: I1210 10:45:15.169315 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:15 
crc kubenswrapper[4780]: I1210 10:45:15.169331 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:15 crc kubenswrapper[4780]: I1210 10:45:15.746453 4780 patch_prober.go:28] interesting pod/kube-controller-manager-crc container/cluster-policy-controller namespace/openshift-kube-controller-manager: Startup probe status=failure output="Get \"https://192.168.126.11:10357/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Dec 10 10:45:15 crc kubenswrapper[4780]: I1210 10:45:15.746969 4780 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="f614b9022728cf315e60c057852e563e" containerName="cluster-policy-controller" probeResult="failure" output="Get \"https://192.168.126.11:10357/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Dec 10 10:45:15 crc kubenswrapper[4780]: I1210 10:45:15.784180 4780 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver namespace/openshift-kube-apiserver: Startup probe status=failure output="HTTP probe failed with statuscode: 403" start-of-body={"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"forbidden: User \"system:anonymous\" cannot get path \"/livez\"","reason":"Forbidden","details":{},"code":403} Dec 10 10:45:15 crc kubenswrapper[4780]: I1210 10:45:15.784237 4780 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 403" Dec 10 10:45:15 crc kubenswrapper[4780]: I1210 10:45:15.825834 4780 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver namespace/openshift-kube-apiserver: Startup probe status=failure output="HTTP probe failed with statuscode: 403" start-of-body={"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"forbidden: User \"system:anonymous\" cannot get path \"/livez\"","reason":"Forbidden","details":{},"code":403} Dec 10 10:45:15 crc kubenswrapper[4780]: I1210 10:45:15.825978 4780 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 403" Dec 10 10:45:16 crc kubenswrapper[4780]: E1210 10:45:16.013810 4780 eviction_manager.go:285] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found" Dec 10 10:45:17 crc kubenswrapper[4780]: I1210 10:45:17.167348 4780 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver namespace/openshift-kube-apiserver: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[+]ping ok Dec 10 10:45:17 crc kubenswrapper[4780]: [+]log ok Dec 10 10:45:17 crc kubenswrapper[4780]: [+]etcd ok Dec 10 10:45:17 crc kubenswrapper[4780]: [+]poststarthook/openshift.io-oauth-apiserver-reachable ok Dec 10 10:45:17 crc kubenswrapper[4780]: [+]poststarthook/start-apiserver-admission-initializer ok Dec 10 10:45:17 crc kubenswrapper[4780]: [+]poststarthook/quota.openshift.io-clusterquotamapping ok Dec 10 10:45:17 crc kubenswrapper[4780]: [+]poststarthook/openshift.io-api-request-count-filter 
ok Dec 10 10:45:17 crc kubenswrapper[4780]: [+]poststarthook/openshift.io-startkubeinformers ok Dec 10 10:45:17 crc kubenswrapper[4780]: [+]poststarthook/openshift.io-openshift-apiserver-reachable ok Dec 10 10:45:17 crc kubenswrapper[4780]: [+]poststarthook/generic-apiserver-start-informers ok Dec 10 10:45:17 crc kubenswrapper[4780]: [+]poststarthook/priority-and-fairness-config-consumer ok Dec 10 10:45:17 crc kubenswrapper[4780]: [+]poststarthook/priority-and-fairness-filter ok Dec 10 10:45:17 crc kubenswrapper[4780]: [+]poststarthook/storage-object-count-tracker-hook ok Dec 10 10:45:17 crc kubenswrapper[4780]: [+]poststarthook/start-apiextensions-informers ok Dec 10 10:45:17 crc kubenswrapper[4780]: [+]poststarthook/start-apiextensions-controllers ok Dec 10 10:45:17 crc kubenswrapper[4780]: [+]poststarthook/crd-informer-synced ok Dec 10 10:45:17 crc kubenswrapper[4780]: [+]poststarthook/start-system-namespaces-controller ok Dec 10 10:45:17 crc kubenswrapper[4780]: [+]poststarthook/start-cluster-authentication-info-controller ok Dec 10 10:45:17 crc kubenswrapper[4780]: [+]poststarthook/start-kube-apiserver-identity-lease-controller ok Dec 10 10:45:17 crc kubenswrapper[4780]: [+]poststarthook/start-kube-apiserver-identity-lease-garbage-collector ok Dec 10 10:45:17 crc kubenswrapper[4780]: [+]poststarthook/start-legacy-token-tracking-controller ok Dec 10 10:45:17 crc kubenswrapper[4780]: [+]poststarthook/start-service-ip-repair-controllers ok Dec 10 10:45:17 crc kubenswrapper[4780]: [-]poststarthook/rbac/bootstrap-roles failed: reason withheld Dec 10 10:45:17 crc kubenswrapper[4780]: [+]poststarthook/scheduling/bootstrap-system-priority-classes ok Dec 10 10:45:17 crc kubenswrapper[4780]: [+]poststarthook/priority-and-fairness-config-producer ok Dec 10 10:45:17 crc kubenswrapper[4780]: [+]poststarthook/bootstrap-controller ok Dec 10 10:45:17 crc kubenswrapper[4780]: [+]poststarthook/aggregator-reload-proxy-client-cert ok Dec 10 10:45:17 crc kubenswrapper[4780]: [+]poststarthook/start-kube-aggregator-informers ok Dec 10 10:45:17 crc kubenswrapper[4780]: [+]poststarthook/apiservice-status-local-available-controller ok Dec 10 10:45:17 crc kubenswrapper[4780]: [+]poststarthook/apiservice-status-remote-available-controller ok Dec 10 10:45:17 crc kubenswrapper[4780]: [+]poststarthook/apiservice-registration-controller ok Dec 10 10:45:17 crc kubenswrapper[4780]: [+]poststarthook/apiservice-wait-for-first-sync ok Dec 10 10:45:17 crc kubenswrapper[4780]: [+]poststarthook/apiservice-discovery-controller ok Dec 10 10:45:17 crc kubenswrapper[4780]: [+]poststarthook/kube-apiserver-autoregistration ok Dec 10 10:45:17 crc kubenswrapper[4780]: [+]autoregister-completion ok Dec 10 10:45:17 crc kubenswrapper[4780]: [+]poststarthook/apiservice-openapi-controller ok Dec 10 10:45:17 crc kubenswrapper[4780]: [+]poststarthook/apiservice-openapiv3-controller ok Dec 10 10:45:17 crc kubenswrapper[4780]: livez check failed Dec 10 10:45:17 crc kubenswrapper[4780]: I1210 10:45:17.167482 4780 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 500" Dec 10 10:45:17 crc kubenswrapper[4780]: I1210 10:45:17.289524 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 10 10:45:17 crc kubenswrapper[4780]: I1210 10:45:17.289751 4780 kubelet_node_status.go:401] "Setting node 
annotation to enable volume controller attach/detach" Dec 10 10:45:17 crc kubenswrapper[4780]: I1210 10:45:17.291073 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:17 crc kubenswrapper[4780]: I1210 10:45:17.291104 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:17 crc kubenswrapper[4780]: I1210 10:45:17.291115 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:21 crc kubenswrapper[4780]: E1210 10:45:21.106596 4780 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": context deadline exceeded" interval="7s" Dec 10 10:45:21 crc kubenswrapper[4780]: E1210 10:45:21.461755 4780 kubelet_node_status.go:99] "Unable to register node with API server" err="nodes \"crc\" is forbidden: autoscaling.openshift.io/ManagedNode infra config cache not synchronized" node="crc" Dec 10 10:45:21 crc kubenswrapper[4780]: I1210 10:45:21.463776 4780 reflector.go:368] Caches populated for *v1.CSIDriver from k8s.io/client-go/informers/factory.go:160 Dec 10 10:45:21 crc kubenswrapper[4780]: I1210 10:45:21.466499 4780 reconstruct.go:205] "DevicePaths of reconstructed volumes updated" Dec 10 10:45:21 crc kubenswrapper[4780]: I1210 10:45:21.467276 4780 trace.go:236] Trace[801605477]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (10-Dec-2025 10:45:05.818) (total time: 15648ms): Dec 10 10:45:21 crc kubenswrapper[4780]: Trace[801605477]: ---"Objects listed" error: 15648ms (10:45:21.466) Dec 10 10:45:21 crc kubenswrapper[4780]: Trace[801605477]: [15.648574058s] [15.648574058s] END Dec 10 10:45:21 crc kubenswrapper[4780]: I1210 10:45:21.467313 4780 reflector.go:368] Caches populated for *v1.Service from k8s.io/client-go/informers/factory.go:160 Dec 10 10:45:22 crc kubenswrapper[4780]: I1210 10:45:22.094781 4780 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 10 10:45:22 crc kubenswrapper[4780]: I1210 10:45:22.095067 4780 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 10 10:45:22 crc kubenswrapper[4780]: I1210 10:45:22.096959 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:22 crc kubenswrapper[4780]: I1210 10:45:22.097017 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:22 crc kubenswrapper[4780]: I1210 10:45:22.097041 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:22 crc kubenswrapper[4780]: I1210 10:45:22.099726 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 10 10:45:22 crc kubenswrapper[4780]: I1210 10:45:22.114406 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/1.log" Dec 10 10:45:22 crc kubenswrapper[4780]: I1210 10:45:22.115048 4780 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/0.log" Dec 10 10:45:22 crc kubenswrapper[4780]: I1210 10:45:22.117070 4780 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="f71a2483b5040f737133b92baa6ea162f365c492a0d860a50ba8eabd7ca23519" exitCode=255 Dec 10 10:45:22 crc kubenswrapper[4780]: I1210 10:45:22.117124 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerDied","Data":"f71a2483b5040f737133b92baa6ea162f365c492a0d860a50ba8eabd7ca23519"} Dec 10 10:45:22 crc kubenswrapper[4780]: I1210 10:45:22.117212 4780 scope.go:117] "RemoveContainer" containerID="8d89d3aee7e5a6ca9dd1a1d69a969e092718c3f8fe818c2bda1dba092136c8fe" Dec 10 10:45:22 crc kubenswrapper[4780]: I1210 10:45:22.117397 4780 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 10 10:45:22 crc kubenswrapper[4780]: I1210 10:45:22.118293 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:22 crc kubenswrapper[4780]: I1210 10:45:22.118335 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:22 crc kubenswrapper[4780]: I1210 10:45:22.118359 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:22 crc kubenswrapper[4780]: I1210 10:45:22.119455 4780 scope.go:117] "RemoveContainer" containerID="f71a2483b5040f737133b92baa6ea162f365c492a0d860a50ba8eabd7ca23519" Dec 10 10:45:22 crc kubenswrapper[4780]: E1210 10:45:22.119767 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver-check-endpoints\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\"" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" Dec 10 10:45:22 crc kubenswrapper[4780]: I1210 10:45:22.779947 4780 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Dec 10 10:45:22 crc kubenswrapper[4780]: I1210 10:45:22.780174 4780 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 10 10:45:22 crc kubenswrapper[4780]: I1210 10:45:22.781534 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:22 crc kubenswrapper[4780]: I1210 10:45:22.781585 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:22 crc kubenswrapper[4780]: I1210 10:45:22.781602 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:22 crc kubenswrapper[4780]: I1210 10:45:22.792698 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Dec 10 10:45:23 crc kubenswrapper[4780]: I1210 10:45:23.121487 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/1.log" Dec 10 
10:45:23 crc kubenswrapper[4780]: I1210 10:45:23.123554 4780 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 10 10:45:23 crc kubenswrapper[4780]: I1210 10:45:23.123717 4780 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 10 10:45:23 crc kubenswrapper[4780]: I1210 10:45:23.124695 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:23 crc kubenswrapper[4780]: I1210 10:45:23.124732 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:23 crc kubenswrapper[4780]: I1210 10:45:23.124745 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:23 crc kubenswrapper[4780]: I1210 10:45:23.124805 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:23 crc kubenswrapper[4780]: I1210 10:45:23.124834 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:23 crc kubenswrapper[4780]: I1210 10:45:23.124847 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:23 crc kubenswrapper[4780]: I1210 10:45:23.125648 4780 scope.go:117] "RemoveContainer" containerID="f71a2483b5040f737133b92baa6ea162f365c492a0d860a50ba8eabd7ca23519" Dec 10 10:45:23 crc kubenswrapper[4780]: E1210 10:45:23.125912 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver-check-endpoints\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\"" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" Dec 10 10:45:23 crc kubenswrapper[4780]: I1210 10:45:23.509587 4780 reflector.go:368] Caches populated for *v1.RuntimeClass from k8s.io/client-go/informers/factory.go:160 Dec 10 10:45:26 crc kubenswrapper[4780]: E1210 10:45:26.015161 4780 eviction_manager.go:285] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found" Dec 10 10:45:26 crc kubenswrapper[4780]: I1210 10:45:26.745378 4780 reflector.go:368] Caches populated for *v1.Node from k8s.io/client-go/informers/factory.go:160 Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.109025 4780 apiserver.go:52] "Watching apiserver" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.113953 4780 reflector.go:368] Caches populated for *v1.Pod from pkg/kubelet/config/apiserver.go:66 Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.114818 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-network-operator/iptables-alerter-4ln5h","openshift-network-operator/network-operator-58b4c7f79c-55gtf","openshift-ovn-kubernetes/ovnkube-node-fpl55","openshift-dns/node-resolver-2lx8w","openshift-network-console/networking-console-plugin-85b44fc459-gdk6g","openshift-network-diagnostics/network-check-source-55646444c4-trplf","openshift-network-diagnostics/network-check-target-xd92c","openshift-network-node-identity/network-node-identity-vrzqb","openshift-machine-config-operator/machine-config-daemon-xhdr5","openshift-multus/multus-8cwb7","openshift-multus/multus-additional-cni-plugins-rwfxn"] Dec 10 10:45:27 crc 
kubenswrapper[4780]: I1210 10:45:27.115991 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.116003 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Dec 10 10:45:27 crc kubenswrapper[4780]: E1210 10:45:27.116201 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.117259 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 10 10:45:27 crc kubenswrapper[4780]: E1210 10:45:27.117318 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.117655 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-node-identity/network-node-identity-vrzqb" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.118303 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/iptables-alerter-4ln5h" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.118832 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"kube-root-ca.crt" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.119401 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-operator"/"metrics-tls" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.119476 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"openshift-service-ca.crt" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.119579 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"openshift-service-ca.crt" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.119749 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-node-identity"/"network-node-identity-cert" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.120026 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.120159 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-dns/node-resolver-2lx8w" Dec 10 10:45:27 crc kubenswrapper[4780]: E1210 10:45:27.120226 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.120562 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.120585 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"kube-root-ca.crt" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.120724 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"env-overrides" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.120713 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"ovnkube-identity-cm" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.120802 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-8cwb7" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.121286 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-additional-cni-plugins-rwfxn" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.121353 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-fpl55" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.123381 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"iptables-alerter-script" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.123773 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"cni-copy-resources" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.123849 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"node-resolver-dockercfg-kz9s7" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.124004 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"multus-daemon-config" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.125375 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"env-overrides" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.125821 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"openshift-service-ca.crt" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.125847 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-rbac-proxy" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.127269 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-root-ca.crt" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.127399 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-script-lib" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.127647 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-daemon-dockercfg-r5tcq" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.127792 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"openshift-service-ca.crt" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.127655 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"kube-root-ca.crt" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.129686 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"kube-root-ca.crt" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.129789 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"default-dockercfg-2q5b6" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.129865 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-node-metrics-cert" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.130133 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"openshift-service-ca.crt" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.130393 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"proxy-tls" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.131006 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-config" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.131286 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"default-cni-sysctl-allowlist" Dec 10 10:45:27 crc 
kubenswrapper[4780]: I1210 10:45:27.131380 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ancillary-tools-dockercfg-vnmsz" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.131625 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"openshift-service-ca.crt" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.131995 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-node-dockercfg-pwtwl" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.136414 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"kube-root-ca.crt" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.139874 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.152472 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.156052 4780 desired_state_of_world_populator.go:154] "Finished populating initial desired state of world" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.167283 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.179602 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.193814 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-8cwb7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"deadb49b-61b8-435f-8168-d7bd3c01b5ad\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r67pg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:27Z\\\"}}\" for pod \"openshift-multus\"/\"multus-8cwb7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.207214 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-rwfxn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e522fb8-b104-4f14-a3a2-628fbe0ef36c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin 
routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"last
State\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:27Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-rwfxn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 
127.0.0.1:9743: connect: connection refused" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.217891 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.227247 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-2lx8w" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8a3251eb-408c-42f1-b74d-261cb45eab71\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with 
unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lgkg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:27Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-2lx8w\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.231082 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls\") pod \"87cf06ed-a83f-41a7-828d-70653580a8cb\" (UID: \"87cf06ed-a83f-41a7-828d-70653580a8cb\") " Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.231125 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert\") pod \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\" (UID: \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\") " Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.231141 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs\") pod \"5b88f790-22fa-440e-b583-365168c0b23d\" (UID: \"5b88f790-22fa-440e-b583-365168c0b23d\") " Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.231176 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.231197 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.231214 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config\") pod \"fda69060-fa79-4696-b1a6-7980f124bf7c\" (UID: \"fda69060-fa79-4696-b1a6-7980f124bf7c\") " Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.231253 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started 
for volume \"kube-api-access-mnrrd\" (UniqueName: \"kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd\") pod \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\" (UID: \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\") " Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.231291 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.231312 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6ccd8\" (UniqueName: \"kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.231332 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert\") pod \"e7e6199b-1264-4501-8953-767f51328d08\" (UID: \"e7e6199b-1264-4501-8953-767f51328d08\") " Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.231348 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.231387 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zgdk5\" (UniqueName: \"kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.231416 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.231440 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access\") pod \"496e6271-fb68-4057-954e-a0d97a4afa3f\" (UID: \"496e6271-fb68-4057-954e-a0d97a4afa3f\") " Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.231472 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access\") pod \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\" (UID: \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\") " Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.231493 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mg5zb\" (UniqueName: \"kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.231758 4780 reconciler_common.go:159] 
"operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.231809 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7c4vf\" (UniqueName: \"kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.231829 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.231846 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access\") pod \"0b78653f-4ff9-4508-8672-245ed9b561e3\" (UID: \"0b78653f-4ff9-4508-8672-245ed9b561e3\") " Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.231830 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" (UID: "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.231862 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rnphk\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.231977 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fqsjt\" (UniqueName: \"kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt\") pod \"efdd0498-1daa-4136-9a4a-3b948c2293fc\" (UID: \"efdd0498-1daa-4136-9a4a-3b948c2293fc\") " Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.232006 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.232049 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pcxfs\" (UniqueName: \"kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.232041 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls" (OuterVolumeSpecName: "metrics-tls") pod 
"87cf06ed-a83f-41a7-828d-70653580a8cb" (UID: "87cf06ed-a83f-41a7-828d-70653580a8cb"). InnerVolumeSpecName "metrics-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.232071 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs\") pod \"efdd0498-1daa-4136-9a4a-3b948c2293fc\" (UID: \"efdd0498-1daa-4136-9a4a-3b948c2293fc\") " Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.232234 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x2m85\" (UniqueName: \"kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85\") pod \"cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d\" (UID: \"cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d\") " Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.232306 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key\") pod \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\" (UID: \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\") " Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.232373 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.232449 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lzf88\" (UniqueName: \"kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88\") pod \"0b574797-001e-440a-8f4e-c0be86edad0f\" (UID: \"0b574797-001e-440a-8f4e-c0be86edad0f\") " Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.232492 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.232563 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.232268 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk" (OuterVolumeSpecName: "kube-api-access-rnphk") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "kube-api-access-rnphk". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.234152 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "1386a44e-36a2-460c-96d0-0359d2b6f0f5" (UID: "1386a44e-36a2-460c-96d0-0359d2b6f0f5"). InnerVolumeSpecName "kube-api-access". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.234239 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config" (OuterVolumeSpecName: "console-oauth-config") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "console-oauth-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.234245 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config" (OuterVolumeSpecName: "mcd-auth-proxy-config") pod "fda69060-fa79-4696-b1a6-7980f124bf7c" (UID: "fda69060-fa79-4696-b1a6-7980f124bf7c"). InnerVolumeSpecName "mcd-auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.232528 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca" (OuterVolumeSpecName: "client-ca") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.232839 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key" (OuterVolumeSpecName: "signing-key") pod "25e176fe-21b4-4974-b1ed-c8b94f112a7f" (UID: "25e176fe-21b4-4974-b1ed-c8b94f112a7f"). InnerVolumeSpecName "signing-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.233025 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88" (OuterVolumeSpecName: "kube-api-access-lzf88") pod "0b574797-001e-440a-8f4e-c0be86edad0f" (UID: "0b574797-001e-440a-8f4e-c0be86edad0f"). InnerVolumeSpecName "kube-api-access-lzf88". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.233104 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth" (OuterVolumeSpecName: "stats-auth") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "stats-auth". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.233110 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs" (OuterVolumeSpecName: "kube-api-access-pcxfs") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "kube-api-access-pcxfs". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.233235 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 10:45:27 crc kubenswrapper[4780]: E1210 10:45:27.233340 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-10 10:45:27.733284831 +0000 UTC m=+32.586678334 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.234453 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb" (OuterVolumeSpecName: "kube-api-access-mg5zb") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "kube-api-access-mg5zb". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.234482 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w9rds\" (UniqueName: \"kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds\") pod \"20b0d48f-5fd6-431c-a545-e3c800c7b866\" (UID: \"20b0d48f-5fd6-431c-a545-e3c800c7b866\") " Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.234519 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.234541 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.233325 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs" (OuterVolumeSpecName: "metrics-certs") pod "5b88f790-22fa-440e-b583-365168c0b23d" (UID: "5b88f790-22fa-440e-b583-365168c0b23d"). InnerVolumeSpecName "metrics-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.233342 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "bound-sa-token". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.233619 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5" (OuterVolumeSpecName: "kube-api-access-zgdk5") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "kube-api-access-zgdk5". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.233568 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8" (OuterVolumeSpecName: "kube-api-access-6ccd8") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "kube-api-access-6ccd8". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.233652 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config" (OuterVolumeSpecName: "encryption-config") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "encryption-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.234588 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85" (OuterVolumeSpecName: "kube-api-access-x2m85") pod "cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d" (UID: "cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d"). InnerVolumeSpecName "kube-api-access-x2m85". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.234561 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.234666 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert\") pod \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\" (UID: \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\") " Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.234708 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs\") pod \"5fe579f8-e8a6-4643-bce5-a661393c4dde\" (UID: \"5fe579f8-e8a6-4643-bce5-a661393c4dde\") " Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.234742 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert\") pod \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\" (UID: \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\") " Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.234771 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cfbct\" (UniqueName: \"kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct\") pod \"57a731c4-ef35-47a8-b875-bfb08a7f8011\" (UID: \"57a731c4-ef35-47a8-b875-bfb08a7f8011\") " Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.234808 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.234839 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sb6h7\" (UniqueName: \"kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.234866 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert\") pod \"7539238d-5fe0-46ed-884e-1c3b566537ec\" (UID: \"7539238d-5fe0-46ed-884e-1c3b566537ec\") " Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.234901 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.234961 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: 
\"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.234985 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert\") pod \"20b0d48f-5fd6-431c-a545-e3c800c7b866\" (UID: \"20b0d48f-5fd6-431c-a545-e3c800c7b866\") " Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.235017 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-v47cf\" (UniqueName: \"kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.235049 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-249nr\" (UniqueName: \"kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr\") pod \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\" (UID: \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\") " Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.235073 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.235106 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.235128 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.233728 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd" (OuterVolumeSpecName: "kube-api-access-mnrrd") pod "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" (UID: "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d"). InnerVolumeSpecName "kube-api-access-mnrrd". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.233756 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt" (OuterVolumeSpecName: "kube-api-access-fqsjt") pod "efdd0498-1daa-4136-9a4a-3b948c2293fc" (UID: "efdd0498-1daa-4136-9a4a-3b948c2293fc"). InnerVolumeSpecName "kube-api-access-fqsjt". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.233798 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images" (OuterVolumeSpecName: "images") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "images". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.233820 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate" (OuterVolumeSpecName: "default-certificate") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "default-certificate". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.234050 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "e7e6199b-1264-4501-8953-767f51328d08" (UID: "e7e6199b-1264-4501-8953-767f51328d08"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.232378 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs" (OuterVolumeSpecName: "webhook-certs") pod "efdd0498-1daa-4136-9a4a-3b948c2293fc" (UID: "efdd0498-1daa-4136-9a4a-3b948c2293fc"). InnerVolumeSpecName "webhook-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.234332 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca" (OuterVolumeSpecName: "etcd-ca") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "etcd-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.234810 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds" (OuterVolumeSpecName: "kube-api-access-w9rds") pod "20b0d48f-5fd6-431c-a545-e3c800c7b866" (UID: "20b0d48f-5fd6-431c-a545-e3c800c7b866"). InnerVolumeSpecName "kube-api-access-w9rds". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.234889 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.235141 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs" (OuterVolumeSpecName: "certs") pod "5fe579f8-e8a6-4643-bce5-a661393c4dde" (UID: "5fe579f8-e8a6-4643-bce5-a661393c4dde"). InnerVolumeSpecName "certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.235531 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "1386a44e-36a2-460c-96d0-0359d2b6f0f5" (UID: "1386a44e-36a2-460c-96d0-0359d2b6f0f5"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.235153 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.235591 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert\") pod \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\" (UID: \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\") " Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.235648 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d6qdx\" (UniqueName: \"kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx\") pod \"87cf06ed-a83f-41a7-828d-70653580a8cb\" (UID: \"87cf06ed-a83f-41a7-828d-70653580a8cb\") " Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.235679 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.235707 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.235760 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.235808 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert\") pod \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\" (UID: \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\") " Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.235821 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides" (OuterVolumeSpecName: "env-overrides") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "env-overrides". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.235835 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.235847 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config" (OuterVolumeSpecName: "config") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.235861 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities\") pod \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\" (UID: \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\") " Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.236072 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert" (OuterVolumeSpecName: "profile-collector-cert") pod "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" (UID: "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9"). InnerVolumeSpecName "profile-collector-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.236201 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data" (OuterVolumeSpecName: "v4-0-config-user-idp-0-file-data") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-idp-0-file-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.236207 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.236253 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config\") pod \"7539238d-5fe0-46ed-884e-1c3b566537ec\" (UID: \"7539238d-5fe0-46ed-884e-1c3b566537ec\") " Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.236284 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.236301 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.236326 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.236342 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.236475 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities" (OuterVolumeSpecName: "utilities") pod "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" (UID: "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.236494 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert" (OuterVolumeSpecName: "srv-cert") pod "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" (UID: "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9"). InnerVolumeSpecName "srv-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.236516 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config" (OuterVolumeSpecName: "console-config") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "console-config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.236736 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct" (OuterVolumeSpecName: "kube-api-access-cfbct") pod "57a731c4-ef35-47a8-b875-bfb08a7f8011" (UID: "57a731c4-ef35-47a8-b875-bfb08a7f8011"). InnerVolumeSpecName "kube-api-access-cfbct". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.236749 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7" (OuterVolumeSpecName: "kube-api-access-sb6h7") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "kube-api-access-sb6h7". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.236774 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf" (OuterVolumeSpecName: "kube-api-access-v47cf") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "kube-api-access-v47cf". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.236847 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca" (OuterVolumeSpecName: "client-ca") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.236947 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config" (OuterVolumeSpecName: "encryption-config") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "encryption-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.237004 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs" (OuterVolumeSpecName: "v4-0-config-system-router-certs") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-router-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.237315 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config" (OuterVolumeSpecName: "config") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.237348 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "7539238d-5fe0-46ed-884e-1c3b566537ec" (UID: "7539238d-5fe0-46ed-884e-1c3b566537ec"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.237349 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr" (OuterVolumeSpecName: "kube-api-access-249nr") pod "b6312bbd-5731-4ea0-a20f-81d5a57df44a" (UID: "b6312bbd-5731-4ea0-a20f-81d5a57df44a"). InnerVolumeSpecName "kube-api-access-249nr". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.237389 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx" (OuterVolumeSpecName: "kube-api-access-d6qdx") pod "87cf06ed-a83f-41a7-828d-70653580a8cb" (UID: "87cf06ed-a83f-41a7-828d-70653580a8cb"). InnerVolumeSpecName "kube-api-access-d6qdx". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.237540 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert" (OuterVolumeSpecName: "cert") pod "20b0d48f-5fd6-431c-a545-e3c800c7b866" (UID: "20b0d48f-5fd6-431c-a545-e3c800c7b866"). InnerVolumeSpecName "cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.237767 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.237891 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "0b78653f-4ff9-4508-8672-245ed9b561e3" (UID: "0b78653f-4ff9-4508-8672-245ed9b561e3"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.237935 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config" (OuterVolumeSpecName: "config") pod "7539238d-5fe0-46ed-884e-1c3b566537ec" (UID: "7539238d-5fe0-46ed-884e-1c3b566537ec"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.237973 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert" (OuterVolumeSpecName: "package-server-manager-serving-cert") pod "3ab1a177-2de0-46d9-b765-d0d0649bb42e" (UID: "3ab1a177-2de0-46d9-b765-d0d0649bb42e"). 
InnerVolumeSpecName "package-server-manager-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.238049 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets" (OuterVolumeSpecName: "installation-pull-secrets") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "installation-pull-secrets". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.238114 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "496e6271-fb68-4057-954e-a0d97a4afa3f" (UID: "496e6271-fb68-4057-954e-a0d97a4afa3f"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.238123 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config" (OuterVolumeSpecName: "config") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.238141 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.238326 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.236359 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bf2bz\" (UniqueName: \"kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz\") pod \"1d611f23-29be-4491-8495-bee1670e935f\" (UID: \"1d611f23-29be-4491-8495-bee1670e935f\") " Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.238824 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.238682 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "trusted-ca-bundle". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.238946 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.238970 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca\") pod \"0b78653f-4ff9-4508-8672-245ed9b561e3\" (UID: \"0b78653f-4ff9-4508-8672-245ed9b561e3\") " Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.239224 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.239351 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.239243 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca" (OuterVolumeSpecName: "etcd-service-ca") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "etcd-service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.239390 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection" (OuterVolumeSpecName: "v4-0-config-user-template-provider-selection") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-template-provider-selection". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.239414 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz" (OuterVolumeSpecName: "kube-api-access-bf2bz") pod "1d611f23-29be-4491-8495-bee1670e935f" (UID: "1d611f23-29be-4491-8495-bee1670e935f"). InnerVolumeSpecName "kube-api-access-bf2bz". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.239436 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content\") pod \"1d611f23-29be-4491-8495-bee1670e935f\" (UID: \"1d611f23-29be-4491-8495-bee1670e935f\") " Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.239637 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls\") pod \"6731426b-95fe-49ff-bb5f-40441049fde2\" (UID: \"6731426b-95fe-49ff-bb5f-40441049fde2\") " Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.239667 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.239695 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.239726 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates\") pod \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\" (UID: \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\") " Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.239760 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.239788 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.239871 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.239916 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.239987 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"profile-collector-cert\" (UniqueName: 
\"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert\") pod \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\" (UID: \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\") " Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.240005 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls\") pod \"0b574797-001e-440a-8f4e-c0be86edad0f\" (UID: \"0b574797-001e-440a-8f4e-c0be86edad0f\") " Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.240022 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config\") pod \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\" (UID: \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\") " Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.240039 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.240064 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.240088 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config\") pod \"4bb40260-dbaa-4fb0-84df-5e680505d512\" (UID: \"4bb40260-dbaa-4fb0-84df-5e680505d512\") " Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.240117 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls\") pod \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\" (UID: \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\") " Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.240135 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jkwtn\" (UniqueName: \"kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn\") pod \"5b88f790-22fa-440e-b583-365168c0b23d\" (UID: \"5b88f790-22fa-440e-b583-365168c0b23d\") " Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.240160 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tk88c\" (UniqueName: \"kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c\") pod \"7539238d-5fe0-46ed-884e-1c3b566537ec\" (UID: \"7539238d-5fe0-46ed-884e-1c3b566537ec\") " Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.240194 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.240211 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started 
for volume \"kube-api-access-279lb\" (UniqueName: \"kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb\") pod \"7bb08738-c794-4ee8-9972-3a62ca171029\" (UID: \"7bb08738-c794-4ee8-9972-3a62ca171029\") " Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.240229 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities\") pod \"5225d0e4-402f-4861-b410-819f433b1803\" (UID: \"5225d0e4-402f-4861-b410-819f433b1803\") " Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.240244 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.240264 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.240280 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy\") pod \"4bb40260-dbaa-4fb0-84df-5e680505d512\" (UID: \"4bb40260-dbaa-4fb0-84df-5e680505d512\") " Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.240298 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8tdtz\" (UniqueName: \"kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.240319 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config\") pod \"e7e6199b-1264-4501-8953-767f51328d08\" (UID: \"e7e6199b-1264-4501-8953-767f51328d08\") " Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.240337 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.240353 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.240379 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities\") pod \"57a731c4-ef35-47a8-b875-bfb08a7f8011\" (UID: \"57a731c4-ef35-47a8-b875-bfb08a7f8011\") " Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.240397 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume 
\"metrics-tls\" (UniqueName: \"kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.240420 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config\") pod \"0b574797-001e-440a-8f4e-c0be86edad0f\" (UID: \"0b574797-001e-440a-8f4e-c0be86edad0f\") " Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.240437 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token\") pod \"5fe579f8-e8a6-4643-bce5-a661393c4dde\" (UID: \"5fe579f8-e8a6-4643-bce5-a661393c4dde\") " Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.240456 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x4zgh\" (UniqueName: \"kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh\") pod \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\" (UID: \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\") " Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.240471 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pjr6v\" (UniqueName: \"kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v\") pod \"49ef4625-1d3a-4a9f-b595-c2433d32326d\" (UID: \"49ef4625-1d3a-4a9f-b595-c2433d32326d\") " Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.240489 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.240506 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6g6sz\" (UniqueName: \"kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.240521 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w4xd4\" (UniqueName: \"kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4\") pod \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\" (UID: \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\") " Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.240539 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.240555 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config\") pod \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\" (UID: \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\") " Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.240571 4780 
reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config\") pod \"01ab3dd5-8196-46d0-ad33-122e2ca51def\" (UID: \"01ab3dd5-8196-46d0-ad33-122e2ca51def\") " Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.240594 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert\") pod \"0b78653f-4ff9-4508-8672-245ed9b561e3\" (UID: \"0b78653f-4ff9-4508-8672-245ed9b561e3\") " Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.240644 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.240660 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.240676 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.240693 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qs4fp\" (UniqueName: \"kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp\") pod \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\" (UID: \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\") " Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.240708 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert\") pod \"496e6271-fb68-4057-954e-a0d97a4afa3f\" (UID: \"496e6271-fb68-4057-954e-a0d97a4afa3f\") " Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.240723 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy\") pod \"7bb08738-c794-4ee8-9972-3a62ca171029\" (UID: \"7bb08738-c794-4ee8-9972-3a62ca171029\") " Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.240739 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-s4n52\" (UniqueName: \"kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.240757 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.240772 4780 
reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.240788 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.240804 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qg5z5\" (UniqueName: \"kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.240819 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.240845 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.240867 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.240887 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.240903 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content\") pod \"57a731c4-ef35-47a8-b875-bfb08a7f8011\" (UID: \"57a731c4-ef35-47a8-b875-bfb08a7f8011\") " Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.240976 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.241008 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Dec 10 
10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.241023 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.241061 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.241110 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.241133 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pj782\" (UniqueName: \"kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782\") pod \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\" (UID: \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\") " Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.241151 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.241177 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.241201 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vt5rc\" (UniqueName: \"kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc\") pod \"44663579-783b-4372-86d6-acf235a62d72\" (UID: \"44663579-783b-4372-86d6-acf235a62d72\") " Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.241218 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.241366 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.241394 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert\") pod \"01ab3dd5-8196-46d0-ad33-122e2ca51def\" (UID: 
\"01ab3dd5-8196-46d0-ad33-122e2ca51def\") " Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.241418 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.241461 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nzwt7\" (UniqueName: \"kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7\") pod \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\" (UID: \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\") " Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.241500 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9xfj7\" (UniqueName: \"kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7\") pod \"5225d0e4-402f-4861-b410-819f433b1803\" (UID: \"5225d0e4-402f-4861-b410-819f433b1803\") " Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.241520 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content\") pod \"5225d0e4-402f-4861-b410-819f433b1803\" (UID: \"5225d0e4-402f-4861-b410-819f433b1803\") " Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.241582 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.241599 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist\") pod \"7bb08738-c794-4ee8-9972-3a62ca171029\" (UID: \"7bb08738-c794-4ee8-9972-3a62ca171029\") " Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.241616 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls\") pod \"fda69060-fa79-4696-b1a6-7980f124bf7c\" (UID: \"fda69060-fa79-4696-b1a6-7980f124bf7c\") " Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.241644 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert\") pod \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\" (UID: \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\") " Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.241661 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics\") pod \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\" (UID: \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\") " Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.241683 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume\") pod 
\"87cf06ed-a83f-41a7-828d-70653580a8cb\" (UID: \"87cf06ed-a83f-41a7-828d-70653580a8cb\") " Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.241700 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.241716 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.241720 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls" (OuterVolumeSpecName: "proxy-tls") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "proxy-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.241747 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert\") pod \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\" (UID: \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\") " Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.241772 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jhbk2\" (UniqueName: \"kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2\") pod \"bd23aa5c-e532-4e53-bccf-e79f130c5ae8\" (UID: \"bd23aa5c-e532-4e53-bccf-e79f130c5ae8\") " Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.241823 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zkvpv\" (UniqueName: \"kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.241857 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xcgwh\" (UniqueName: \"kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh\") pod \"fda69060-fa79-4696-b1a6-7980f124bf7c\" (UID: \"fda69060-fa79-4696-b1a6-7980f124bf7c\") " Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.241889 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.241939 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls\") pod \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\" (UID: \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\") " Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.241965 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"signing-cabundle\" (UniqueName: 
\"kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle\") pod \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\" (UID: \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\") " Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.242015 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.242036 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca" (OuterVolumeSpecName: "service-ca") pod "0b78653f-4ff9-4508-8672-245ed9b561e3" (UID: "0b78653f-4ff9-4508-8672-245ed9b561e3"). InnerVolumeSpecName "service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.242076 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2d4wz\" (UniqueName: \"kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.242098 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.242117 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.242179 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities\") pod \"1d611f23-29be-4491-8495-bee1670e935f\" (UID: \"1d611f23-29be-4491-8495-bee1670e935f\") " Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.242204 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4d4hj\" (UniqueName: \"kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj\") pod \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\" (UID: \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\") " Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.242317 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lz9wn\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.242341 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert\") pod \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\" (UID: \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\") " Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.242363 4780 reconciler_common.go:159] 
"operationExecutor.UnmountVolume started for volume \"kube-api-access-xcphl\" (UniqueName: \"kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.242379 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca\") pod \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\" (UID: \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\") " Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.242408 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wxkg8\" (UniqueName: \"kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8\") pod \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\" (UID: \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\") " Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.242427 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.242508 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config" (OuterVolumeSpecName: "config") pod "e7e6199b-1264-4501-8953-767f51328d08" (UID: "e7e6199b-1264-4501-8953-767f51328d08"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.242561 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kfwg7\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.242592 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-htfz6\" (UniqueName: \"kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.242627 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content\") pod \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\" (UID: \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\") " Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.242653 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.242682 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Dec 
10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.242707 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gf66m\" (UniqueName: \"kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m\") pod \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\" (UID: \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\") " Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.242704 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config" (OuterVolumeSpecName: "auth-proxy-config") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.242743 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config\") pod \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\" (UID: \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\") " Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.242771 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.242794 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.242815 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca\") pod \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\" (UID: \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\") " Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.242832 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ngvvp\" (UniqueName: \"kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.242911 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dbsvg\" (UniqueName: \"kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg\") pod \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\" (UID: \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\") " Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.242968 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2w9zh\" (UniqueName: \"kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh\") pod \"4bb40260-dbaa-4fb0-84df-5e680505d512\" (UID: \"4bb40260-dbaa-4fb0-84df-5e680505d512\") " Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.242995 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-script-lib\" 
(UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.243020 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.243045 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.243067 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w7l8j\" (UniqueName: \"kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j\") pod \"01ab3dd5-8196-46d0-ad33-122e2ca51def\" (UID: \"01ab3dd5-8196-46d0-ad33-122e2ca51def\") " Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.243084 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config\") pod \"496e6271-fb68-4057-954e-a0d97a4afa3f\" (UID: \"496e6271-fb68-4057-954e-a0d97a4afa3f\") " Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.243101 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.243118 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d4lsv\" (UniqueName: \"kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv\") pod \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\" (UID: \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\") " Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.243136 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access\") pod \"e7e6199b-1264-4501-8953-767f51328d08\" (UID: \"e7e6199b-1264-4501-8953-767f51328d08\") " Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.243154 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x7zkh\" (UniqueName: \"kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh\") pod \"6731426b-95fe-49ff-bb5f-40441049fde2\" (UID: \"6731426b-95fe-49ff-bb5f-40441049fde2\") " Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.243171 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.243225 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume 
"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs" (OuterVolumeSpecName: "metrics-certs") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "metrics-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.243298 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fcqwp\" (UniqueName: \"kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp\") pod \"5fe579f8-e8a6-4643-bce5-a661393c4dde\" (UID: \"5fe579f8-e8a6-4643-bce5-a661393c4dde\") " Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.243325 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.243484 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/deadb49b-61b8-435f-8168-d7bd3c01b5ad-system-cni-dir\") pod \"multus-8cwb7\" (UID: \"deadb49b-61b8-435f-8168-d7bd3c01b5ad\") " pod="openshift-multus/multus-8cwb7" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.243512 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-multus\" (UniqueName: \"kubernetes.io/host-path/deadb49b-61b8-435f-8168-d7bd3c01b5ad-host-var-lib-cni-multus\") pod \"multus-8cwb7\" (UID: \"deadb49b-61b8-435f-8168-d7bd3c01b5ad\") " pod="openshift-multus/multus-8cwb7" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.243547 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/cc22221d-0c02-4e8c-8314-c2e6d9290b5e-node-log\") pod \"ovnkube-node-fpl55\" (UID: \"cc22221d-0c02-4e8c-8314-c2e6d9290b5e\") " pod="openshift-ovn-kubernetes/ovnkube-node-fpl55" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.243564 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/5e522fb8-b104-4f14-a3a2-628fbe0ef36c-os-release\") pod \"multus-additional-cni-plugins-rwfxn\" (UID: \"5e522fb8-b104-4f14-a3a2-628fbe0ef36c\") " pod="openshift-multus/multus-additional-cni-plugins-rwfxn" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.243581 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/deadb49b-61b8-435f-8168-d7bd3c01b5ad-host-var-lib-kubelet\") pod \"multus-8cwb7\" (UID: \"deadb49b-61b8-435f-8168-d7bd3c01b5ad\") " pod="openshift-multus/multus-8cwb7" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.243597 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-conf-dir\" (UniqueName: \"kubernetes.io/host-path/deadb49b-61b8-435f-8168-d7bd3c01b5ad-multus-conf-dir\") pod \"multus-8cwb7\" (UID: \"deadb49b-61b8-435f-8168-d7bd3c01b5ad\") " pod="openshift-multus/multus-8cwb7" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.243611 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/deadb49b-61b8-435f-8168-d7bd3c01b5ad-multus-daemon-config\") pod \"multus-8cwb7\" (UID: \"deadb49b-61b8-435f-8168-d7bd3c01b5ad\") " pod="openshift-multus/multus-8cwb7" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.243638 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lgkg4\" (UniqueName: \"kubernetes.io/projected/8a3251eb-408c-42f1-b74d-261cb45eab71-kube-api-access-lgkg4\") pod \"node-resolver-2lx8w\" (UID: \"8a3251eb-408c-42f1-b74d-261cb45eab71\") " pod="openshift-dns/node-resolver-2lx8w" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.243661 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/cc22221d-0c02-4e8c-8314-c2e6d9290b5e-ovnkube-script-lib\") pod \"ovnkube-node-fpl55\" (UID: \"cc22221d-0c02-4e8c-8314-c2e6d9290b5e\") " pod="openshift-ovn-kubernetes/ovnkube-node-fpl55" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.243724 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-env-overrides\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.243766 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.243818 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/cc22221d-0c02-4e8c-8314-c2e6d9290b5e-var-lib-openvswitch\") pod \"ovnkube-node-fpl55\" (UID: \"cc22221d-0c02-4e8c-8314-c2e6d9290b5e\") " pod="openshift-ovn-kubernetes/ovnkube-node-fpl55" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.243845 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-etc-kube\" (UniqueName: \"kubernetes.io/host-path/37a5e44f-9a88-4405-be8a-b645485e7312-host-etc-kube\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.243871 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-identity-cm\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-ovnkube-identity-cm\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.243893 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/5e522fb8-b104-4f14-a3a2-628fbe0ef36c-system-cni-dir\") pod \"multus-additional-cni-plugins-rwfxn\" (UID: \"5e522fb8-b104-4f14-a3a2-628fbe0ef36c\") " 
pod="openshift-multus/multus-additional-cni-plugins-rwfxn" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.243913 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/6bf1dca1-b191-4796-b326-baac53e84045-mcd-auth-proxy-config\") pod \"machine-config-daemon-xhdr5\" (UID: \"6bf1dca1-b191-4796-b326-baac53e84045\") " pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.243955 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/ef543e1b-8068-4ea3-b32a-61027b32e95d-webhook-cert\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.243973 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/deadb49b-61b8-435f-8168-d7bd3c01b5ad-cni-binary-copy\") pod \"multus-8cwb7\" (UID: \"deadb49b-61b8-435f-8168-d7bd3c01b5ad\") " pod="openshift-multus/multus-8cwb7" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.243989 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/cc22221d-0c02-4e8c-8314-c2e6d9290b5e-ovnkube-config\") pod \"ovnkube-node-fpl55\" (UID: \"cc22221d-0c02-4e8c-8314-c2e6d9290b5e\") " pod="openshift-ovn-kubernetes/ovnkube-node-fpl55" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.244005 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-x4kzt\" (UniqueName: \"kubernetes.io/projected/5e522fb8-b104-4f14-a3a2-628fbe0ef36c-kube-api-access-x4kzt\") pod \"multus-additional-cni-plugins-rwfxn\" (UID: \"5e522fb8-b104-4f14-a3a2-628fbe0ef36c\") " pod="openshift-multus/multus-additional-cni-plugins-rwfxn" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.244021 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/deadb49b-61b8-435f-8168-d7bd3c01b5ad-cnibin\") pod \"multus-8cwb7\" (UID: \"deadb49b-61b8-435f-8168-d7bd3c01b5ad\") " pod="openshift-multus/multus-8cwb7" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.244039 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-socket-dir-parent\" (UniqueName: \"kubernetes.io/host-path/deadb49b-61b8-435f-8168-d7bd3c01b5ad-multus-socket-dir-parent\") pod \"multus-8cwb7\" (UID: \"deadb49b-61b8-435f-8168-d7bd3c01b5ad\") " pod="openshift-multus/multus-8cwb7" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.244064 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/cc22221d-0c02-4e8c-8314-c2e6d9290b5e-host-run-netns\") pod \"ovnkube-node-fpl55\" (UID: \"cc22221d-0c02-4e8c-8314-c2e6d9290b5e\") " pod="openshift-ovn-kubernetes/ovnkube-node-fpl55" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.244081 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-openvswitch\" (UniqueName: 
\"kubernetes.io/host-path/cc22221d-0c02-4e8c-8314-c2e6d9290b5e-run-openvswitch\") pod \"ovnkube-node-fpl55\" (UID: \"cc22221d-0c02-4e8c-8314-c2e6d9290b5e\") " pod="openshift-ovn-kubernetes/ovnkube-node-fpl55" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.244096 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/cc22221d-0c02-4e8c-8314-c2e6d9290b5e-host-cni-bin\") pod \"ovnkube-node-fpl55\" (UID: \"cc22221d-0c02-4e8c-8314-c2e6d9290b5e\") " pod="openshift-ovn-kubernetes/ovnkube-node-fpl55" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.244094 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert" (OuterVolumeSpecName: "profile-collector-cert") pod "b6312bbd-5731-4ea0-a20f-81d5a57df44a" (UID: "b6312bbd-5731-4ea0-a20f-81d5a57df44a"). InnerVolumeSpecName "profile-collector-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.244120 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-r67pg\" (UniqueName: \"kubernetes.io/projected/deadb49b-61b8-435f-8168-d7bd3c01b5ad-kube-api-access-r67pg\") pod \"multus-8cwb7\" (UID: \"deadb49b-61b8-435f-8168-d7bd3c01b5ad\") " pod="openshift-multus/multus-8cwb7" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.244137 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/cc22221d-0c02-4e8c-8314-c2e6d9290b5e-etc-openvswitch\") pod \"ovnkube-node-fpl55\" (UID: \"cc22221d-0c02-4e8c-8314-c2e6d9290b5e\") " pod="openshift-ovn-kubernetes/ovnkube-node-fpl55" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.244153 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/5e522fb8-b104-4f14-a3a2-628fbe0ef36c-cni-binary-copy\") pod \"multus-additional-cni-plugins-rwfxn\" (UID: \"5e522fb8-b104-4f14-a3a2-628fbe0ef36c\") " pod="openshift-multus/multus-additional-cni-plugins-rwfxn" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.244169 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/cc22221d-0c02-4e8c-8314-c2e6d9290b5e-run-systemd\") pod \"ovnkube-node-fpl55\" (UID: \"cc22221d-0c02-4e8c-8314-c2e6d9290b5e\") " pod="openshift-ovn-kubernetes/ovnkube-node-fpl55" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.244186 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/cc22221d-0c02-4e8c-8314-c2e6d9290b5e-run-ovn\") pod \"ovnkube-node-fpl55\" (UID: \"cc22221d-0c02-4e8c-8314-c2e6d9290b5e\") " pod="openshift-ovn-kubernetes/ovnkube-node-fpl55" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.244442 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert" (OuterVolumeSpecName: "console-serving-cert") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "console-serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.244463 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/cc22221d-0c02-4e8c-8314-c2e6d9290b5e-ovn-node-metrics-cert\") pod \"ovnkube-node-fpl55\" (UID: \"cc22221d-0c02-4e8c-8314-c2e6d9290b5e\") " pod="openshift-ovn-kubernetes/ovnkube-node-fpl55" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.244490 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"iptables-alerter-script\" (UniqueName: \"kubernetes.io/configmap/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-iptables-alerter-script\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.244518 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.244536 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-bin\" (UniqueName: \"kubernetes.io/host-path/deadb49b-61b8-435f-8168-d7bd3c01b5ad-host-var-lib-cni-bin\") pod \"multus-8cwb7\" (UID: \"deadb49b-61b8-435f-8168-d7bd3c01b5ad\") " pod="openshift-multus/multus-8cwb7" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.244583 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.244605 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2kz5\" (UniqueName: \"kubernetes.io/projected/ef543e1b-8068-4ea3-b32a-61027b32e95d-kube-api-access-s2kz5\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.244622 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"hostroot\" (UniqueName: \"kubernetes.io/host-path/deadb49b-61b8-435f-8168-d7bd3c01b5ad-hostroot\") pod \"multus-8cwb7\" (UID: \"deadb49b-61b8-435f-8168-d7bd3c01b5ad\") " pod="openshift-multus/multus-8cwb7" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.244640 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/6bf1dca1-b191-4796-b326-baac53e84045-proxy-tls\") pod \"machine-config-daemon-xhdr5\" (UID: \"6bf1dca1-b191-4796-b326-baac53e84045\") " pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.244687 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rdwmf\" (UniqueName: 
\"kubernetes.io/projected/37a5e44f-9a88-4405-be8a-b645485e7312-kube-api-access-rdwmf\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.244706 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-cni-dir\" (UniqueName: \"kubernetes.io/host-path/deadb49b-61b8-435f-8168-d7bd3c01b5ad-multus-cni-dir\") pod \"multus-8cwb7\" (UID: \"deadb49b-61b8-435f-8168-d7bd3c01b5ad\") " pod="openshift-multus/multus-8cwb7" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.244722 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: \"kubernetes.io/host-path/deadb49b-61b8-435f-8168-d7bd3c01b5ad-host-run-k8s-cni-cncf-io\") pod \"multus-8cwb7\" (UID: \"deadb49b-61b8-435f-8168-d7bd3c01b5ad\") " pod="openshift-multus/multus-8cwb7" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.244747 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-kubernetes\" (UniqueName: \"kubernetes.io/host-path/deadb49b-61b8-435f-8168-d7bd3c01b5ad-etc-kubernetes\") pod \"multus-8cwb7\" (UID: \"deadb49b-61b8-435f-8168-d7bd3c01b5ad\") " pod="openshift-multus/multus-8cwb7" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.244763 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/cc22221d-0c02-4e8c-8314-c2e6d9290b5e-host-slash\") pod \"ovnkube-node-fpl55\" (UID: \"cc22221d-0c02-4e8c-8314-c2e6d9290b5e\") " pod="openshift-ovn-kubernetes/ovnkube-node-fpl55" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.244785 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-whmzl\" (UniqueName: \"kubernetes.io/projected/cc22221d-0c02-4e8c-8314-c2e6d9290b5e-kube-api-access-whmzl\") pod \"ovnkube-node-fpl55\" (UID: \"cc22221d-0c02-4e8c-8314-c2e6d9290b5e\") " pod="openshift-ovn-kubernetes/ovnkube-node-fpl55" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.244802 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/5e522fb8-b104-4f14-a3a2-628fbe0ef36c-cnibin\") pod \"multus-additional-cni-plugins-rwfxn\" (UID: \"5e522fb8-b104-4f14-a3a2-628fbe0ef36c\") " pod="openshift-multus/multus-additional-cni-plugins-rwfxn" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.244821 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rootfs\" (UniqueName: \"kubernetes.io/host-path/6bf1dca1-b191-4796-b326-baac53e84045-rootfs\") pod \"machine-config-daemon-xhdr5\" (UID: \"6bf1dca1-b191-4796-b326-baac53e84045\") " pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.244838 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/deadb49b-61b8-435f-8168-d7bd3c01b5ad-os-release\") pod \"multus-8cwb7\" (UID: \"deadb49b-61b8-435f-8168-d7bd3c01b5ad\") " pod="openshift-multus/multus-8cwb7" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.244853 4780 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-host-slash\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.244872 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/cc22221d-0c02-4e8c-8314-c2e6d9290b5e-log-socket\") pod \"ovnkube-node-fpl55\" (UID: \"cc22221d-0c02-4e8c-8314-c2e6d9290b5e\") " pod="openshift-ovn-kubernetes/ovnkube-node-fpl55" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.244892 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/5e522fb8-b104-4f14-a3a2-628fbe0ef36c-tuning-conf-dir\") pod \"multus-additional-cni-plugins-rwfxn\" (UID: \"5e522fb8-b104-4f14-a3a2-628fbe0ef36c\") " pod="openshift-multus/multus-additional-cni-plugins-rwfxn" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.244938 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/5e522fb8-b104-4f14-a3a2-628fbe0ef36c-cni-sysctl-allowlist\") pod \"multus-additional-cni-plugins-rwfxn\" (UID: \"5e522fb8-b104-4f14-a3a2-628fbe0ef36c\") " pod="openshift-multus/multus-additional-cni-plugins-rwfxn" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.244961 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/cc22221d-0c02-4e8c-8314-c2e6d9290b5e-env-overrides\") pod \"ovnkube-node-fpl55\" (UID: \"cc22221d-0c02-4e8c-8314-c2e6d9290b5e\") " pod="openshift-ovn-kubernetes/ovnkube-node-fpl55" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.244991 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-multus-certs\" (UniqueName: \"kubernetes.io/host-path/deadb49b-61b8-435f-8168-d7bd3c01b5ad-host-run-multus-certs\") pod \"multus-8cwb7\" (UID: \"deadb49b-61b8-435f-8168-d7bd3c01b5ad\") " pod="openshift-multus/multus-8cwb7" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.245018 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/cc22221d-0c02-4e8c-8314-c2e6d9290b5e-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-fpl55\" (UID: \"cc22221d-0c02-4e8c-8314-c2e6d9290b5e\") " pod="openshift-ovn-kubernetes/ovnkube-node-fpl55" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.245041 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sh92h\" (UniqueName: \"kubernetes.io/projected/6bf1dca1-b191-4796-b326-baac53e84045-kube-api-access-sh92h\") pod \"machine-config-daemon-xhdr5\" (UID: \"6bf1dca1-b191-4796-b326-baac53e84045\") " pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.245068 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod 
\"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.245115 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"hosts-file\" (UniqueName: \"kubernetes.io/host-path/8a3251eb-408c-42f1-b74d-261cb45eab71-hosts-file\") pod \"node-resolver-2lx8w\" (UID: \"8a3251eb-408c-42f1-b74d-261cb45eab71\") " pod="openshift-dns/node-resolver-2lx8w" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.245138 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/cc22221d-0c02-4e8c-8314-c2e6d9290b5e-host-kubelet\") pod \"ovnkube-node-fpl55\" (UID: \"cc22221d-0c02-4e8c-8314-c2e6d9290b5e\") " pod="openshift-ovn-kubernetes/ovnkube-node-fpl55" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.245158 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/cc22221d-0c02-4e8c-8314-c2e6d9290b5e-host-cni-netd\") pod \"ovnkube-node-fpl55\" (UID: \"cc22221d-0c02-4e8c-8314-c2e6d9290b5e\") " pod="openshift-ovn-kubernetes/ovnkube-node-fpl55" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.245216 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rczfb\" (UniqueName: \"kubernetes.io/projected/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-kube-api-access-rczfb\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.245269 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/37a5e44f-9a88-4405-be8a-b645485e7312-metrics-tls\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.245340 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/deadb49b-61b8-435f-8168-d7bd3c01b5ad-host-run-netns\") pod \"multus-8cwb7\" (UID: \"deadb49b-61b8-435f-8168-d7bd3c01b5ad\") " pod="openshift-multus/multus-8cwb7" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.245415 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/cc22221d-0c02-4e8c-8314-c2e6d9290b5e-systemd-units\") pod \"ovnkube-node-fpl55\" (UID: \"cc22221d-0c02-4e8c-8314-c2e6d9290b5e\") " pod="openshift-ovn-kubernetes/ovnkube-node-fpl55" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.245454 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/cc22221d-0c02-4e8c-8314-c2e6d9290b5e-host-run-ovn-kubernetes\") pod \"ovnkube-node-fpl55\" (UID: \"cc22221d-0c02-4e8c-8314-c2e6d9290b5e\") " pod="openshift-ovn-kubernetes/ovnkube-node-fpl55" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.245670 4780 reconciler_common.go:293] "Volume detached for volume \"profile-collector-cert\" (UniqueName: 
\"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.245726 4780 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.245746 4780 reconciler_common.go:293] "Volume detached for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.245761 4780 reconciler_common.go:293] "Volume detached for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.245773 4780 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.245789 4780 reconciler_common.go:293] "Volume detached for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.245800 4780 reconciler_common.go:293] "Volume detached for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.245812 4780 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.245822 4780 reconciler_common.go:293] "Volume detached for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.246844 4780 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mnrrd\" (UniqueName: \"kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.246870 4780 reconciler_common.go:293] "Volume detached for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.246886 4780 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6ccd8\" (UniqueName: \"kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.246900 4780 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.246915 4780 reconciler_common.go:293] "Volume detached for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config\") on 
node \"crc\" DevicePath \"\"" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.246991 4780 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zgdk5\" (UniqueName: \"kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.247007 4780 reconciler_common.go:293] "Volume detached for volume \"images\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.247021 4780 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.247036 4780 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.247050 4780 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mg5zb\" (UniqueName: \"kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.247064 4780 reconciler_common.go:293] "Volume detached for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.247078 4780 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.247092 4780 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rnphk\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.247105 4780 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fqsjt\" (UniqueName: \"kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.247118 4780 reconciler_common.go:293] "Volume detached for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.247146 4780 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pcxfs\" (UniqueName: \"kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.247159 4780 reconciler_common.go:293] "Volume detached for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.247224 4780 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x2m85\" (UniqueName: \"kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85\") on node 
\"crc\" DevicePath \"\"" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.247270 4780 reconciler_common.go:293] "Volume detached for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.247360 4780 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.247392 4780 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lzf88\" (UniqueName: \"kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.247445 4780 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.247471 4780 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.247493 4780 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w9rds\" (UniqueName: \"kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.247508 4780 reconciler_common.go:293] "Volume detached for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.247523 4780 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.247545 4780 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.247631 4780 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.247651 4780 reconciler_common.go:293] "Volume detached for volume \"certs\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.247664 4780 reconciler_common.go:293] "Volume detached for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.247678 4780 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cfbct\" (UniqueName: \"kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 
10:45:27.247692 4780 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.247708 4780 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sb6h7\" (UniqueName: \"kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.247723 4780 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.247736 4780 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.247749 4780 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.247762 4780 reconciler_common.go:293] "Volume detached for volume \"cert\" (UniqueName: \"kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.247774 4780 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-v47cf\" (UniqueName: \"kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.247787 4780 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-249nr\" (UniqueName: \"kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.247838 4780 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.247851 4780 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.247865 4780 reconciler_common.go:293] "Volume detached for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.247879 4780 reconciler_common.go:293] "Volume detached for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.247943 4780 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d6qdx\" (UniqueName: \"kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx\") on node 
\"crc\" DevicePath \"\"" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.247955 4780 reconciler_common.go:293] "Volume detached for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.247965 4780 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.248287 4780 reconciler_common.go:293] "Volume detached for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.248309 4780 reconciler_common.go:293] "Volume detached for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.248325 4780 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.256019 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-fpl55" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cc22221d-0c02-4e8c-8314-c2e6d9290b5e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with incomplete status: [kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:27Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-fpl55\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.244815 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client" (OuterVolumeSpecName: "etcd-client") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "etcd-client". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.245389 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates" (OuterVolumeSpecName: "available-featuregates") pod "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" (UID: "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d"). InnerVolumeSpecName "available-featuregates". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.245441 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities" (OuterVolumeSpecName: "utilities") pod "57a731c4-ef35-47a8-b875-bfb08a7f8011" (UID: "57a731c4-ef35-47a8-b875-bfb08a7f8011"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.245669 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs" (OuterVolumeSpecName: "tmpfs") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "tmpfs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.246747 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls" (OuterVolumeSpecName: "metrics-tls") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "metrics-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.247782 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config" (OuterVolumeSpecName: "ovnkube-config") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "ovnkube-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.247854 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca" (OuterVolumeSpecName: "etcd-serving-ca") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "etcd-serving-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.248110 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls" (OuterVolumeSpecName: "control-plane-machine-set-operator-tls") pod "6731426b-95fe-49ff-bb5f-40441049fde2" (UID: "6731426b-95fe-49ff-bb5f-40441049fde2"). InnerVolumeSpecName "control-plane-machine-set-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.248238 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf" (OuterVolumeSpecName: "kube-api-access-7c4vf") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). 
InnerVolumeSpecName "kube-api-access-7c4vf". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.248614 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.248639 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.248603 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz" (OuterVolumeSpecName: "kube-api-access-8tdtz") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "kube-api-access-8tdtz". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.248885 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb" (OuterVolumeSpecName: "kube-api-access-279lb") pod "7bb08738-c794-4ee8-9972-3a62ca171029" (UID: "7bb08738-c794-4ee8-9972-3a62ca171029"). InnerVolumeSpecName "kube-api-access-279lb". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.249076 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies" (OuterVolumeSpecName: "audit-policies") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "audit-policies". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.249162 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token" (OuterVolumeSpecName: "node-bootstrap-token") pod "5fe579f8-e8a6-4643-bce5-a661393c4dde" (UID: "5fe579f8-e8a6-4643-bce5-a661393c4dde"). InnerVolumeSpecName "node-bootstrap-token". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.249298 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls" (OuterVolumeSpecName: "machine-api-operator-tls") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "machine-api-operator-tls". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.249439 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config" (OuterVolumeSpecName: "config") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.249506 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7" (OuterVolumeSpecName: "kube-api-access-9xfj7") pod "5225d0e4-402f-4861-b410-819f433b1803" (UID: "5225d0e4-402f-4861-b410-819f433b1803"). InnerVolumeSpecName "kube-api-access-9xfj7". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.249635 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7" (OuterVolumeSpecName: "kube-api-access-nzwt7") pod "96b93a3a-6083-4aea-8eab-fe1aa8245ad9" (UID: "96b93a3a-6083-4aea-8eab-fe1aa8245ad9"). InnerVolumeSpecName "kube-api-access-nzwt7". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.249736 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp" (OuterVolumeSpecName: "kube-api-access-ngvvp") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "kube-api-access-ngvvp". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.250218 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg" (OuterVolumeSpecName: "kube-api-access-dbsvg") pod "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" (UID: "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9"). InnerVolumeSpecName "kube-api-access-dbsvg". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.250297 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca" (OuterVolumeSpecName: "marketplace-trusted-ca") pod "b6cd30de-2eeb-49a2-ab40-9167f4560ff5" (UID: "b6cd30de-2eeb-49a2-ab40-9167f4560ff5"). InnerVolumeSpecName "marketplace-trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.250527 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh" (OuterVolumeSpecName: "kube-api-access-2w9zh") pod "4bb40260-dbaa-4fb0-84df-5e680505d512" (UID: "4bb40260-dbaa-4fb0-84df-5e680505d512"). InnerVolumeSpecName "kube-api-access-2w9zh". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.251200 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh" (OuterVolumeSpecName: "kube-api-access-x4zgh") pod "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" (UID: "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d"). InnerVolumeSpecName "kube-api-access-x4zgh". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.251619 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle" (OuterVolumeSpecName: "service-ca-bundle") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "service-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.252421 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls" (OuterVolumeSpecName: "proxy-tls") pod "0b574797-001e-440a-8f4e-c0be86edad0f" (UID: "0b574797-001e-440a-8f4e-c0be86edad0f"). InnerVolumeSpecName "proxy-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.252484 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session" (OuterVolumeSpecName: "v4-0-config-system-session") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-session". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.252533 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error" (OuterVolumeSpecName: "v4-0-config-user-template-error") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-template-error". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.253061 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login" (OuterVolumeSpecName: "v4-0-config-user-template-login") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-template-login". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.253072 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config" (OuterVolumeSpecName: "config") pod "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" (UID: "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.253084 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert" (OuterVolumeSpecName: "ovn-control-plane-metrics-cert") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "ovn-control-plane-metrics-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.253034 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config" (OuterVolumeSpecName: "mcc-auth-proxy-config") pod "0b574797-001e-440a-8f4e-c0be86edad0f" (UID: "0b574797-001e-440a-8f4e-c0be86edad0f"). InnerVolumeSpecName "mcc-auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.253542 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert" (OuterVolumeSpecName: "v4-0-config-system-serving-cert") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.253600 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "e7e6199b-1264-4501-8953-767f51328d08" (UID: "e7e6199b-1264-4501-8953-767f51328d08"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.253954 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls" (OuterVolumeSpecName: "registry-tls") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "registry-tls". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.253976 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates" (OuterVolumeSpecName: "registry-certificates") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "registry-certificates". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.254512 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v" (OuterVolumeSpecName: "kube-api-access-pjr6v") pod "49ef4625-1d3a-4a9f-b595-c2433d32326d" (UID: "49ef4625-1d3a-4a9f-b595-c2433d32326d"). InnerVolumeSpecName "kube-api-access-pjr6v". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.254574 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy" (OuterVolumeSpecName: "cni-binary-copy") pod "7bb08738-c794-4ee8-9972-3a62ca171029" (UID: "7bb08738-c794-4ee8-9972-3a62ca171029"). InnerVolumeSpecName "cni-binary-copy". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.254645 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config" (OuterVolumeSpecName: "config") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.255117 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.255324 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j" (OuterVolumeSpecName: "kube-api-access-w7l8j") pod "01ab3dd5-8196-46d0-ad33-122e2ca51def" (UID: "01ab3dd5-8196-46d0-ad33-122e2ca51def"). InnerVolumeSpecName "kube-api-access-w7l8j". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.255288 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.255658 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert" (OuterVolumeSpecName: "webhook-cert") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "webhook-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.255815 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz" (OuterVolumeSpecName: "kube-api-access-6g6sz") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "kube-api-access-6g6sz". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.256122 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib" (OuterVolumeSpecName: "ovnkube-script-lib") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "ovnkube-script-lib". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.256162 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp" (OuterVolumeSpecName: "kube-api-access-qs4fp") pod "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" (UID: "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c"). InnerVolumeSpecName "kube-api-access-qs4fp". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.256167 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4" (OuterVolumeSpecName: "kube-api-access-w4xd4") pod "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" (UID: "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b"). InnerVolumeSpecName "kube-api-access-w4xd4". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.256407 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config" (OuterVolumeSpecName: "config") pod "496e6271-fb68-4057-954e-a0d97a4afa3f" (UID: "496e6271-fb68-4057-954e-a0d97a4afa3f"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.256779 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh" (OuterVolumeSpecName: "kube-api-access-x7zkh") pod "6731426b-95fe-49ff-bb5f-40441049fde2" (UID: "6731426b-95fe-49ff-bb5f-40441049fde2"). InnerVolumeSpecName "kube-api-access-x7zkh". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 10:45:27 crc kubenswrapper[4780]: E1210 10:45:27.259827 4780 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.267168 4780 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.260246 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"iptables-alerter-script\" (UniqueName: \"kubernetes.io/configmap/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-iptables-alerter-script\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.265044 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5" (OuterVolumeSpecName: "kube-api-access-qg5z5") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "kube-api-access-qg5z5". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.265114 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "496e6271-fb68-4057-954e-a0d97a4afa3f" (UID: "496e6271-fb68-4057-954e-a0d97a4afa3f"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.265407 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides" (OuterVolumeSpecName: "env-overrides") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "env-overrides". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.265745 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.266227 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies" (OuterVolumeSpecName: "audit-policies") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "audit-policies". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.266298 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config" (OuterVolumeSpecName: "config") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.266846 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 10:45:27 crc kubenswrapper[4780]: E1210 10:45:27.269990 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-12-10 10:45:27.767408382 +0000 UTC m=+32.620801825 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.267487 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client" (OuterVolumeSpecName: "etcd-client") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "etcd-client". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.271122 4780 swap_util.go:74] "error creating dir to test if tmpfs noswap is enabled. Assuming not supported" mount path="" error="stat /var/lib/kubelet/plugins/kubernetes.io/empty-dir: no such file or directory" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.271272 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities" (OuterVolumeSpecName: "utilities") pod "1d611f23-29be-4491-8495-bee1670e935f" (UID: "1d611f23-29be-4491-8495-bee1670e935f"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.271493 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config" (OuterVolumeSpecName: "multus-daemon-config") pod "4bb40260-dbaa-4fb0-84df-5e680505d512" (UID: "4bb40260-dbaa-4fb0-84df-5e680505d512"). InnerVolumeSpecName "multus-daemon-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.271540 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config" (OuterVolumeSpecName: "auth-proxy-config") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.271671 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls" (OuterVolumeSpecName: "image-registry-operator-tls") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "image-registry-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 10:45:27 crc kubenswrapper[4780]: E1210 10:45:27.274742 4780 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.274771 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities" (OuterVolumeSpecName: "utilities") pod "5225d0e4-402f-4861-b410-819f433b1803" (UID: "5225d0e4-402f-4861-b410-819f433b1803"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 10:45:27 crc kubenswrapper[4780]: E1210 10:45:27.274805 4780 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Dec 10 10:45:27 crc kubenswrapper[4780]: E1210 10:45:27.274834 4780 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Dec 10 10:45:27 crc kubenswrapper[4780]: E1210 10:45:27.274983 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-12-10 10:45:27.774955662 +0000 UTC m=+32.628349105 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.275633 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca" (OuterVolumeSpecName: "service-ca") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.275643 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-env-overrides\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.276477 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782" (OuterVolumeSpecName: "kube-api-access-pj782") pod "b6cd30de-2eeb-49a2-ab40-9167f4560ff5" (UID: "b6cd30de-2eeb-49a2-ab40-9167f4560ff5"). InnerVolumeSpecName "kube-api-access-pj782". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.277302 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls" (OuterVolumeSpecName: "proxy-tls") pod "fda69060-fa79-4696-b1a6-7980f124bf7c" (UID: "fda69060-fa79-4696-b1a6-7980f124bf7c"). InnerVolumeSpecName "proxy-tls". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.277633 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics" (OuterVolumeSpecName: "marketplace-operator-metrics") pod "b6cd30de-2eeb-49a2-ab40-9167f4560ff5" (UID: "b6cd30de-2eeb-49a2-ab40-9167f4560ff5"). InnerVolumeSpecName "marketplace-operator-metrics". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.277822 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" (UID: "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.278379 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config" (OuterVolumeSpecName: "config") pod "01ab3dd5-8196-46d0-ad33-122e2ca51def" (UID: "01ab3dd5-8196-46d0-ad33-122e2ca51def"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.278744 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp" (OuterVolumeSpecName: "kube-api-access-fcqwp") pod "5fe579f8-e8a6-4643-bce5-a661393c4dde" (UID: "5fe579f8-e8a6-4643-bce5-a661393c4dde"). InnerVolumeSpecName "kube-api-access-fcqwp". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.277694 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "0b78653f-4ff9-4508-8672-245ed9b561e3" (UID: "0b78653f-4ff9-4508-8672-245ed9b561e3"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.279464 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca" (OuterVolumeSpecName: "serviceca") pod "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" (UID: "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59"). InnerVolumeSpecName "serviceca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.280098 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52" (OuterVolumeSpecName: "kube-api-access-s4n52") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "kube-api-access-s4n52". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.280387 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume" (OuterVolumeSpecName: "config-volume") pod "87cf06ed-a83f-41a7-828d-70653580a8cb" (UID: "87cf06ed-a83f-41a7-828d-70653580a8cb"). InnerVolumeSpecName "config-volume". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.280898 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.281999 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" (UID: "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.281778 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client" (OuterVolumeSpecName: "etcd-client") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "etcd-client". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.281983 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit" (OuterVolumeSpecName: "audit") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "audit". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.281994 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj" (OuterVolumeSpecName: "kube-api-access-4d4hj") pod "3ab1a177-2de0-46d9-b765-d0d0649bb42e" (UID: "3ab1a177-2de0-46d9-b765-d0d0649bb42e"). InnerVolumeSpecName "kube-api-access-4d4hj". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.282143 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig" (OuterVolumeSpecName: "v4-0-config-system-cliconfig") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-cliconfig". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.282177 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist" (OuterVolumeSpecName: "cni-sysctl-allowlist") pod "7bb08738-c794-4ee8-9972-3a62ca171029" (UID: "7bb08738-c794-4ee8-9972-3a62ca171029"). InnerVolumeSpecName "cni-sysctl-allowlist". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.281484 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls" (OuterVolumeSpecName: "metrics-tls") pod "96b93a3a-6083-4aea-8eab-fe1aa8245ad9" (UID: "96b93a3a-6083-4aea-8eab-fe1aa8245ad9"). InnerVolumeSpecName "metrics-tls". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.282255 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn" (OuterVolumeSpecName: "kube-api-access-lz9wn") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "kube-api-access-lz9wn". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.282421 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c" (OuterVolumeSpecName: "kube-api-access-tk88c") pod "7539238d-5fe0-46ed-884e-1c3b566537ec" (UID: "7539238d-5fe0-46ed-884e-1c3b566537ec"). InnerVolumeSpecName "kube-api-access-tk88c". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.282449 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7" (OuterVolumeSpecName: "kube-api-access-kfwg7") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "kube-api-access-kfwg7". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.282457 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv" (OuterVolumeSpecName: "kube-api-access-d4lsv") pod "25e176fe-21b4-4974-b1ed-c8b94f112a7f" (UID: "25e176fe-21b4-4974-b1ed-c8b94f112a7f"). InnerVolumeSpecName "kube-api-access-d4lsv". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.282650 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.282827 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca" (OuterVolumeSpecName: "v4-0-config-system-service-ca") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.282802 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn" (OuterVolumeSpecName: "kube-api-access-jkwtn") pod "5b88f790-22fa-440e-b583-365168c0b23d" (UID: "5b88f790-22fa-440e-b583-365168c0b23d"). InnerVolumeSpecName "kube-api-access-jkwtn". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.282877 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6" (OuterVolumeSpecName: "kube-api-access-htfz6") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "kube-api-access-htfz6". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.283342 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl" (OuterVolumeSpecName: "kube-api-access-xcphl") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "kube-api-access-xcphl". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.283438 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca" (OuterVolumeSpecName: "etcd-serving-ca") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "etcd-serving-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.283722 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2" (OuterVolumeSpecName: "kube-api-access-jhbk2") pod "bd23aa5c-e532-4e53-bccf-e79f130c5ae8" (UID: "bd23aa5c-e532-4e53-bccf-e79f130c5ae8"). InnerVolumeSpecName "kube-api-access-jhbk2". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 10:45:27 crc kubenswrapper[4780]: E1210 10:45:27.284305 4780 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.284698 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy" (OuterVolumeSpecName: "cni-binary-copy") pod "4bb40260-dbaa-4fb0-84df-5e680505d512" (UID: "4bb40260-dbaa-4fb0-84df-5e680505d512"). InnerVolumeSpecName "cni-binary-copy". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.285106 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images" (OuterVolumeSpecName: "images") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "images". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.284978 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template" (OuterVolumeSpecName: "v4-0-config-system-ocp-branding-template") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-ocp-branding-template". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.284909 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv" (OuterVolumeSpecName: "kube-api-access-zkvpv") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "kube-api-access-zkvpv". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 10:45:27 crc kubenswrapper[4780]: E1210 10:45:27.285540 4780 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.285637 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config" (OuterVolumeSpecName: "config") pod "1386a44e-36a2-460c-96d0-0359d2b6f0f5" (UID: "1386a44e-36a2-460c-96d0-0359d2b6f0f5"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 10:45:27 crc kubenswrapper[4780]: E1210 10:45:27.285659 4780 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.285691 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert" (OuterVolumeSpecName: "ovn-node-metrics-cert") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "ovn-node-metrics-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 10:45:27 crc kubenswrapper[4780]: E1210 10:45:27.285698 4780 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Dec 10 10:45:27 crc kubenswrapper[4780]: E1210 10:45:27.285613 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-12-10 10:45:27.785521039 +0000 UTC m=+32.638914482 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Dec 10 10:45:27 crc kubenswrapper[4780]: E1210 10:45:27.285806 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-12-10 10:45:27.785777055 +0000 UTC m=+32.639170498 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.285876 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-identity-cm\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-ovnkube-identity-cm\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.286075 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz" (OuterVolumeSpecName: "kube-api-access-2d4wz") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "kube-api-access-2d4wz". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.286111 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.285572 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config" (OuterVolumeSpecName: "ovnkube-config") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "ovnkube-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.286457 4780 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.288125 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8" (OuterVolumeSpecName: "kube-api-access-wxkg8") pod "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" (UID: "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59"). InnerVolumeSpecName "kube-api-access-wxkg8". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.288205 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config" (OuterVolumeSpecName: "config") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.289136 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle" (OuterVolumeSpecName: "service-ca-bundle") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "service-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.289332 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config" (OuterVolumeSpecName: "config") pod "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" (UID: "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.289812 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert" (OuterVolumeSpecName: "apiservice-cert") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "apiservice-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.289828 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.289906 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert" (OuterVolumeSpecName: "srv-cert") pod "b6312bbd-5731-4ea0-a20f-81d5a57df44a" (UID: "b6312bbd-5731-4ea0-a20f-81d5a57df44a"). InnerVolumeSpecName "srv-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.289903 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/37a5e44f-9a88-4405-be8a-b645485e7312-metrics-tls\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.293263 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s2kz5\" (UniqueName: \"kubernetes.io/projected/ef543e1b-8068-4ea3-b32a-61027b32e95d-kube-api-access-s2kz5\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.293379 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh" (OuterVolumeSpecName: "kube-api-access-xcgwh") pod "fda69060-fa79-4696-b1a6-7980f124bf7c" (UID: "fda69060-fa79-4696-b1a6-7980f124bf7c"). InnerVolumeSpecName "kube-api-access-xcgwh". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.295167 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "trusted-ca-bundle". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.298870 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/ef543e1b-8068-4ea3-b32a-61027b32e95d-webhook-cert\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.299275 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rczfb\" (UniqueName: \"kubernetes.io/projected/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-kube-api-access-rczfb\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.300725 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rdwmf\" (UniqueName: \"kubernetes.io/projected/37a5e44f-9a88-4405-be8a-b645485e7312-kube-api-access-rdwmf\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.301037 4780 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.301158 4780 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.301679 4780 reconciler_common.go:293] "Volume detached for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.301757 4780 reconciler_common.go:293] "Volume detached for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.301797 4780 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.301848 4780 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bf2bz\" (UniqueName: \"kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.301965 4780 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.301997 4780 reconciler_common.go:293] "Volume detached for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.302024 4780 reconciler_common.go:293] "Volume detached for volume \"console-serving-cert\" 
(UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.302080 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6bf1dca1-b191-4796-b326-baac53e84045\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sh92h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sh92h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:27Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-xhdr5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 
10:45:27.303935 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls" (OuterVolumeSpecName: "samples-operator-tls") pod "a0128f3a-b052-44ed-a84e-c4c8aaf17c13" (UID: "a0128f3a-b052-44ed-a84e-c4c8aaf17c13"). InnerVolumeSpecName "samples-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.304948 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "01ab3dd5-8196-46d0-ad33-122e2ca51def" (UID: "01ab3dd5-8196-46d0-ad33-122e2ca51def"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.305792 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.306047 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc" (OuterVolumeSpecName: "kube-api-access-vt5rc") pod "44663579-783b-4372-86d6-acf235a62d72" (UID: "44663579-783b-4372-86d6-acf235a62d72"). InnerVolumeSpecName "kube-api-access-vt5rc". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.306049 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca" (OuterVolumeSpecName: "image-import-ca") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "image-import-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.306766 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert" (OuterVolumeSpecName: "oauth-serving-cert") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "oauth-serving-cert". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.306890 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle" (OuterVolumeSpecName: "v4-0-config-system-trusted-ca-bundle") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.307076 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls" (OuterVolumeSpecName: "machine-approver-tls") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "machine-approver-tls". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.307341 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m" (OuterVolumeSpecName: "kube-api-access-gf66m") pod "a0128f3a-b052-44ed-a84e-c4c8aaf17c13" (UID: "a0128f3a-b052-44ed-a84e-c4c8aaf17c13"). InnerVolumeSpecName "kube-api-access-gf66m". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.318465 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted" (OuterVolumeSpecName: "ca-trust-extracted") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "ca-trust-extracted". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.325168 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" (UID: "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.325675 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle" (OuterVolumeSpecName: "signing-cabundle") pod "25e176fe-21b4-4974-b1ed-c8b94f112a7f" (UID: "25e176fe-21b4-4974-b1ed-c8b94f112a7f"). InnerVolumeSpecName "signing-cabundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.343230 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config" (OuterVolumeSpecName: "config") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.349146 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.350287 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "5225d0e4-402f-4861-b410-819f433b1803" (UID: "5225d0e4-402f-4861-b410-819f433b1803"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.353397 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "1d611f23-29be-4491-8495-bee1670e935f" (UID: "1d611f23-29be-4491-8495-bee1670e935f"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.356329 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "57a731c4-ef35-47a8-b875-bfb08a7f8011" (UID: "57a731c4-ef35-47a8-b875-bfb08a7f8011"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.387365 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.403097 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/6bf1dca1-b191-4796-b326-baac53e84045-mcd-auth-proxy-config\") pod \"machine-config-daemon-xhdr5\" (UID: \"6bf1dca1-b191-4796-b326-baac53e84045\") " pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.403147 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/deadb49b-61b8-435f-8168-d7bd3c01b5ad-cni-binary-copy\") pod \"multus-8cwb7\" (UID: \"deadb49b-61b8-435f-8168-d7bd3c01b5ad\") " pod="openshift-multus/multus-8cwb7" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.403168 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/cc22221d-0c02-4e8c-8314-c2e6d9290b5e-ovnkube-config\") pod \"ovnkube-node-fpl55\" (UID: \"cc22221d-0c02-4e8c-8314-c2e6d9290b5e\") " pod="openshift-ovn-kubernetes/ovnkube-node-fpl55" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.403188 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/deadb49b-61b8-435f-8168-d7bd3c01b5ad-cnibin\") pod \"multus-8cwb7\" (UID: \"deadb49b-61b8-435f-8168-d7bd3c01b5ad\") " pod="openshift-multus/multus-8cwb7" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.403206 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-socket-dir-parent\" (UniqueName: \"kubernetes.io/host-path/deadb49b-61b8-435f-8168-d7bd3c01b5ad-multus-socket-dir-parent\") pod \"multus-8cwb7\" (UID: \"deadb49b-61b8-435f-8168-d7bd3c01b5ad\") " pod="openshift-multus/multus-8cwb7" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.403220 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/cc22221d-0c02-4e8c-8314-c2e6d9290b5e-host-run-netns\") pod \"ovnkube-node-fpl55\" (UID: \"cc22221d-0c02-4e8c-8314-c2e6d9290b5e\") " pod="openshift-ovn-kubernetes/ovnkube-node-fpl55" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.403235 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-openvswitch\" (UniqueName: 
\"kubernetes.io/host-path/cc22221d-0c02-4e8c-8314-c2e6d9290b5e-run-openvswitch\") pod \"ovnkube-node-fpl55\" (UID: \"cc22221d-0c02-4e8c-8314-c2e6d9290b5e\") " pod="openshift-ovn-kubernetes/ovnkube-node-fpl55" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.403249 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/cc22221d-0c02-4e8c-8314-c2e6d9290b5e-host-cni-bin\") pod \"ovnkube-node-fpl55\" (UID: \"cc22221d-0c02-4e8c-8314-c2e6d9290b5e\") " pod="openshift-ovn-kubernetes/ovnkube-node-fpl55" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.403265 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-x4kzt\" (UniqueName: \"kubernetes.io/projected/5e522fb8-b104-4f14-a3a2-628fbe0ef36c-kube-api-access-x4kzt\") pod \"multus-additional-cni-plugins-rwfxn\" (UID: \"5e522fb8-b104-4f14-a3a2-628fbe0ef36c\") " pod="openshift-multus/multus-additional-cni-plugins-rwfxn" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.403290 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-r67pg\" (UniqueName: \"kubernetes.io/projected/deadb49b-61b8-435f-8168-d7bd3c01b5ad-kube-api-access-r67pg\") pod \"multus-8cwb7\" (UID: \"deadb49b-61b8-435f-8168-d7bd3c01b5ad\") " pod="openshift-multus/multus-8cwb7" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.403306 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/cc22221d-0c02-4e8c-8314-c2e6d9290b5e-etc-openvswitch\") pod \"ovnkube-node-fpl55\" (UID: \"cc22221d-0c02-4e8c-8314-c2e6d9290b5e\") " pod="openshift-ovn-kubernetes/ovnkube-node-fpl55" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.403458 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/deadb49b-61b8-435f-8168-d7bd3c01b5ad-cnibin\") pod \"multus-8cwb7\" (UID: \"deadb49b-61b8-435f-8168-d7bd3c01b5ad\") " pod="openshift-multus/multus-8cwb7" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.403458 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/cc22221d-0c02-4e8c-8314-c2e6d9290b5e-run-openvswitch\") pod \"ovnkube-node-fpl55\" (UID: \"cc22221d-0c02-4e8c-8314-c2e6d9290b5e\") " pod="openshift-ovn-kubernetes/ovnkube-node-fpl55" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.403561 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/cc22221d-0c02-4e8c-8314-c2e6d9290b5e-host-cni-bin\") pod \"ovnkube-node-fpl55\" (UID: \"cc22221d-0c02-4e8c-8314-c2e6d9290b5e\") " pod="openshift-ovn-kubernetes/ovnkube-node-fpl55" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.403768 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-socket-dir-parent\" (UniqueName: \"kubernetes.io/host-path/deadb49b-61b8-435f-8168-d7bd3c01b5ad-multus-socket-dir-parent\") pod \"multus-8cwb7\" (UID: \"deadb49b-61b8-435f-8168-d7bd3c01b5ad\") " pod="openshift-multus/multus-8cwb7" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.403821 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/cc22221d-0c02-4e8c-8314-c2e6d9290b5e-etc-openvswitch\") pod \"ovnkube-node-fpl55\" (UID: 
\"cc22221d-0c02-4e8c-8314-c2e6d9290b5e\") " pod="openshift-ovn-kubernetes/ovnkube-node-fpl55" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.403890 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/5e522fb8-b104-4f14-a3a2-628fbe0ef36c-cni-binary-copy\") pod \"multus-additional-cni-plugins-rwfxn\" (UID: \"5e522fb8-b104-4f14-a3a2-628fbe0ef36c\") " pod="openshift-multus/multus-additional-cni-plugins-rwfxn" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.403940 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/cc22221d-0c02-4e8c-8314-c2e6d9290b5e-run-ovn\") pod \"ovnkube-node-fpl55\" (UID: \"cc22221d-0c02-4e8c-8314-c2e6d9290b5e\") " pod="openshift-ovn-kubernetes/ovnkube-node-fpl55" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.403962 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/cc22221d-0c02-4e8c-8314-c2e6d9290b5e-ovn-node-metrics-cert\") pod \"ovnkube-node-fpl55\" (UID: \"cc22221d-0c02-4e8c-8314-c2e6d9290b5e\") " pod="openshift-ovn-kubernetes/ovnkube-node-fpl55" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.404007 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/cc22221d-0c02-4e8c-8314-c2e6d9290b5e-host-run-netns\") pod \"ovnkube-node-fpl55\" (UID: \"cc22221d-0c02-4e8c-8314-c2e6d9290b5e\") " pod="openshift-ovn-kubernetes/ovnkube-node-fpl55" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.404035 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-bin\" (UniqueName: \"kubernetes.io/host-path/deadb49b-61b8-435f-8168-d7bd3c01b5ad-host-var-lib-cni-bin\") pod \"multus-8cwb7\" (UID: \"deadb49b-61b8-435f-8168-d7bd3c01b5ad\") " pod="openshift-multus/multus-8cwb7" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.404084 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-bin\" (UniqueName: \"kubernetes.io/host-path/deadb49b-61b8-435f-8168-d7bd3c01b5ad-host-var-lib-cni-bin\") pod \"multus-8cwb7\" (UID: \"deadb49b-61b8-435f-8168-d7bd3c01b5ad\") " pod="openshift-multus/multus-8cwb7" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.404099 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/cc22221d-0c02-4e8c-8314-c2e6d9290b5e-run-systemd\") pod \"ovnkube-node-fpl55\" (UID: \"cc22221d-0c02-4e8c-8314-c2e6d9290b5e\") " pod="openshift-ovn-kubernetes/ovnkube-node-fpl55" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.404156 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/cc22221d-0c02-4e8c-8314-c2e6d9290b5e-run-systemd\") pod \"ovnkube-node-fpl55\" (UID: \"cc22221d-0c02-4e8c-8314-c2e6d9290b5e\") " pod="openshift-ovn-kubernetes/ovnkube-node-fpl55" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.404161 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"hostroot\" (UniqueName: \"kubernetes.io/host-path/deadb49b-61b8-435f-8168-d7bd3c01b5ad-hostroot\") pod \"multus-8cwb7\" (UID: \"deadb49b-61b8-435f-8168-d7bd3c01b5ad\") " pod="openshift-multus/multus-8cwb7" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.404187 
4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/6bf1dca1-b191-4796-b326-baac53e84045-proxy-tls\") pod \"machine-config-daemon-xhdr5\" (UID: \"6bf1dca1-b191-4796-b326-baac53e84045\") " pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.404213 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-cni-dir\" (UniqueName: \"kubernetes.io/host-path/deadb49b-61b8-435f-8168-d7bd3c01b5ad-multus-cni-dir\") pod \"multus-8cwb7\" (UID: \"deadb49b-61b8-435f-8168-d7bd3c01b5ad\") " pod="openshift-multus/multus-8cwb7" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.404235 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: \"kubernetes.io/host-path/deadb49b-61b8-435f-8168-d7bd3c01b5ad-host-run-k8s-cni-cncf-io\") pod \"multus-8cwb7\" (UID: \"deadb49b-61b8-435f-8168-d7bd3c01b5ad\") " pod="openshift-multus/multus-8cwb7" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.404258 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-kubernetes\" (UniqueName: \"kubernetes.io/host-path/deadb49b-61b8-435f-8168-d7bd3c01b5ad-etc-kubernetes\") pod \"multus-8cwb7\" (UID: \"deadb49b-61b8-435f-8168-d7bd3c01b5ad\") " pod="openshift-multus/multus-8cwb7" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.404280 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-whmzl\" (UniqueName: \"kubernetes.io/projected/cc22221d-0c02-4e8c-8314-c2e6d9290b5e-kube-api-access-whmzl\") pod \"ovnkube-node-fpl55\" (UID: \"cc22221d-0c02-4e8c-8314-c2e6d9290b5e\") " pod="openshift-ovn-kubernetes/ovnkube-node-fpl55" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.404305 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/5e522fb8-b104-4f14-a3a2-628fbe0ef36c-cnibin\") pod \"multus-additional-cni-plugins-rwfxn\" (UID: \"5e522fb8-b104-4f14-a3a2-628fbe0ef36c\") " pod="openshift-multus/multus-additional-cni-plugins-rwfxn" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.404329 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/cc22221d-0c02-4e8c-8314-c2e6d9290b5e-ovnkube-config\") pod \"ovnkube-node-fpl55\" (UID: \"cc22221d-0c02-4e8c-8314-c2e6d9290b5e\") " pod="openshift-ovn-kubernetes/ovnkube-node-fpl55" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.404342 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rootfs\" (UniqueName: \"kubernetes.io/host-path/6bf1dca1-b191-4796-b326-baac53e84045-rootfs\") pod \"machine-config-daemon-xhdr5\" (UID: \"6bf1dca1-b191-4796-b326-baac53e84045\") " pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.404333 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/6bf1dca1-b191-4796-b326-baac53e84045-mcd-auth-proxy-config\") pod \"machine-config-daemon-xhdr5\" (UID: \"6bf1dca1-b191-4796-b326-baac53e84045\") " pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.404367 4780 reconciler_common.go:218] "operationExecutor.MountVolume 
started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/deadb49b-61b8-435f-8168-d7bd3c01b5ad-os-release\") pod \"multus-8cwb7\" (UID: \"deadb49b-61b8-435f-8168-d7bd3c01b5ad\") " pod="openshift-multus/multus-8cwb7" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.404383 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/deadb49b-61b8-435f-8168-d7bd3c01b5ad-cni-binary-copy\") pod \"multus-8cwb7\" (UID: \"deadb49b-61b8-435f-8168-d7bd3c01b5ad\") " pod="openshift-multus/multus-8cwb7" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.404396 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: \"kubernetes.io/host-path/deadb49b-61b8-435f-8168-d7bd3c01b5ad-host-run-k8s-cni-cncf-io\") pod \"multus-8cwb7\" (UID: \"deadb49b-61b8-435f-8168-d7bd3c01b5ad\") " pod="openshift-multus/multus-8cwb7" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.404392 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/cc22221d-0c02-4e8c-8314-c2e6d9290b5e-host-slash\") pod \"ovnkube-node-fpl55\" (UID: \"cc22221d-0c02-4e8c-8314-c2e6d9290b5e\") " pod="openshift-ovn-kubernetes/ovnkube-node-fpl55" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.404420 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/cc22221d-0c02-4e8c-8314-c2e6d9290b5e-host-slash\") pod \"ovnkube-node-fpl55\" (UID: \"cc22221d-0c02-4e8c-8314-c2e6d9290b5e\") " pod="openshift-ovn-kubernetes/ovnkube-node-fpl55" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.404444 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/5e522fb8-b104-4f14-a3a2-628fbe0ef36c-cnibin\") pod \"multus-additional-cni-plugins-rwfxn\" (UID: \"5e522fb8-b104-4f14-a3a2-628fbe0ef36c\") " pod="openshift-multus/multus-additional-cni-plugins-rwfxn" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.404451 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-host-slash\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.404461 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rootfs\" (UniqueName: \"kubernetes.io/host-path/6bf1dca1-b191-4796-b326-baac53e84045-rootfs\") pod \"machine-config-daemon-xhdr5\" (UID: \"6bf1dca1-b191-4796-b326-baac53e84045\") " pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.404470 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-kubernetes\" (UniqueName: \"kubernetes.io/host-path/deadb49b-61b8-435f-8168-d7bd3c01b5ad-etc-kubernetes\") pod \"multus-8cwb7\" (UID: \"deadb49b-61b8-435f-8168-d7bd3c01b5ad\") " pod="openshift-multus/multus-8cwb7" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.404368 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/cc22221d-0c02-4e8c-8314-c2e6d9290b5e-run-ovn\") pod \"ovnkube-node-fpl55\" (UID: \"cc22221d-0c02-4e8c-8314-c2e6d9290b5e\") " 
pod="openshift-ovn-kubernetes/ovnkube-node-fpl55" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.404474 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/cc22221d-0c02-4e8c-8314-c2e6d9290b5e-log-socket\") pod \"ovnkube-node-fpl55\" (UID: \"cc22221d-0c02-4e8c-8314-c2e6d9290b5e\") " pod="openshift-ovn-kubernetes/ovnkube-node-fpl55" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.404494 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/cc22221d-0c02-4e8c-8314-c2e6d9290b5e-log-socket\") pod \"ovnkube-node-fpl55\" (UID: \"cc22221d-0c02-4e8c-8314-c2e6d9290b5e\") " pod="openshift-ovn-kubernetes/ovnkube-node-fpl55" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.404516 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/5e522fb8-b104-4f14-a3a2-628fbe0ef36c-tuning-conf-dir\") pod \"multus-additional-cni-plugins-rwfxn\" (UID: \"5e522fb8-b104-4f14-a3a2-628fbe0ef36c\") " pod="openshift-multus/multus-additional-cni-plugins-rwfxn" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.404402 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"hostroot\" (UniqueName: \"kubernetes.io/host-path/deadb49b-61b8-435f-8168-d7bd3c01b5ad-hostroot\") pod \"multus-8cwb7\" (UID: \"deadb49b-61b8-435f-8168-d7bd3c01b5ad\") " pod="openshift-multus/multus-8cwb7" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.404575 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/5e522fb8-b104-4f14-a3a2-628fbe0ef36c-cni-sysctl-allowlist\") pod \"multus-additional-cni-plugins-rwfxn\" (UID: \"5e522fb8-b104-4f14-a3a2-628fbe0ef36c\") " pod="openshift-multus/multus-additional-cni-plugins-rwfxn" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.404649 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-multus-certs\" (UniqueName: \"kubernetes.io/host-path/deadb49b-61b8-435f-8168-d7bd3c01b5ad-host-run-multus-certs\") pod \"multus-8cwb7\" (UID: \"deadb49b-61b8-435f-8168-d7bd3c01b5ad\") " pod="openshift-multus/multus-8cwb7" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.404675 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/cc22221d-0c02-4e8c-8314-c2e6d9290b5e-env-overrides\") pod \"ovnkube-node-fpl55\" (UID: \"cc22221d-0c02-4e8c-8314-c2e6d9290b5e\") " pod="openshift-ovn-kubernetes/ovnkube-node-fpl55" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.404696 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sh92h\" (UniqueName: \"kubernetes.io/projected/6bf1dca1-b191-4796-b326-baac53e84045-kube-api-access-sh92h\") pod \"machine-config-daemon-xhdr5\" (UID: \"6bf1dca1-b191-4796-b326-baac53e84045\") " pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.404682 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/deadb49b-61b8-435f-8168-d7bd3c01b5ad-os-release\") pod \"multus-8cwb7\" (UID: \"deadb49b-61b8-435f-8168-d7bd3c01b5ad\") " pod="openshift-multus/multus-8cwb7" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 
10:45:27.404731 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"hosts-file\" (UniqueName: \"kubernetes.io/host-path/8a3251eb-408c-42f1-b74d-261cb45eab71-hosts-file\") pod \"node-resolver-2lx8w\" (UID: \"8a3251eb-408c-42f1-b74d-261cb45eab71\") " pod="openshift-dns/node-resolver-2lx8w" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.404833 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/cc22221d-0c02-4e8c-8314-c2e6d9290b5e-host-kubelet\") pod \"ovnkube-node-fpl55\" (UID: \"cc22221d-0c02-4e8c-8314-c2e6d9290b5e\") " pod="openshift-ovn-kubernetes/ovnkube-node-fpl55" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.404863 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/cc22221d-0c02-4e8c-8314-c2e6d9290b5e-host-cni-netd\") pod \"ovnkube-node-fpl55\" (UID: \"cc22221d-0c02-4e8c-8314-c2e6d9290b5e\") " pod="openshift-ovn-kubernetes/ovnkube-node-fpl55" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.404863 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-multus-certs\" (UniqueName: \"kubernetes.io/host-path/deadb49b-61b8-435f-8168-d7bd3c01b5ad-host-run-multus-certs\") pod \"multus-8cwb7\" (UID: \"deadb49b-61b8-435f-8168-d7bd3c01b5ad\") " pod="openshift-multus/multus-8cwb7" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.404903 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/cc22221d-0c02-4e8c-8314-c2e6d9290b5e-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-fpl55\" (UID: \"cc22221d-0c02-4e8c-8314-c2e6d9290b5e\") " pod="openshift-ovn-kubernetes/ovnkube-node-fpl55" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.404994 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/deadb49b-61b8-435f-8168-d7bd3c01b5ad-host-run-netns\") pod \"multus-8cwb7\" (UID: \"deadb49b-61b8-435f-8168-d7bd3c01b5ad\") " pod="openshift-multus/multus-8cwb7" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.405021 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/cc22221d-0c02-4e8c-8314-c2e6d9290b5e-systemd-units\") pod \"ovnkube-node-fpl55\" (UID: \"cc22221d-0c02-4e8c-8314-c2e6d9290b5e\") " pod="openshift-ovn-kubernetes/ovnkube-node-fpl55" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.405057 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/cc22221d-0c02-4e8c-8314-c2e6d9290b5e-host-run-ovn-kubernetes\") pod \"ovnkube-node-fpl55\" (UID: \"cc22221d-0c02-4e8c-8314-c2e6d9290b5e\") " pod="openshift-ovn-kubernetes/ovnkube-node-fpl55" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.405091 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-cni-dir\" (UniqueName: \"kubernetes.io/host-path/deadb49b-61b8-435f-8168-d7bd3c01b5ad-multus-cni-dir\") pod \"multus-8cwb7\" (UID: \"deadb49b-61b8-435f-8168-d7bd3c01b5ad\") " pod="openshift-multus/multus-8cwb7" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.405093 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"system-cni-dir\" 
(UniqueName: \"kubernetes.io/host-path/deadb49b-61b8-435f-8168-d7bd3c01b5ad-system-cni-dir\") pod \"multus-8cwb7\" (UID: \"deadb49b-61b8-435f-8168-d7bd3c01b5ad\") " pod="openshift-multus/multus-8cwb7" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.405129 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-multus\" (UniqueName: \"kubernetes.io/host-path/deadb49b-61b8-435f-8168-d7bd3c01b5ad-host-var-lib-cni-multus\") pod \"multus-8cwb7\" (UID: \"deadb49b-61b8-435f-8168-d7bd3c01b5ad\") " pod="openshift-multus/multus-8cwb7" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.405126 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/cc22221d-0c02-4e8c-8314-c2e6d9290b5e-host-kubelet\") pod \"ovnkube-node-fpl55\" (UID: \"cc22221d-0c02-4e8c-8314-c2e6d9290b5e\") " pod="openshift-ovn-kubernetes/ovnkube-node-fpl55" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.405176 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-multus\" (UniqueName: \"kubernetes.io/host-path/deadb49b-61b8-435f-8168-d7bd3c01b5ad-host-var-lib-cni-multus\") pod \"multus-8cwb7\" (UID: \"deadb49b-61b8-435f-8168-d7bd3c01b5ad\") " pod="openshift-multus/multus-8cwb7" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.405161 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/cc22221d-0c02-4e8c-8314-c2e6d9290b5e-node-log\") pod \"ovnkube-node-fpl55\" (UID: \"cc22221d-0c02-4e8c-8314-c2e6d9290b5e\") " pod="openshift-ovn-kubernetes/ovnkube-node-fpl55" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.405164 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/deadb49b-61b8-435f-8168-d7bd3c01b5ad-system-cni-dir\") pod \"multus-8cwb7\" (UID: \"deadb49b-61b8-435f-8168-d7bd3c01b5ad\") " pod="openshift-multus/multus-8cwb7" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.405195 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-host-slash\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.405146 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/cc22221d-0c02-4e8c-8314-c2e6d9290b5e-node-log\") pod \"ovnkube-node-fpl55\" (UID: \"cc22221d-0c02-4e8c-8314-c2e6d9290b5e\") " pod="openshift-ovn-kubernetes/ovnkube-node-fpl55" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.405165 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/5e522fb8-b104-4f14-a3a2-628fbe0ef36c-cni-sysctl-allowlist\") pod \"multus-additional-cni-plugins-rwfxn\" (UID: \"5e522fb8-b104-4f14-a3a2-628fbe0ef36c\") " pod="openshift-multus/multus-additional-cni-plugins-rwfxn" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.405227 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/cc22221d-0c02-4e8c-8314-c2e6d9290b5e-systemd-units\") pod \"ovnkube-node-fpl55\" (UID: \"cc22221d-0c02-4e8c-8314-c2e6d9290b5e\") " 
pod="openshift-ovn-kubernetes/ovnkube-node-fpl55" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.405195 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/cc22221d-0c02-4e8c-8314-c2e6d9290b5e-host-cni-netd\") pod \"ovnkube-node-fpl55\" (UID: \"cc22221d-0c02-4e8c-8314-c2e6d9290b5e\") " pod="openshift-ovn-kubernetes/ovnkube-node-fpl55" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.405222 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/5e522fb8-b104-4f14-a3a2-628fbe0ef36c-cni-binary-copy\") pod \"multus-additional-cni-plugins-rwfxn\" (UID: \"5e522fb8-b104-4f14-a3a2-628fbe0ef36c\") " pod="openshift-multus/multus-additional-cni-plugins-rwfxn" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.405225 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/deadb49b-61b8-435f-8168-d7bd3c01b5ad-host-run-netns\") pod \"multus-8cwb7\" (UID: \"deadb49b-61b8-435f-8168-d7bd3c01b5ad\") " pod="openshift-multus/multus-8cwb7" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.405316 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/cc22221d-0c02-4e8c-8314-c2e6d9290b5e-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-fpl55\" (UID: \"cc22221d-0c02-4e8c-8314-c2e6d9290b5e\") " pod="openshift-ovn-kubernetes/ovnkube-node-fpl55" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.405330 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/deadb49b-61b8-435f-8168-d7bd3c01b5ad-host-var-lib-kubelet\") pod \"multus-8cwb7\" (UID: \"deadb49b-61b8-435f-8168-d7bd3c01b5ad\") " pod="openshift-multus/multus-8cwb7" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.405353 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-conf-dir\" (UniqueName: \"kubernetes.io/host-path/deadb49b-61b8-435f-8168-d7bd3c01b5ad-multus-conf-dir\") pod \"multus-8cwb7\" (UID: \"deadb49b-61b8-435f-8168-d7bd3c01b5ad\") " pod="openshift-multus/multus-8cwb7" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.405404 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/deadb49b-61b8-435f-8168-d7bd3c01b5ad-host-var-lib-kubelet\") pod \"multus-8cwb7\" (UID: \"deadb49b-61b8-435f-8168-d7bd3c01b5ad\") " pod="openshift-multus/multus-8cwb7" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.405421 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-conf-dir\" (UniqueName: \"kubernetes.io/host-path/deadb49b-61b8-435f-8168-d7bd3c01b5ad-multus-conf-dir\") pod \"multus-8cwb7\" (UID: \"deadb49b-61b8-435f-8168-d7bd3c01b5ad\") " pod="openshift-multus/multus-8cwb7" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.405430 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/deadb49b-61b8-435f-8168-d7bd3c01b5ad-multus-daemon-config\") pod \"multus-8cwb7\" (UID: \"deadb49b-61b8-435f-8168-d7bd3c01b5ad\") " pod="openshift-multus/multus-8cwb7" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.405483 4780 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/cc22221d-0c02-4e8c-8314-c2e6d9290b5e-host-run-ovn-kubernetes\") pod \"ovnkube-node-fpl55\" (UID: \"cc22221d-0c02-4e8c-8314-c2e6d9290b5e\") " pod="openshift-ovn-kubernetes/ovnkube-node-fpl55" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.405519 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lgkg4\" (UniqueName: \"kubernetes.io/projected/8a3251eb-408c-42f1-b74d-261cb45eab71-kube-api-access-lgkg4\") pod \"node-resolver-2lx8w\" (UID: \"8a3251eb-408c-42f1-b74d-261cb45eab71\") " pod="openshift-dns/node-resolver-2lx8w" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.405562 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/5e522fb8-b104-4f14-a3a2-628fbe0ef36c-os-release\") pod \"multus-additional-cni-plugins-rwfxn\" (UID: \"5e522fb8-b104-4f14-a3a2-628fbe0ef36c\") " pod="openshift-multus/multus-additional-cni-plugins-rwfxn" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.405666 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/cc22221d-0c02-4e8c-8314-c2e6d9290b5e-ovnkube-script-lib\") pod \"ovnkube-node-fpl55\" (UID: \"cc22221d-0c02-4e8c-8314-c2e6d9290b5e\") " pod="openshift-ovn-kubernetes/ovnkube-node-fpl55" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.405718 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/cc22221d-0c02-4e8c-8314-c2e6d9290b5e-var-lib-openvswitch\") pod \"ovnkube-node-fpl55\" (UID: \"cc22221d-0c02-4e8c-8314-c2e6d9290b5e\") " pod="openshift-ovn-kubernetes/ovnkube-node-fpl55" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.405748 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-etc-kube\" (UniqueName: \"kubernetes.io/host-path/37a5e44f-9a88-4405-be8a-b645485e7312-host-etc-kube\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.405761 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/5e522fb8-b104-4f14-a3a2-628fbe0ef36c-tuning-conf-dir\") pod \"multus-additional-cni-plugins-rwfxn\" (UID: \"5e522fb8-b104-4f14-a3a2-628fbe0ef36c\") " pod="openshift-multus/multus-additional-cni-plugins-rwfxn" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.405802 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/5e522fb8-b104-4f14-a3a2-628fbe0ef36c-os-release\") pod \"multus-additional-cni-plugins-rwfxn\" (UID: \"5e522fb8-b104-4f14-a3a2-628fbe0ef36c\") " pod="openshift-multus/multus-additional-cni-plugins-rwfxn" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.405817 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-etc-kube\" (UniqueName: \"kubernetes.io/host-path/37a5e44f-9a88-4405-be8a-b645485e7312-host-etc-kube\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 
10:45:27.405839 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/5e522fb8-b104-4f14-a3a2-628fbe0ef36c-system-cni-dir\") pod \"multus-additional-cni-plugins-rwfxn\" (UID: \"5e522fb8-b104-4f14-a3a2-628fbe0ef36c\") " pod="openshift-multus/multus-additional-cni-plugins-rwfxn" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.405861 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/5e522fb8-b104-4f14-a3a2-628fbe0ef36c-system-cni-dir\") pod \"multus-additional-cni-plugins-rwfxn\" (UID: \"5e522fb8-b104-4f14-a3a2-628fbe0ef36c\") " pod="openshift-multus/multus-additional-cni-plugins-rwfxn" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.405887 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/cc22221d-0c02-4e8c-8314-c2e6d9290b5e-var-lib-openvswitch\") pod \"ovnkube-node-fpl55\" (UID: \"cc22221d-0c02-4e8c-8314-c2e6d9290b5e\") " pod="openshift-ovn-kubernetes/ovnkube-node-fpl55" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.405946 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"hosts-file\" (UniqueName: \"kubernetes.io/host-path/8a3251eb-408c-42f1-b74d-261cb45eab71-hosts-file\") pod \"node-resolver-2lx8w\" (UID: \"8a3251eb-408c-42f1-b74d-261cb45eab71\") " pod="openshift-dns/node-resolver-2lx8w" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.405983 4780 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.406003 4780 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.406015 4780 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.406027 4780 reconciler_common.go:293] "Volume detached for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.406035 4780 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.406046 4780 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.406057 4780 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.405953 4780 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/cc22221d-0c02-4e8c-8314-c2e6d9290b5e-env-overrides\") pod \"ovnkube-node-fpl55\" (UID: \"cc22221d-0c02-4e8c-8314-c2e6d9290b5e\") " pod="openshift-ovn-kubernetes/ovnkube-node-fpl55" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.406067 4780 reconciler_common.go:293] "Volume detached for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.406105 4780 reconciler_common.go:293] "Volume detached for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.406116 4780 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.406126 4780 reconciler_common.go:293] "Volume detached for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.406136 4780 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vt5rc\" (UniqueName: \"kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.406146 4780 reconciler_common.go:293] "Volume detached for volume \"images\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.406156 4780 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.406166 4780 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.406176 4780 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pj782\" (UniqueName: \"kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.406186 4780 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nzwt7\" (UniqueName: \"kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.406207 4780 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9xfj7\" (UniqueName: \"kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.406229 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-daemon-config\" (UniqueName: 
\"kubernetes.io/configmap/deadb49b-61b8-435f-8168-d7bd3c01b5ad-multus-daemon-config\") pod \"multus-8cwb7\" (UID: \"deadb49b-61b8-435f-8168-d7bd3c01b5ad\") " pod="openshift-multus/multus-8cwb7" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.406217 4780 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.406295 4780 reconciler_common.go:293] "Volume detached for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.406309 4780 reconciler_common.go:293] "Volume detached for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.406323 4780 reconciler_common.go:293] "Volume detached for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.406334 4780 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.406346 4780 reconciler_common.go:293] "Volume detached for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.406359 4780 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.406372 4780 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.406385 4780 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.406410 4780 reconciler_common.go:293] "Volume detached for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.406423 4780 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jhbk2\" (UniqueName: \"kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.406436 4780 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zkvpv\" (UniqueName: \"kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.406448 4780 reconciler_common.go:293] "Volume 
detached for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.406462 4780 reconciler_common.go:293] "Volume detached for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.406477 4780 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xcgwh\" (UniqueName: \"kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.406489 4780 reconciler_common.go:293] "Volume detached for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.406502 4780 reconciler_common.go:293] "Volume detached for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.406514 4780 reconciler_common.go:293] "Volume detached for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.406528 4780 reconciler_common.go:293] "Volume detached for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.406541 4780 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2d4wz\" (UniqueName: \"kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.406555 4780 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.406577 4780 reconciler_common.go:293] "Volume detached for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.406592 4780 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lz9wn\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.406606 4780 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4d4hj\" (UniqueName: \"kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.406620 4780 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xcphl\" (UniqueName: \"kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.406634 4780 reconciler_common.go:293] "Volume detached for volume 
\"serviceca\" (UniqueName: \"kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.406646 4780 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.406659 4780 reconciler_common.go:293] "Volume detached for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.406672 4780 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kfwg7\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.406685 4780 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-htfz6\" (UniqueName: \"kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.406697 4780 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.406710 4780 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wxkg8\" (UniqueName: \"kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.406726 4780 reconciler_common.go:293] "Volume detached for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.406739 4780 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.406754 4780 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gf66m\" (UniqueName: \"kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.406771 4780 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.406785 4780 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.406798 4780 reconciler_common.go:293] "Volume detached for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.406812 4780 reconciler_common.go:293] "Volume detached for 
volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.406826 4780 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dbsvg\" (UniqueName: \"kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.406838 4780 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2w9zh\" (UniqueName: \"kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.406851 4780 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ngvvp\" (UniqueName: \"kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.406864 4780 reconciler_common.go:293] "Volume detached for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.406876 4780 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.406888 4780 reconciler_common.go:293] "Volume detached for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.406899 4780 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.406911 4780 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w7l8j\" (UniqueName: \"kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.406946 4780 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d4lsv\" (UniqueName: \"kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.406960 4780 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.406972 4780 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x7zkh\" (UniqueName: \"kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.406986 4780 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.406996 4780 reconciler_common.go:293] "Volume detached for volume 
\"env-overrides\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.407008 4780 reconciler_common.go:293] "Volume detached for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.407019 4780 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fcqwp\" (UniqueName: \"kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.407033 4780 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7c4vf\" (UniqueName: \"kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.407045 4780 reconciler_common.go:293] "Volume detached for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.407056 4780 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.407066 4780 reconciler_common.go:293] "Volume detached for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.407077 4780 reconciler_common.go:293] "Volume detached for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.406620 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/cc22221d-0c02-4e8c-8314-c2e6d9290b5e-ovnkube-script-lib\") pod \"ovnkube-node-fpl55\" (UID: \"cc22221d-0c02-4e8c-8314-c2e6d9290b5e\") " pod="openshift-ovn-kubernetes/ovnkube-node-fpl55" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.407781 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/6bf1dca1-b191-4796-b326-baac53e84045-proxy-tls\") pod \"machine-config-daemon-xhdr5\" (UID: \"6bf1dca1-b191-4796-b326-baac53e84045\") " pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.410425 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/cc22221d-0c02-4e8c-8314-c2e6d9290b5e-ovn-node-metrics-cert\") pod \"ovnkube-node-fpl55\" (UID: \"cc22221d-0c02-4e8c-8314-c2e6d9290b5e\") " pod="openshift-ovn-kubernetes/ovnkube-node-fpl55" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.407089 4780 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:27 crc kubenswrapper[4780]: 
I1210 10:45:27.417646 4780 reconciler_common.go:293] "Volume detached for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.417663 4780 reconciler_common.go:293] "Volume detached for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.417680 4780 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.417696 4780 reconciler_common.go:293] "Volume detached for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.417715 4780 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.417729 4780 reconciler_common.go:293] "Volume detached for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.417749 4780 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.417771 4780 reconciler_common.go:293] "Volume detached for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.417789 4780 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.417801 4780 reconciler_common.go:293] "Volume detached for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.417812 4780 reconciler_common.go:293] "Volume detached for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.417823 4780 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jkwtn\" (UniqueName: \"kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.417833 4780 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tk88c\" (UniqueName: \"kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:27 crc 
kubenswrapper[4780]: I1210 10:45:27.417843 4780 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.417851 4780 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.417861 4780 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.417870 4780 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.417883 4780 reconciler_common.go:293] "Volume detached for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.417895 4780 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-279lb\" (UniqueName: \"kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.417908 4780 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8tdtz\" (UniqueName: \"kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.418053 4780 reconciler_common.go:293] "Volume detached for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.418070 4780 reconciler_common.go:293] "Volume detached for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.418078 4780 reconciler_common.go:293] "Volume detached for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.418090 4780 reconciler_common.go:293] "Volume detached for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.418099 4780 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.418114 4780 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x4zgh\" (UniqueName: \"kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:27 crc 
kubenswrapper[4780]: I1210 10:45:27.418126 4780 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pjr6v\" (UniqueName: \"kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.418136 4780 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.418146 4780 reconciler_common.go:293] "Volume detached for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.418156 4780 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6g6sz\" (UniqueName: \"kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.418166 4780 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w4xd4\" (UniqueName: \"kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.418175 4780 reconciler_common.go:293] "Volume detached for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.418184 4780 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.418197 4780 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.418209 4780 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.418233 4780 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.418251 4780 reconciler_common.go:293] "Volume detached for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.418262 4780 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qs4fp\" (UniqueName: \"kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.418274 4780 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 
10:45:27.418284 4780 reconciler_common.go:293] "Volume detached for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.418293 4780 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-s4n52\" (UniqueName: \"kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.418301 4780 reconciler_common.go:293] "Volume detached for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.418310 4780 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.418318 4780 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.418327 4780 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.418336 4780 reconciler_common.go:293] "Volume detached for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.418357 4780 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qg5z5\" (UniqueName: \"kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.420875 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-r67pg\" (UniqueName: \"kubernetes.io/projected/deadb49b-61b8-435f-8168-d7bd3c01b5ad-kube-api-access-r67pg\") pod \"multus-8cwb7\" (UID: \"deadb49b-61b8-435f-8168-d7bd3c01b5ad\") " pod="openshift-multus/multus-8cwb7" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.423336 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-x4kzt\" (UniqueName: \"kubernetes.io/projected/5e522fb8-b104-4f14-a3a2-628fbe0ef36c-kube-api-access-x4kzt\") pod \"multus-additional-cni-plugins-rwfxn\" (UID: \"5e522fb8-b104-4f14-a3a2-628fbe0ef36c\") " pod="openshift-multus/multus-additional-cni-plugins-rwfxn" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.426450 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lgkg4\" (UniqueName: \"kubernetes.io/projected/8a3251eb-408c-42f1-b74d-261cb45eab71-kube-api-access-lgkg4\") pod \"node-resolver-2lx8w\" (UID: \"8a3251eb-408c-42f1-b74d-261cb45eab71\") " pod="openshift-dns/node-resolver-2lx8w" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.427109 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-whmzl\" (UniqueName: \"kubernetes.io/projected/cc22221d-0c02-4e8c-8314-c2e6d9290b5e-kube-api-access-whmzl\") pod 
\"ovnkube-node-fpl55\" (UID: \"cc22221d-0c02-4e8c-8314-c2e6d9290b5e\") " pod="openshift-ovn-kubernetes/ovnkube-node-fpl55" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.431845 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sh92h\" (UniqueName: \"kubernetes.io/projected/6bf1dca1-b191-4796-b326-baac53e84045-kube-api-access-sh92h\") pod \"machine-config-daemon-xhdr5\" (UID: \"6bf1dca1-b191-4796-b326-baac53e84045\") " pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.441712 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.450510 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-node-identity/network-node-identity-vrzqb" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.461124 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/iptables-alerter-4ln5h" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.474590 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" Dec 10 10:45:27 crc kubenswrapper[4780]: W1210 10:45:27.477072 4780 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod37a5e44f_9a88_4405_be8a_b645485e7312.slice/crio-945ded3979c955caa03a34a7a7ad7148093e883f0348108a4e4eed23399b14e2 WatchSource:0}: Error finding container 945ded3979c955caa03a34a7a7ad7148093e883f0348108a4e4eed23399b14e2: Status 404 returned error can't find the container with id 945ded3979c955caa03a34a7a7ad7148093e883f0348108a4e4eed23399b14e2 Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.483656 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-8cwb7" Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.493140 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-additional-cni-plugins-rwfxn" Dec 10 10:45:27 crc kubenswrapper[4780]: W1210 10:45:27.501081 4780 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd75a4c96_2883_4a0b_bab2_0fab2b6c0b49.slice/crio-ba4fc8a9929cc6838788d16b8115c67645b6839473a7be62468482bd6b17271d WatchSource:0}: Error finding container ba4fc8a9929cc6838788d16b8115c67645b6839473a7be62468482bd6b17271d: Status 404 returned error can't find the container with id ba4fc8a9929cc6838788d16b8115c67645b6839473a7be62468482bd6b17271d Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.502148 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-dns/node-resolver-2lx8w" Dec 10 10:45:27 crc kubenswrapper[4780]: W1210 10:45:27.503936 4780 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod6bf1dca1_b191_4796_b326_baac53e84045.slice/crio-2ba01996703a8e3cb7b83de17579e196e64a4d57c325cd447bb70cae8162dbeb WatchSource:0}: Error finding container 2ba01996703a8e3cb7b83de17579e196e64a4d57c325cd447bb70cae8162dbeb: Status 404 returned error can't find the container with id 2ba01996703a8e3cb7b83de17579e196e64a4d57c325cd447bb70cae8162dbeb Dec 10 10:45:27 crc kubenswrapper[4780]: I1210 10:45:27.510635 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-fpl55" Dec 10 10:45:27 crc kubenswrapper[4780]: W1210 10:45:27.569422 4780 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod5e522fb8_b104_4f14_a3a2_628fbe0ef36c.slice/crio-ff88b4b9857eac4bf656138e07108135782876fb258e0ff12e84681ebb932921 WatchSource:0}: Error finding container ff88b4b9857eac4bf656138e07108135782876fb258e0ff12e84681ebb932921: Status 404 returned error can't find the container with id ff88b4b9857eac4bf656138e07108135782876fb258e0ff12e84681ebb932921 Dec 10 10:45:27 crc kubenswrapper[4780]: W1210 10:45:27.598776 4780 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod8a3251eb_408c_42f1_b74d_261cb45eab71.slice/crio-9ea7cf221feab318fe29bc800be1c0e8772100798721978649b1bde568c141f7 WatchSource:0}: Error finding container 9ea7cf221feab318fe29bc800be1c0e8772100798721978649b1bde568c141f7: Status 404 returned error can't find the container with id 9ea7cf221feab318fe29bc800be1c0e8772100798721978649b1bde568c141f7 Dec 10 10:45:28 crc kubenswrapper[4780]: I1210 10:45:28.155030 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 10 10:45:28 crc kubenswrapper[4780]: I1210 10:45:28.155155 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 10 10:45:28 crc kubenswrapper[4780]: I1210 10:45:28.155193 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 10 10:45:28 crc kubenswrapper[4780]: I1210 10:45:28.155227 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 10 10:45:28 crc kubenswrapper[4780]: I1210 10:45:28.155252 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 10 10:45:28 crc kubenswrapper[4780]: E1210 10:45:28.155373 4780 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Dec 10 10:45:28 crc kubenswrapper[4780]: E1210 10:45:28.155458 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-12-10 10:45:29.155440314 +0000 UTC m=+34.008833757 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Dec 10 10:45:28 crc kubenswrapper[4780]: E1210 10:45:28.156281 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-10 10:45:29.156269105 +0000 UTC m=+34.009662558 (durationBeforeRetry 1s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:45:28 crc kubenswrapper[4780]: E1210 10:45:28.156390 4780 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Dec 10 10:45:28 crc kubenswrapper[4780]: E1210 10:45:28.156425 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-12-10 10:45:29.156418228 +0000 UTC m=+34.009811671 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Dec 10 10:45:28 crc kubenswrapper[4780]: E1210 10:45:28.156511 4780 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Dec 10 10:45:28 crc kubenswrapper[4780]: E1210 10:45:28.156536 4780 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Dec 10 10:45:28 crc kubenswrapper[4780]: E1210 10:45:28.156551 4780 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Dec 10 10:45:28 crc kubenswrapper[4780]: E1210 10:45:28.156567 4780 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Dec 10 10:45:28 crc kubenswrapper[4780]: E1210 10:45:28.156603 4780 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Dec 10 10:45:28 crc kubenswrapper[4780]: E1210 10:45:28.156610 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-12-10 10:45:29.156598783 +0000 UTC m=+34.009992226 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Dec 10 10:45:28 crc kubenswrapper[4780]: E1210 10:45:28.156619 4780 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Dec 10 10:45:28 crc kubenswrapper[4780]: E1210 10:45:28.156694 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-12-10 10:45:29.156672405 +0000 UTC m=+34.010065878 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Dec 10 10:45:28 crc kubenswrapper[4780]: I1210 10:45:28.185452 4780 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="01ab3dd5-8196-46d0-ad33-122e2ca51def" path="/var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes" Dec 10 10:45:28 crc kubenswrapper[4780]: I1210 10:45:28.186214 4780 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" path="/var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes" Dec 10 10:45:28 crc kubenswrapper[4780]: I1210 10:45:28.188296 4780 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="09efc573-dbb6-4249-bd59-9b87aba8dd28" path="/var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes" Dec 10 10:45:28 crc kubenswrapper[4780]: I1210 10:45:28.190607 4780 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0b574797-001e-440a-8f4e-c0be86edad0f" path="/var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes" Dec 10 10:45:28 crc kubenswrapper[4780]: I1210 10:45:28.191381 4780 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0b78653f-4ff9-4508-8672-245ed9b561e3" path="/var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes" Dec 10 10:45:28 crc kubenswrapper[4780]: I1210 10:45:28.192459 4780 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1386a44e-36a2-460c-96d0-0359d2b6f0f5" path="/var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes" Dec 10 10:45:28 crc kubenswrapper[4780]: I1210 10:45:28.193218 4780 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1bf7eb37-55a3-4c65-b768-a94c82151e69" path="/var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes" Dec 10 10:45:28 crc kubenswrapper[4780]: I1210 10:45:28.193878 4780 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1d611f23-29be-4491-8495-bee1670e935f" path="/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes" Dec 10 10:45:28 crc kubenswrapper[4780]: I1210 10:45:28.195275 4780 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="20b0d48f-5fd6-431c-a545-e3c800c7b866" path="/var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/volumes" Dec 10 10:45:28 crc kubenswrapper[4780]: I1210 10:45:28.195957 4780 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" path="/var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes" Dec 10 10:45:28 crc kubenswrapper[4780]: I1210 10:45:28.197614 4780 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="22c825df-677d-4ca6-82db-3454ed06e783" path="/var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes" Dec 10 10:45:28 crc kubenswrapper[4780]: I1210 10:45:28.198581 4780 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="25e176fe-21b4-4974-b1ed-c8b94f112a7f" path="/var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes" Dec 10 10:45:28 crc kubenswrapper[4780]: I1210 10:45:28.200396 4780 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" 
podUID="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" path="/var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes" Dec 10 10:45:28 crc kubenswrapper[4780]: I1210 10:45:28.201183 4780 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="31d8b7a1-420e-4252-a5b7-eebe8a111292" path="/var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes" Dec 10 10:45:28 crc kubenswrapper[4780]: I1210 10:45:28.201848 4780 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3ab1a177-2de0-46d9-b765-d0d0649bb42e" path="/var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/volumes" Dec 10 10:45:28 crc kubenswrapper[4780]: I1210 10:45:28.203366 4780 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" path="/var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes" Dec 10 10:45:28 crc kubenswrapper[4780]: I1210 10:45:28.204204 4780 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="43509403-f426-496e-be36-56cef71462f5" path="/var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes" Dec 10 10:45:28 crc kubenswrapper[4780]: I1210 10:45:28.205256 4780 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="44663579-783b-4372-86d6-acf235a62d72" path="/var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/volumes" Dec 10 10:45:28 crc kubenswrapper[4780]: I1210 10:45:28.206047 4780 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="496e6271-fb68-4057-954e-a0d97a4afa3f" path="/var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes" Dec 10 10:45:28 crc kubenswrapper[4780]: I1210 10:45:28.207577 4780 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" path="/var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes" Dec 10 10:45:28 crc kubenswrapper[4780]: I1210 10:45:28.208844 4780 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="49ef4625-1d3a-4a9f-b595-c2433d32326d" path="/var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/volumes" Dec 10 10:45:28 crc kubenswrapper[4780]: I1210 10:45:28.209613 4780 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4bb40260-dbaa-4fb0-84df-5e680505d512" path="/var/lib/kubelet/pods/4bb40260-dbaa-4fb0-84df-5e680505d512/volumes" Dec 10 10:45:28 crc kubenswrapper[4780]: I1210 10:45:28.210675 4780 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5225d0e4-402f-4861-b410-819f433b1803" path="/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes" Dec 10 10:45:28 crc kubenswrapper[4780]: I1210 10:45:28.211646 4780 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5441d097-087c-4d9a-baa8-b210afa90fc9" path="/var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes" Dec 10 10:45:28 crc kubenswrapper[4780]: I1210 10:45:28.212379 4780 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="57a731c4-ef35-47a8-b875-bfb08a7f8011" path="/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes" Dec 10 10:45:28 crc kubenswrapper[4780]: I1210 10:45:28.214136 4780 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5b88f790-22fa-440e-b583-365168c0b23d" path="/var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/volumes" Dec 10 10:45:28 crc kubenswrapper[4780]: I1210 10:45:28.215384 4780 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5fe579f8-e8a6-4643-bce5-a661393c4dde" 
path="/var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/volumes" Dec 10 10:45:28 crc kubenswrapper[4780]: I1210 10:45:28.216792 4780 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6402fda4-df10-493c-b4e5-d0569419652d" path="/var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes" Dec 10 10:45:28 crc kubenswrapper[4780]: I1210 10:45:28.217835 4780 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6509e943-70c6-444c-bc41-48a544e36fbd" path="/var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes" Dec 10 10:45:28 crc kubenswrapper[4780]: I1210 10:45:28.219691 4780 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6731426b-95fe-49ff-bb5f-40441049fde2" path="/var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/volumes" Dec 10 10:45:28 crc kubenswrapper[4780]: I1210 10:45:28.220419 4780 kubelet_volumes.go:152] "Cleaned up orphaned volume subpath from pod" podUID="6ea678ab-3438-413e-bfe3-290ae7725660" path="/var/lib/kubelet/pods/6ea678ab-3438-413e-bfe3-290ae7725660/volume-subpaths/run-systemd/ovnkube-controller/6" Dec 10 10:45:28 crc kubenswrapper[4780]: I1210 10:45:28.220587 4780 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6ea678ab-3438-413e-bfe3-290ae7725660" path="/var/lib/kubelet/pods/6ea678ab-3438-413e-bfe3-290ae7725660/volumes" Dec 10 10:45:28 crc kubenswrapper[4780]: I1210 10:45:28.223252 4780 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7539238d-5fe0-46ed-884e-1c3b566537ec" path="/var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes" Dec 10 10:45:28 crc kubenswrapper[4780]: I1210 10:45:28.224062 4780 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7583ce53-e0fe-4a16-9e4d-50516596a136" path="/var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes" Dec 10 10:45:28 crc kubenswrapper[4780]: I1210 10:45:28.224633 4780 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7bb08738-c794-4ee8-9972-3a62ca171029" path="/var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes" Dec 10 10:45:28 crc kubenswrapper[4780]: I1210 10:45:28.227514 4780 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="87cf06ed-a83f-41a7-828d-70653580a8cb" path="/var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes" Dec 10 10:45:28 crc kubenswrapper[4780]: I1210 10:45:28.228883 4780 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" path="/var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes" Dec 10 10:45:28 crc kubenswrapper[4780]: I1210 10:45:28.251405 4780 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="925f1c65-6136-48ba-85aa-3a3b50560753" path="/var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes" Dec 10 10:45:28 crc kubenswrapper[4780]: I1210 10:45:28.252346 4780 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="96b93a3a-6083-4aea-8eab-fe1aa8245ad9" path="/var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/volumes" Dec 10 10:45:28 crc kubenswrapper[4780]: I1210 10:45:28.255780 4780 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9d4552c7-cd75-42dd-8880-30dd377c49a4" path="/var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes" Dec 10 10:45:28 crc kubenswrapper[4780]: I1210 10:45:28.257062 4780 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a0128f3a-b052-44ed-a84e-c4c8aaf17c13" 
path="/var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/volumes" Dec 10 10:45:28 crc kubenswrapper[4780]: I1210 10:45:28.258958 4780 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a31745f5-9847-4afe-82a5-3161cc66ca93" path="/var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes" Dec 10 10:45:28 crc kubenswrapper[4780]: I1210 10:45:28.259719 4780 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" path="/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes" Dec 10 10:45:28 crc kubenswrapper[4780]: I1210 10:45:28.260767 4780 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b6312bbd-5731-4ea0-a20f-81d5a57df44a" path="/var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/volumes" Dec 10 10:45:28 crc kubenswrapper[4780]: I1210 10:45:28.261275 4780 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" path="/var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes" Dec 10 10:45:28 crc kubenswrapper[4780]: I1210 10:45:28.262333 4780 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" path="/var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes" Dec 10 10:45:28 crc kubenswrapper[4780]: I1210 10:45:28.263139 4780 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bd23aa5c-e532-4e53-bccf-e79f130c5ae8" path="/var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/volumes" Dec 10 10:45:28 crc kubenswrapper[4780]: I1210 10:45:28.264455 4780 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bf126b07-da06-4140-9a57-dfd54fc6b486" path="/var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes" Dec 10 10:45:28 crc kubenswrapper[4780]: I1210 10:45:28.265072 4780 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c03ee662-fb2f-4fc4-a2c1-af487c19d254" path="/var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes" Dec 10 10:45:28 crc kubenswrapper[4780]: I1210 10:45:28.268592 4780 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d" path="/var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/volumes" Dec 10 10:45:28 crc kubenswrapper[4780]: I1210 10:45:28.269635 4780 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e7e6199b-1264-4501-8953-767f51328d08" path="/var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes" Dec 10 10:45:28 crc kubenswrapper[4780]: I1210 10:45:28.273021 4780 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="efdd0498-1daa-4136-9a4a-3b948c2293fc" path="/var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/volumes" Dec 10 10:45:28 crc kubenswrapper[4780]: I1210 10:45:28.274993 4780 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" path="/var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/volumes" Dec 10 10:45:28 crc kubenswrapper[4780]: I1210 10:45:28.275902 4780 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fda69060-fa79-4696-b1a6-7980f124bf7c" path="/var/lib/kubelet/pods/fda69060-fa79-4696-b1a6-7980f124bf7c/volumes" Dec 10 10:45:28 crc kubenswrapper[4780]: I1210 10:45:28.276641 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" 
event={"ID":"6bf1dca1-b191-4796-b326-baac53e84045","Type":"ContainerStarted","Data":"2ba01996703a8e3cb7b83de17579e196e64a4d57c325cd447bb70cae8162dbeb"} Dec 10 10:45:28 crc kubenswrapper[4780]: I1210 10:45:28.276752 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" event={"ID":"ef543e1b-8068-4ea3-b32a-61027b32e95d","Type":"ContainerStarted","Data":"acddca5a6d4a2265bc33477d5e8099a5451ef95dc5bc61d60384160a86df30ef"} Dec 10 10:45:28 crc kubenswrapper[4780]: I1210 10:45:28.276830 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" event={"ID":"37a5e44f-9a88-4405-be8a-b645485e7312","Type":"ContainerStarted","Data":"945ded3979c955caa03a34a7a7ad7148093e883f0348108a4e4eed23399b14e2"} Dec 10 10:45:28 crc kubenswrapper[4780]: I1210 10:45:28.276898 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/node-resolver-2lx8w" event={"ID":"8a3251eb-408c-42f1-b74d-261cb45eab71","Type":"ContainerStarted","Data":"9ea7cf221feab318fe29bc800be1c0e8772100798721978649b1bde568c141f7"} Dec 10 10:45:28 crc kubenswrapper[4780]: I1210 10:45:28.277010 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" event={"ID":"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49","Type":"ContainerStarted","Data":"ba4fc8a9929cc6838788d16b8115c67645b6839473a7be62468482bd6b17271d"} Dec 10 10:45:28 crc kubenswrapper[4780]: I1210 10:45:28.277082 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-fpl55" event={"ID":"cc22221d-0c02-4e8c-8314-c2e6d9290b5e","Type":"ContainerStarted","Data":"0ae266c98886075cfee6fbd79b266be7ef745f38c6d1283c5150f29e3aea474c"} Dec 10 10:45:28 crc kubenswrapper[4780]: I1210 10:45:28.277162 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-rwfxn" event={"ID":"5e522fb8-b104-4f14-a3a2-628fbe0ef36c","Type":"ContainerStarted","Data":"ff88b4b9857eac4bf656138e07108135782876fb258e0ff12e84681ebb932921"} Dec 10 10:45:28 crc kubenswrapper[4780]: I1210 10:45:28.277245 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-8cwb7" event={"ID":"deadb49b-61b8-435f-8168-d7bd3c01b5ad","Type":"ContainerStarted","Data":"2828d1ecfd6b4790649fd340bcbbd6e8aacc6cf483dc00dce4adc5d8d14ef5be"} Dec 10 10:45:28 crc kubenswrapper[4780]: I1210 10:45:28.462770 4780 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 10 10:45:28 crc kubenswrapper[4780]: I1210 10:45:28.465850 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:28 crc kubenswrapper[4780]: I1210 10:45:28.465944 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:28 crc kubenswrapper[4780]: I1210 10:45:28.465962 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:28 crc kubenswrapper[4780]: I1210 10:45:28.466148 4780 kubelet_node_status.go:76] "Attempting to register node" node="crc" Dec 10 10:45:28 crc kubenswrapper[4780]: I1210 10:45:28.474180 4780 kubelet_node_status.go:115] "Node was previously registered" node="crc" Dec 10 10:45:28 crc kubenswrapper[4780]: I1210 10:45:28.474526 4780 kubelet_node_status.go:79] "Successfully registered node" node="crc" Dec 10 10:45:28 crc kubenswrapper[4780]: 
I1210 10:45:28.475734 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:28 crc kubenswrapper[4780]: I1210 10:45:28.475778 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:28 crc kubenswrapper[4780]: I1210 10:45:28.475791 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:28 crc kubenswrapper[4780]: I1210 10:45:28.475812 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:28 crc kubenswrapper[4780]: I1210 10:45:28.475843 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:28Z","lastTransitionTime":"2025-12-10T10:45:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:28 crc kubenswrapper[4780]: E1210 10:45:28.490456 4780 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:45:28Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:28Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:45:28Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:28Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:45:28Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:28Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:45:28Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:28Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"90085e8a-5ea9-4564-85e4-5635b00d094d\\\",\\\"systemUUID\\\":\\\"0182e509-70c5-4f26-9ad3-610230bb601e\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 10 10:45:28 crc kubenswrapper[4780]: I1210 10:45:28.494901 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:28 crc kubenswrapper[4780]: I1210 10:45:28.494980 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:28 crc kubenswrapper[4780]: I1210 10:45:28.495000 4780 
kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:28 crc kubenswrapper[4780]: I1210 10:45:28.495016 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:28 crc kubenswrapper[4780]: I1210 10:45:28.495026 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:28Z","lastTransitionTime":"2025-12-10T10:45:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:28 crc kubenswrapper[4780]: E1210 10:45:28.506891 4780 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:45:28Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:28Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:45:28Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:28Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:45:28Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:28Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:45:28Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:28Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"90085e8a-5ea9-4564-85e4-5635b00d094d\\\",\\\"systemUUID\\\":\\\"0182e509-70c5-4f26-9ad3-610230bb601e\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 10 10:45:28 crc kubenswrapper[4780]: I1210 10:45:28.511256 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:28 crc kubenswrapper[4780]: I1210 10:45:28.511297 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:28 crc kubenswrapper[4780]: I1210 10:45:28.511309 4780 
kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:28 crc kubenswrapper[4780]: I1210 10:45:28.511328 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:28 crc kubenswrapper[4780]: I1210 10:45:28.511341 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:28Z","lastTransitionTime":"2025-12-10T10:45:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:28 crc kubenswrapper[4780]: E1210 10:45:28.523392 4780 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:45:28Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:28Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:45:28Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:28Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:45:28Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:28Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:45:28Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:28Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"90085e8a-5ea9-4564-85e4-5635b00d094d\\\",\\\"systemUUID\\\":\\\"0182e509-70c5-4f26-9ad3-610230bb601e\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 10 10:45:28 crc kubenswrapper[4780]: I1210 10:45:28.529626 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:28 crc kubenswrapper[4780]: I1210 10:45:28.529665 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:28 crc kubenswrapper[4780]: I1210 10:45:28.529676 4780 
kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:28 crc kubenswrapper[4780]: I1210 10:45:28.529718 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:28 crc kubenswrapper[4780]: I1210 10:45:28.529730 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:28Z","lastTransitionTime":"2025-12-10T10:45:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:28 crc kubenswrapper[4780]: E1210 10:45:28.541232 4780 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:45:28Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:28Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:45:28Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:28Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:45:28Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:28Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:45:28Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:28Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"90085e8a-5ea9-4564-85e4-5635b00d094d\\\",\\\"systemUUID\\\":\\\"0182e509-70c5-4f26-9ad3-610230bb601e\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 10 10:45:28 crc kubenswrapper[4780]: I1210 10:45:28.546627 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:28 crc kubenswrapper[4780]: I1210 10:45:28.546670 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:28 crc kubenswrapper[4780]: I1210 10:45:28.546680 4780 
kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:28 crc kubenswrapper[4780]: I1210 10:45:28.546696 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:28 crc kubenswrapper[4780]: I1210 10:45:28.546707 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:28Z","lastTransitionTime":"2025-12-10T10:45:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:28 crc kubenswrapper[4780]: E1210 10:45:28.558978 4780 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:45:28Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:28Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:45:28Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:28Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:45:28Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:28Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:45:28Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:28Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"90085e8a-5ea9-4564-85e4-5635b00d094d\\\",\\\"systemUUID\\\":\\\"0182e509-70c5-4f26-9ad3-610230bb601e\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 10 10:45:28 crc kubenswrapper[4780]: E1210 10:45:28.559189 4780 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Dec 10 10:45:28 crc kubenswrapper[4780]: I1210 10:45:28.561982 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:28 crc kubenswrapper[4780]: I1210 10:45:28.562020 4780 
kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:28 crc kubenswrapper[4780]: I1210 10:45:28.562032 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:28 crc kubenswrapper[4780]: I1210 10:45:28.562052 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:28 crc kubenswrapper[4780]: I1210 10:45:28.562063 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:28Z","lastTransitionTime":"2025-12-10T10:45:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:28 crc kubenswrapper[4780]: I1210 10:45:28.665189 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:28 crc kubenswrapper[4780]: I1210 10:45:28.665226 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:28 crc kubenswrapper[4780]: I1210 10:45:28.665235 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:28 crc kubenswrapper[4780]: I1210 10:45:28.665252 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:28 crc kubenswrapper[4780]: I1210 10:45:28.665262 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:28Z","lastTransitionTime":"2025-12-10T10:45:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:28 crc kubenswrapper[4780]: I1210 10:45:28.805567 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:28 crc kubenswrapper[4780]: I1210 10:45:28.805630 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:28 crc kubenswrapper[4780]: I1210 10:45:28.805643 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:28 crc kubenswrapper[4780]: I1210 10:45:28.805669 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:28 crc kubenswrapper[4780]: I1210 10:45:28.805685 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:28Z","lastTransitionTime":"2025-12-10T10:45:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:29 crc kubenswrapper[4780]: I1210 10:45:28.978743 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 10 10:45:29 crc kubenswrapper[4780]: I1210 10:45:28.978818 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 10 10:45:29 crc kubenswrapper[4780]: E1210 10:45:28.978955 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 10 10:45:29 crc kubenswrapper[4780]: I1210 10:45:28.978985 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 10 10:45:29 crc kubenswrapper[4780]: E1210 10:45:28.979147 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 10 10:45:29 crc kubenswrapper[4780]: E1210 10:45:29.020656 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 10 10:45:29 crc kubenswrapper[4780]: I1210 10:45:29.041171 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:29 crc kubenswrapper[4780]: I1210 10:45:29.041311 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:29 crc kubenswrapper[4780]: I1210 10:45:29.041327 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:29 crc kubenswrapper[4780]: I1210 10:45:29.041379 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:29 crc kubenswrapper[4780]: I1210 10:45:29.041395 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:29Z","lastTransitionTime":"2025-12-10T10:45:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:45:29 crc kubenswrapper[4780]: I1210 10:45:29.147992 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:29 crc kubenswrapper[4780]: I1210 10:45:29.148045 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:29 crc kubenswrapper[4780]: I1210 10:45:29.148058 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:29 crc kubenswrapper[4780]: I1210 10:45:29.148078 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:29 crc kubenswrapper[4780]: I1210 10:45:29.148088 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:29Z","lastTransitionTime":"2025-12-10T10:45:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:29 crc kubenswrapper[4780]: I1210 10:45:29.225737 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 10 10:45:29 crc kubenswrapper[4780]: I1210 10:45:29.225865 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 10 10:45:29 crc kubenswrapper[4780]: I1210 10:45:29.225899 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 10 10:45:29 crc kubenswrapper[4780]: I1210 10:45:29.225941 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 10 10:45:29 crc kubenswrapper[4780]: I1210 10:45:29.225975 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 10 10:45:29 crc kubenswrapper[4780]: E1210 10:45:29.226115 4780 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object 
"openshift-network-diagnostics"/"kube-root-ca.crt" not registered Dec 10 10:45:29 crc kubenswrapper[4780]: E1210 10:45:29.226133 4780 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Dec 10 10:45:29 crc kubenswrapper[4780]: E1210 10:45:29.226145 4780 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Dec 10 10:45:29 crc kubenswrapper[4780]: E1210 10:45:29.226225 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-12-10 10:45:31.226194395 +0000 UTC m=+36.079587838 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Dec 10 10:45:29 crc kubenswrapper[4780]: E1210 10:45:29.226305 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-10 10:45:31.226294837 +0000 UTC m=+36.079688280 (durationBeforeRetry 2s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:45:29 crc kubenswrapper[4780]: E1210 10:45:29.226360 4780 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Dec 10 10:45:29 crc kubenswrapper[4780]: E1210 10:45:29.226372 4780 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Dec 10 10:45:29 crc kubenswrapper[4780]: E1210 10:45:29.226380 4780 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Dec 10 10:45:29 crc kubenswrapper[4780]: E1210 10:45:29.226419 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. 
No retries permitted until 2025-12-10 10:45:31.22639564 +0000 UTC m=+36.079789083 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Dec 10 10:45:29 crc kubenswrapper[4780]: E1210 10:45:29.226454 4780 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Dec 10 10:45:29 crc kubenswrapper[4780]: E1210 10:45:29.226491 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-12-10 10:45:31.226477692 +0000 UTC m=+36.079871155 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Dec 10 10:45:29 crc kubenswrapper[4780]: E1210 10:45:29.226558 4780 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Dec 10 10:45:29 crc kubenswrapper[4780]: E1210 10:45:29.226592 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-12-10 10:45:31.226582865 +0000 UTC m=+36.079976298 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Dec 10 10:45:29 crc kubenswrapper[4780]: I1210 10:45:29.254130 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:29 crc kubenswrapper[4780]: I1210 10:45:29.254169 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:29 crc kubenswrapper[4780]: I1210 10:45:29.254183 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:29 crc kubenswrapper[4780]: I1210 10:45:29.254227 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:29 crc kubenswrapper[4780]: I1210 10:45:29.254239 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:29Z","lastTransitionTime":"2025-12-10T10:45:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:29 crc kubenswrapper[4780]: I1210 10:45:29.341420 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/node-resolver-2lx8w" event={"ID":"8a3251eb-408c-42f1-b74d-261cb45eab71","Type":"ContainerStarted","Data":"414f485a56b9fb2fbea2bbc9687a588aabda6239cad44f8116ec680597dbe667"} Dec 10 10:45:29 crc kubenswrapper[4780]: I1210 10:45:29.343442 4780 generic.go:334] "Generic (PLEG): container finished" podID="cc22221d-0c02-4e8c-8314-c2e6d9290b5e" containerID="ada0c2d5e74e1805777a330c39a3c1c0fb677f876387207a83c79bd7ed06775a" exitCode=0 Dec 10 10:45:29 crc kubenswrapper[4780]: I1210 10:45:29.343510 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-fpl55" event={"ID":"cc22221d-0c02-4e8c-8314-c2e6d9290b5e","Type":"ContainerDied","Data":"ada0c2d5e74e1805777a330c39a3c1c0fb677f876387207a83c79bd7ed06775a"} Dec 10 10:45:29 crc kubenswrapper[4780]: I1210 10:45:29.346623 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-rwfxn" event={"ID":"5e522fb8-b104-4f14-a3a2-628fbe0ef36c","Type":"ContainerStarted","Data":"5855e463a188d72cb8eac92bd97b9b2f91550a9a5437d7df83405bbe7a104f5d"} Dec 10 10:45:29 crc kubenswrapper[4780]: I1210 10:45:29.370196 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6bf1dca1-b191-4796-b326-baac53e84045\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sh92h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sh92h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:27Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-xhdr5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 10 10:45:29 crc kubenswrapper[4780]: I1210 10:45:29.370261 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-8cwb7" event={"ID":"deadb49b-61b8-435f-8168-d7bd3c01b5ad","Type":"ContainerStarted","Data":"b46e1864cd02c0ab49ad21329dedf09667d52948a3f59eac7f32302cc0b48bda"} Dec 10 10:45:29 crc kubenswrapper[4780]: I1210 10:45:29.370243 4780 
kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:29 crc kubenswrapper[4780]: I1210 10:45:29.370351 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:29 crc kubenswrapper[4780]: I1210 10:45:29.370369 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:29 crc kubenswrapper[4780]: I1210 10:45:29.370393 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:29 crc kubenswrapper[4780]: I1210 10:45:29.370427 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:29Z","lastTransitionTime":"2025-12-10T10:45:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:29 crc kubenswrapper[4780]: I1210 10:45:29.372787 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" event={"ID":"6bf1dca1-b191-4796-b326-baac53e84045","Type":"ContainerStarted","Data":"439376b6f14a03790c4d49523ce4f21e3ee73ee0a81954f873409cf956048f53"} Dec 10 10:45:29 crc kubenswrapper[4780]: I1210 10:45:29.372845 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" event={"ID":"6bf1dca1-b191-4796-b326-baac53e84045","Type":"ContainerStarted","Data":"57ec3f3a4da992b4288db7d6633da18e479fdc56b10c503a2a44368671882f0d"} Dec 10 10:45:29 crc kubenswrapper[4780]: I1210 10:45:29.389524 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" event={"ID":"ef543e1b-8068-4ea3-b32a-61027b32e95d","Type":"ContainerStarted","Data":"acd480054c5d19efdbcce679b2246ce2972593aa710b1074a2fa90006f351c66"} Dec 10 10:45:29 crc kubenswrapper[4780]: I1210 10:45:29.389618 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" event={"ID":"ef543e1b-8068-4ea3-b32a-61027b32e95d","Type":"ContainerStarted","Data":"7c28d1629c1d97373d475f14e54585e81a67114d5a69e21be9d1e750040d6d5c"} Dec 10 10:45:29 crc kubenswrapper[4780]: I1210 10:45:29.391740 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" event={"ID":"37a5e44f-9a88-4405-be8a-b645485e7312","Type":"ContainerStarted","Data":"738eca4a4800d0b83e165aaad1b57939c0fca3335dbf043de168bcc240b2c32c"} Dec 10 10:45:29 crc kubenswrapper[4780]: I1210 10:45:29.396953 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 10 10:45:29 crc kubenswrapper[4780]: I1210 10:45:29.412741 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 10 10:45:29 crc kubenswrapper[4780]: I1210 10:45:29.473098 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:29 crc kubenswrapper[4780]: I1210 10:45:29.473163 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:29 crc kubenswrapper[4780]: I1210 10:45:29.473176 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:29 crc kubenswrapper[4780]: I1210 10:45:29.473193 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:29 crc kubenswrapper[4780]: I1210 10:45:29.473203 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:29Z","lastTransitionTime":"2025-12-10T10:45:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:29 crc kubenswrapper[4780]: I1210 10:45:29.480081 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 10 10:45:29 crc kubenswrapper[4780]: I1210 10:45:29.490540 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 10 10:45:29 crc kubenswrapper[4780]: I1210 10:45:29.502650 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-8cwb7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"deadb49b-61b8-435f-8168-d7bd3c01b5ad\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r67pg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:27Z\\\"}}\" for pod \"openshift-multus\"/\"multus-8cwb7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 10 10:45:29 crc kubenswrapper[4780]: I1210 10:45:29.519410 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-rwfxn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e522fb8-b104-4f14-a3a2-628fbe0ef36c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin 
routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"last
State\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:27Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-rwfxn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 
127.0.0.1:9743: connect: connection refused" Dec 10 10:45:29 crc kubenswrapper[4780]: I1210 10:45:29.577652 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:29 crc kubenswrapper[4780]: I1210 10:45:29.577701 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:29 crc kubenswrapper[4780]: I1210 10:45:29.577713 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:29 crc kubenswrapper[4780]: I1210 10:45:29.577734 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:29 crc kubenswrapper[4780]: I1210 10:45:29.577784 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:29Z","lastTransitionTime":"2025-12-10T10:45:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:29 crc kubenswrapper[4780]: I1210 10:45:29.588983 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 10 10:45:29 crc kubenswrapper[4780]: I1210 10:45:29.623119 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-fpl55" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cc22221d-0c02-4e8c-8314-c2e6d9290b5e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with incomplete status: [kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:27Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-fpl55\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 10 10:45:29 crc kubenswrapper[4780]: I1210 10:45:29.637080 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 10 10:45:29 crc kubenswrapper[4780]: I1210 10:45:29.649665 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-2lx8w" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8a3251eb-408c-42f1-b74d-261cb45eab71\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://414f485a56b9fb2fbea2bbc9687a588aabda6239cad44f8116ec680597dbe667\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lgkg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:27Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-2lx8w\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 10 10:45:29 crc kubenswrapper[4780]: I1210 10:45:29.662484 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 10 10:45:29 crc kubenswrapper[4780]: I1210 10:45:29.682954 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:29 crc kubenswrapper[4780]: I1210 10:45:29.683295 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:29 crc kubenswrapper[4780]: I1210 10:45:29.683384 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:29 crc kubenswrapper[4780]: I1210 10:45:29.683473 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:29 crc kubenswrapper[4780]: I1210 10:45:29.683576 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:29Z","lastTransitionTime":"2025-12-10T10:45:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:45:29 crc kubenswrapper[4780]: I1210 10:45:29.707724 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-2lx8w" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8a3251eb-408c-42f1-b74d-261cb45eab71\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://414f485a56b9fb2fbea2bbc9687a588aabda6239cad44f8116ec680597dbe667\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lgkg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:27Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-2lx8w\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 10 10:45:29 crc kubenswrapper[4780]: I1210 10:45:29.727876 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-fpl55" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"cc22221d-0c02-4e8c-8314-c2e6d9290b5e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-op
envswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{
},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ada0c2d5e74e1805777a330c39a3c1c0fb677f876387207a83c79bd7ed06775a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36
cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ada0c2d5e74e1805777a330c39a3c1c0fb677f876387207a83c79bd7ed06775a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:27Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-fpl55\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 10 10:45:29 crc kubenswrapper[4780]: I1210 10:45:29.737370 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 10 10:45:29 crc kubenswrapper[4780]: I1210 10:45:29.749334 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6bf1dca1-b191-4796-b326-baac53e84045\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://439376b6f14a03790c4d49523ce4f21e3ee73ee0a81954f873409cf956048f53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sh92h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://57ec3f3a4da992b4288db7d6633da18e479fdc56b10c503a2a44368671882f0d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\
":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sh92h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:27Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-xhdr5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 10 10:45:29 crc kubenswrapper[4780]: I1210 10:45:29.760464 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 10 10:45:29 crc kubenswrapper[4780]: I1210 10:45:29.804036 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:29 crc kubenswrapper[4780]: I1210 10:45:29.804493 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:29 crc kubenswrapper[4780]: I1210 10:45:29.804831 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:29 crc kubenswrapper[4780]: I1210 10:45:29.805030 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:29 crc kubenswrapper[4780]: I1210 10:45:29.805126 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:29Z","lastTransitionTime":"2025-12-10T10:45:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:45:29 crc kubenswrapper[4780]: I1210 10:45:29.906299 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://acd480054c5d19efdbcce679b2246ce2972593aa710b1074a2fa90006f351c66\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7c28d1629c1d97373d475f14e54585e81a67114d5a69e21be9d1e750040d6d5c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 10 10:45:29 crc kubenswrapper[4780]: I1210 10:45:29.911169 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:29 crc kubenswrapper[4780]: I1210 10:45:29.912024 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Dec 10 10:45:29 crc kubenswrapper[4780]: I1210 10:45:29.912425 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:29 crc kubenswrapper[4780]: I1210 10:45:29.912528 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:29 crc kubenswrapper[4780]: I1210 10:45:29.912631 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:29Z","lastTransitionTime":"2025-12-10T10:45:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:29 crc kubenswrapper[4780]: I1210 10:45:29.928533 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://738eca4a4800d0b83e165aaad1b57939c0fca3335dbf043de168bcc240b2c32c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 10 10:45:30 crc kubenswrapper[4780]: I1210 10:45:30.002179 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 10 10:45:30 crc kubenswrapper[4780]: I1210 10:45:30.016435 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:30 crc kubenswrapper[4780]: I1210 10:45:30.016484 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:30 crc kubenswrapper[4780]: I1210 10:45:30.016516 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:30 crc kubenswrapper[4780]: I1210 10:45:30.016536 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:30 crc kubenswrapper[4780]: I1210 10:45:30.016545 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:30Z","lastTransitionTime":"2025-12-10T10:45:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:45:30 crc kubenswrapper[4780]: I1210 10:45:30.031865 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-8cwb7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"deadb49b-61b8-435f-8168-d7bd3c01b5ad\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b46e1864cd02c0ab49ad21329dedf09667d52948a3f59eac7f32302cc0b48bda\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r67pg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126
.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:27Z\\\"}}\" for pod \"openshift-multus\"/\"multus-8cwb7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 10 10:45:30 crc kubenswrapper[4780]: I1210 10:45:30.050969 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-rwfxn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e522fb8-b104-4f14-a3a2-628fbe0ef36c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5855e463a188d72cb8eac92bd97b9b2f91550a9a5437d7df83405bbe7a104f5d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"}
,{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:27Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-rwfxn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 10 10:45:30 crc kubenswrapper[4780]: I1210 10:45:30.119228 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:30 crc kubenswrapper[4780]: I1210 10:45:30.119277 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Dec 10 10:45:30 crc kubenswrapper[4780]: I1210 10:45:30.119290 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:30 crc kubenswrapper[4780]: I1210 10:45:30.119309 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:30 crc kubenswrapper[4780]: I1210 10:45:30.119325 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:30Z","lastTransitionTime":"2025-12-10T10:45:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:30 crc kubenswrapper[4780]: I1210 10:45:30.233653 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:30 crc kubenswrapper[4780]: I1210 10:45:30.233716 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:30 crc kubenswrapper[4780]: I1210 10:45:30.233730 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:30 crc kubenswrapper[4780]: I1210 10:45:30.233753 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:30 crc kubenswrapper[4780]: I1210 10:45:30.233770 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:30Z","lastTransitionTime":"2025-12-10T10:45:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:30 crc kubenswrapper[4780]: I1210 10:45:30.345535 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:30 crc kubenswrapper[4780]: I1210 10:45:30.345585 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:30 crc kubenswrapper[4780]: I1210 10:45:30.345597 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:30 crc kubenswrapper[4780]: I1210 10:45:30.345619 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:30 crc kubenswrapper[4780]: I1210 10:45:30.345631 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:30Z","lastTransitionTime":"2025-12-10T10:45:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:45:30 crc kubenswrapper[4780]: I1210 10:45:30.449839 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:30 crc kubenswrapper[4780]: I1210 10:45:30.450148 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:30 crc kubenswrapper[4780]: I1210 10:45:30.450232 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:30 crc kubenswrapper[4780]: I1210 10:45:30.450302 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:30 crc kubenswrapper[4780]: I1210 10:45:30.450412 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:30Z","lastTransitionTime":"2025-12-10T10:45:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:30 crc kubenswrapper[4780]: I1210 10:45:30.456273 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-fpl55" event={"ID":"cc22221d-0c02-4e8c-8314-c2e6d9290b5e","Type":"ContainerStarted","Data":"782bcb7eb8341b73ff1c32c5af00aaefa2a513e5d1672f9f7df13cd05467131c"} Dec 10 10:45:30 crc kubenswrapper[4780]: I1210 10:45:30.456404 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-fpl55" event={"ID":"cc22221d-0c02-4e8c-8314-c2e6d9290b5e","Type":"ContainerStarted","Data":"c39c3d56584d0939bfa9be938be09db5c93ddf50e0a56cacaa6e30c7b79475a1"} Dec 10 10:45:30 crc kubenswrapper[4780]: I1210 10:45:30.552995 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:30 crc kubenswrapper[4780]: I1210 10:45:30.553038 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:30 crc kubenswrapper[4780]: I1210 10:45:30.553049 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:30 crc kubenswrapper[4780]: I1210 10:45:30.553066 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:30 crc kubenswrapper[4780]: I1210 10:45:30.553079 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:30Z","lastTransitionTime":"2025-12-10T10:45:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:30 crc kubenswrapper[4780]: I1210 10:45:30.607884 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/node-ca-msm77"] Dec 10 10:45:30 crc kubenswrapper[4780]: I1210 10:45:30.608409 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/node-ca-msm77" Dec 10 10:45:30 crc kubenswrapper[4780]: I1210 10:45:30.618241 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"node-ca-dockercfg-4777p" Dec 10 10:45:30 crc kubenswrapper[4780]: I1210 10:45:30.618546 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"image-registry-certificates" Dec 10 10:45:30 crc kubenswrapper[4780]: I1210 10:45:30.618771 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"kube-root-ca.crt" Dec 10 10:45:30 crc kubenswrapper[4780]: I1210 10:45:30.618936 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"openshift-service-ca.crt" Dec 10 10:45:30 crc kubenswrapper[4780]: I1210 10:45:30.640357 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://acd480054c5d19efdbcce679b2246ce2972593aa710b1074a2fa90006f351c66\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7c28d1629c1d97373d475f14e54585e81a67114d5a69e21be9d1e750040d6d5c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/sec
rets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 10 10:45:30 crc kubenswrapper[4780]: I1210 10:45:30.655498 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-msm77" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c1046aa3-9bf1-4013-8e8d-5629f08ed5e2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:30Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:30Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:30Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2ttwk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:30Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-msm77\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 10 10:45:30 crc kubenswrapper[4780]: I1210 10:45:30.656380 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:30 crc kubenswrapper[4780]: I1210 10:45:30.656424 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:30 crc kubenswrapper[4780]: I1210 10:45:30.656437 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientPID" Dec 10 10:45:30 crc kubenswrapper[4780]: I1210 10:45:30.656464 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:30 crc kubenswrapper[4780]: I1210 10:45:30.656480 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:30Z","lastTransitionTime":"2025-12-10T10:45:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:30 crc kubenswrapper[4780]: I1210 10:45:30.672715 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 10 10:45:30 crc kubenswrapper[4780]: I1210 10:45:30.689801 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-rwfxn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e522fb8-b104-4f14-a3a2-628fbe0ef36c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5855e463a188d72cb8eac92bd97b9b2f91550a9a5437d7df83405bbe7a104f5d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"}
,{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:27Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-rwfxn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 10 10:45:30 crc kubenswrapper[4780]: I1210 10:45:30.707779 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://738eca4a4800d0b83e165aaad1b57939c0fca3335dbf043de168bcc240b2c32c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 10 10:45:30 crc kubenswrapper[4780]: I1210 10:45:30.836383 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:30Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:30 crc kubenswrapper[4780]: I1210 10:45:30.837008 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/c1046aa3-9bf1-4013-8e8d-5629f08ed5e2-host\") pod \"node-ca-msm77\" (UID: \"c1046aa3-9bf1-4013-8e8d-5629f08ed5e2\") " pod="openshift-image-registry/node-ca-msm77" Dec 10 10:45:30 crc kubenswrapper[4780]: I1210 10:45:30.837062 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2ttwk\" (UniqueName: \"kubernetes.io/projected/c1046aa3-9bf1-4013-8e8d-5629f08ed5e2-kube-api-access-2ttwk\") pod \"node-ca-msm77\" (UID: \"c1046aa3-9bf1-4013-8e8d-5629f08ed5e2\") " pod="openshift-image-registry/node-ca-msm77" Dec 10 10:45:30 crc kubenswrapper[4780]: I1210 10:45:30.837127 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/c1046aa3-9bf1-4013-8e8d-5629f08ed5e2-serviceca\") pod \"node-ca-msm77\" (UID: \"c1046aa3-9bf1-4013-8e8d-5629f08ed5e2\") " pod="openshift-image-registry/node-ca-msm77" Dec 10 10:45:30 crc kubenswrapper[4780]: I1210 10:45:30.838856 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:30 crc kubenswrapper[4780]: I1210 10:45:30.838904 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:30 crc kubenswrapper[4780]: I1210 10:45:30.838948 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:30 crc kubenswrapper[4780]: I1210 10:45:30.838971 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:30 crc kubenswrapper[4780]: I1210 10:45:30.838984 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:30Z","lastTransitionTime":"2025-12-10T10:45:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:45:30 crc kubenswrapper[4780]: I1210 10:45:30.861240 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-8cwb7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"deadb49b-61b8-435f-8168-d7bd3c01b5ad\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b46e1864cd02c0ab49ad21329dedf09667d52948a3f59eac7f32302cc0b48bda\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r67pg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126
.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:27Z\\\"}}\" for pod \"openshift-multus\"/\"multus-8cwb7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:30Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:30 crc kubenswrapper[4780]: I1210 10:45:30.878967 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:30Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:30 crc kubenswrapper[4780]: I1210 10:45:30.892660 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-2lx8w" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8a3251eb-408c-42f1-b74d-261cb45eab71\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://414f485a56b9fb2fbea2bbc9687a588aabda6239cad44f8116ec680597dbe667\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lgkg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:27Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-2lx8w\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2025-12-10T10:45:30Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:30 crc kubenswrapper[4780]: I1210 10:45:30.937781 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/c1046aa3-9bf1-4013-8e8d-5629f08ed5e2-host\") pod \"node-ca-msm77\" (UID: \"c1046aa3-9bf1-4013-8e8d-5629f08ed5e2\") " pod="openshift-image-registry/node-ca-msm77" Dec 10 10:45:30 crc kubenswrapper[4780]: I1210 10:45:30.937876 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2ttwk\" (UniqueName: \"kubernetes.io/projected/c1046aa3-9bf1-4013-8e8d-5629f08ed5e2-kube-api-access-2ttwk\") pod \"node-ca-msm77\" (UID: \"c1046aa3-9bf1-4013-8e8d-5629f08ed5e2\") " pod="openshift-image-registry/node-ca-msm77" Dec 10 10:45:30 crc kubenswrapper[4780]: I1210 10:45:30.937953 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/c1046aa3-9bf1-4013-8e8d-5629f08ed5e2-serviceca\") pod \"node-ca-msm77\" (UID: \"c1046aa3-9bf1-4013-8e8d-5629f08ed5e2\") " pod="openshift-image-registry/node-ca-msm77" Dec 10 10:45:30 crc kubenswrapper[4780]: I1210 10:45:30.937963 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/c1046aa3-9bf1-4013-8e8d-5629f08ed5e2-host\") pod \"node-ca-msm77\" (UID: \"c1046aa3-9bf1-4013-8e8d-5629f08ed5e2\") " pod="openshift-image-registry/node-ca-msm77" Dec 10 10:45:30 crc kubenswrapper[4780]: I1210 10:45:30.939203 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/c1046aa3-9bf1-4013-8e8d-5629f08ed5e2-serviceca\") pod \"node-ca-msm77\" (UID: \"c1046aa3-9bf1-4013-8e8d-5629f08ed5e2\") " pod="openshift-image-registry/node-ca-msm77" Dec 10 10:45:30 crc kubenswrapper[4780]: I1210 10:45:30.941679 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:30 crc kubenswrapper[4780]: I1210 10:45:30.941734 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:30 crc kubenswrapper[4780]: I1210 10:45:30.941750 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:30 crc kubenswrapper[4780]: I1210 10:45:30.941775 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:30 crc kubenswrapper[4780]: I1210 10:45:30.941792 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:30Z","lastTransitionTime":"2025-12-10T10:45:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:30 crc kubenswrapper[4780]: I1210 10:45:30.958474 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 10 10:45:30 crc kubenswrapper[4780]: I1210 10:45:30.958500 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 10 10:45:30 crc kubenswrapper[4780]: I1210 10:45:30.958542 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 10 10:45:30 crc kubenswrapper[4780]: E1210 10:45:30.958630 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 10 10:45:30 crc kubenswrapper[4780]: E1210 10:45:30.959061 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 10 10:45:30 crc kubenswrapper[4780]: E1210 10:45:30.959152 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 10 10:45:31 crc kubenswrapper[4780]: I1210 10:45:31.007643 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2ttwk\" (UniqueName: \"kubernetes.io/projected/c1046aa3-9bf1-4013-8e8d-5629f08ed5e2-kube-api-access-2ttwk\") pod \"node-ca-msm77\" (UID: \"c1046aa3-9bf1-4013-8e8d-5629f08ed5e2\") " pod="openshift-image-registry/node-ca-msm77" Dec 10 10:45:31 crc kubenswrapper[4780]: I1210 10:45:31.045204 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:31 crc kubenswrapper[4780]: I1210 10:45:31.045242 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:31 crc kubenswrapper[4780]: I1210 10:45:31.045256 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:31 crc kubenswrapper[4780]: I1210 10:45:31.045276 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:31 crc kubenswrapper[4780]: I1210 10:45:31.045286 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:31Z","lastTransitionTime":"2025-12-10T10:45:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:45:31 crc kubenswrapper[4780]: I1210 10:45:31.093035 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-fpl55" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cc22221d-0c02-4e8c-8314-c2e6d9290b5e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":fa
lse,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\
\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ada0c2d5e74e1805777a330c39a3c1c0fb677f876387207a83
c79bd7ed06775a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ada0c2d5e74e1805777a330c39a3c1c0fb677f876387207a83c79bd7ed06775a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:27Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-fpl55\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:31Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:31 crc kubenswrapper[4780]: I1210 10:45:31.124083 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:31Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:31 crc kubenswrapper[4780]: I1210 10:45:31.142470 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6bf1dca1-b191-4796-b326-baac53e84045\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://439376b6f14a03790c4d49523ce4f21e3ee73ee0a81954f873409cf956048f53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sh92h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://57ec3f3a4da992b4288db7d6633da18e479fdc56b10c503a2a44368671882f0d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art
-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sh92h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:27Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-xhdr5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:31Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:31 crc kubenswrapper[4780]: I1210 10:45:31.149565 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:31 crc kubenswrapper[4780]: I1210 10:45:31.149731 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:31 crc kubenswrapper[4780]: I1210 10:45:31.149755 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:31 crc kubenswrapper[4780]: I1210 10:45:31.149781 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:31 crc kubenswrapper[4780]: I1210 10:45:31.149796 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:31Z","lastTransitionTime":"2025-12-10T10:45:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:45:31 crc kubenswrapper[4780]: I1210 10:45:31.246749 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 10 10:45:31 crc kubenswrapper[4780]: I1210 10:45:31.247333 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 10 10:45:31 crc kubenswrapper[4780]: I1210 10:45:31.247380 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 10 10:45:31 crc kubenswrapper[4780]: I1210 10:45:31.247413 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 10 10:45:31 crc kubenswrapper[4780]: I1210 10:45:31.247443 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 10 10:45:31 crc kubenswrapper[4780]: E1210 10:45:31.247602 4780 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Dec 10 10:45:31 crc kubenswrapper[4780]: E1210 10:45:31.247624 4780 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Dec 10 10:45:31 crc kubenswrapper[4780]: E1210 10:45:31.247638 4780 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Dec 10 10:45:31 crc kubenswrapper[4780]: E1210 10:45:31.247699 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-12-10 10:45:35.247679186 +0000 UTC m=+40.101072629 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Dec 10 10:45:31 crc kubenswrapper[4780]: E1210 10:45:31.248207 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-10 10:45:35.248194969 +0000 UTC m=+40.101588412 (durationBeforeRetry 4s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:45:31 crc kubenswrapper[4780]: E1210 10:45:31.248273 4780 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Dec 10 10:45:31 crc kubenswrapper[4780]: E1210 10:45:31.248287 4780 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Dec 10 10:45:31 crc kubenswrapper[4780]: E1210 10:45:31.248300 4780 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Dec 10 10:45:31 crc kubenswrapper[4780]: E1210 10:45:31.248353 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-12-10 10:45:35.248343723 +0000 UTC m=+40.101737166 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Dec 10 10:45:31 crc kubenswrapper[4780]: E1210 10:45:31.248408 4780 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Dec 10 10:45:31 crc kubenswrapper[4780]: E1210 10:45:31.248443 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-12-10 10:45:35.248433325 +0000 UTC m=+40.101826768 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Dec 10 10:45:31 crc kubenswrapper[4780]: E1210 10:45:31.248470 4780 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Dec 10 10:45:31 crc kubenswrapper[4780]: E1210 10:45:31.248593 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-12-10 10:45:35.248569498 +0000 UTC m=+40.101962951 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Dec 10 10:45:31 crc kubenswrapper[4780]: I1210 10:45:31.248686 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/node-ca-msm77" Dec 10 10:45:31 crc kubenswrapper[4780]: I1210 10:45:31.270123 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:31 crc kubenswrapper[4780]: I1210 10:45:31.270188 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:31 crc kubenswrapper[4780]: I1210 10:45:31.270199 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:31 crc kubenswrapper[4780]: I1210 10:45:31.270229 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:31 crc kubenswrapper[4780]: I1210 10:45:31.270242 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:31Z","lastTransitionTime":"2025-12-10T10:45:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:45:31 crc kubenswrapper[4780]: I1210 10:45:31.283009 4780 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 10 10:45:31 crc kubenswrapper[4780]: I1210 10:45:31.296075 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/kube-apiserver-crc"] Dec 10 10:45:31 crc kubenswrapper[4780]: I1210 10:45:31.308483 4780 scope.go:117] "RemoveContainer" containerID="f71a2483b5040f737133b92baa6ea162f365c492a0d860a50ba8eabd7ca23519" Dec 10 10:45:31 crc kubenswrapper[4780]: E1210 10:45:31.308859 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver-check-endpoints\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\"" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" Dec 10 10:45:31 crc kubenswrapper[4780]: W1210 10:45:31.321635 4780 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podc1046aa3_9bf1_4013_8e8d_5629f08ed5e2.slice/crio-13a37deae8ff9ccd48306548f6713afee1c959d593407ea7956575b7ac4c1bb7 WatchSource:0}: Error finding container 13a37deae8ff9ccd48306548f6713afee1c959d593407ea7956575b7ac4c1bb7: Status 404 returned error can't find the container with id 13a37deae8ff9ccd48306548f6713afee1c959d593407ea7956575b7ac4c1bb7 Dec 10 10:45:31 crc kubenswrapper[4780]: I1210 10:45:31.375493 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:31 crc kubenswrapper[4780]: I1210 10:45:31.375549 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:31 crc kubenswrapper[4780]: I1210 10:45:31.375559 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:31 crc kubenswrapper[4780]: I1210 10:45:31.375581 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:31 crc kubenswrapper[4780]: I1210 10:45:31.375593 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:31Z","lastTransitionTime":"2025-12-10T10:45:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:45:31 crc kubenswrapper[4780]: I1210 10:45:31.482904 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:31 crc kubenswrapper[4780]: I1210 10:45:31.482993 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:31 crc kubenswrapper[4780]: I1210 10:45:31.483007 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:31 crc kubenswrapper[4780]: I1210 10:45:31.483030 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:31 crc kubenswrapper[4780]: I1210 10:45:31.483044 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:31Z","lastTransitionTime":"2025-12-10T10:45:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:31 crc kubenswrapper[4780]: I1210 10:45:31.485247 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/node-ca-msm77" event={"ID":"c1046aa3-9bf1-4013-8e8d-5629f08ed5e2","Type":"ContainerStarted","Data":"13a37deae8ff9ccd48306548f6713afee1c959d593407ea7956575b7ac4c1bb7"} Dec 10 10:45:31 crc kubenswrapper[4780]: I1210 10:45:31.488605 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-fpl55" event={"ID":"cc22221d-0c02-4e8c-8314-c2e6d9290b5e","Type":"ContainerStarted","Data":"ce7c3983123be2f92944322c2cf554e67cf80a51981415d9872bfc2db9f1ed3a"} Dec 10 10:45:31 crc kubenswrapper[4780]: I1210 10:45:31.488664 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-fpl55" event={"ID":"cc22221d-0c02-4e8c-8314-c2e6d9290b5e","Type":"ContainerStarted","Data":"ef0cc518de07a141be124545f2e547c630329ff507b0e0e71e4dee9b4be5d89a"} Dec 10 10:45:31 crc kubenswrapper[4780]: I1210 10:45:31.491300 4780 generic.go:334] "Generic (PLEG): container finished" podID="5e522fb8-b104-4f14-a3a2-628fbe0ef36c" containerID="5855e463a188d72cb8eac92bd97b9b2f91550a9a5437d7df83405bbe7a104f5d" exitCode=0 Dec 10 10:45:31 crc kubenswrapper[4780]: I1210 10:45:31.491943 4780 scope.go:117] "RemoveContainer" containerID="f71a2483b5040f737133b92baa6ea162f365c492a0d860a50ba8eabd7ca23519" Dec 10 10:45:31 crc kubenswrapper[4780]: E1210 10:45:31.492119 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver-check-endpoints\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\"" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" Dec 10 10:45:31 crc kubenswrapper[4780]: I1210 10:45:31.492284 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-rwfxn" event={"ID":"5e522fb8-b104-4f14-a3a2-628fbe0ef36c","Type":"ContainerDied","Data":"5855e463a188d72cb8eac92bd97b9b2f91550a9a5437d7df83405bbe7a104f5d"} Dec 10 10:45:31 crc kubenswrapper[4780]: I1210 10:45:31.513573 4780 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://738eca4a4800d0b83e165aaad1b57939c0fca3335dbf043de168bcc240b2c32c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:31Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:31 crc kubenswrapper[4780]: I1210 10:45:31.531564 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:31Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:31 crc kubenswrapper[4780]: I1210 10:45:31.550572 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-8cwb7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"deadb49b-61b8-435f-8168-d7bd3c01b5ad\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b46e1864cd02c0ab49ad21329dedf09667d52948a3f59eac7f32302cc0b48bda\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"syste
m-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r67pg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:27Z\\\"}}\" for pod \"openshift-multus\"/\"multus-8cwb7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:31Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:31 crc kubenswrapper[4780]: I1210 10:45:31.585170 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:31 crc kubenswrapper[4780]: I1210 10:45:31.585203 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:31 crc kubenswrapper[4780]: I1210 10:45:31.585214 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:31 crc kubenswrapper[4780]: I1210 10:45:31.585231 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:31 crc kubenswrapper[4780]: I1210 10:45:31.585243 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:31Z","lastTransitionTime":"2025-12-10T10:45:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:45:31 crc kubenswrapper[4780]: I1210 10:45:31.593867 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-rwfxn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e522fb8-b104-4f14-a3a2-628fbe0ef36c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with incomplete status: [cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5855e463a188d72cb8eac92bd97b9b2f91550a9a5437d7df83405bbe7a104f5d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5855e463a188d72cb8eac92bd97b9b2f91550a9a5437d7df83405bbe7a104f5d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets
/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\
\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:27Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-rwfxn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:31Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:31 crc kubenswrapper[4780]: I1210 10:45:31.629446 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:31Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:31 crc kubenswrapper[4780]: I1210 10:45:31.705073 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-2lx8w" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8a3251eb-408c-42f1-b74d-261cb45eab71\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://414f485a56b9fb2fbea2bbc9687a588aabda6239cad44f8116ec680597dbe667\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lgkg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:27Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-2lx8w\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2025-12-10T10:45:31Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:31 crc kubenswrapper[4780]: I1210 10:45:31.737440 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-fpl55" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cc22221d-0c02-4e8c-8314-c2e6d9290b5e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\
"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\"
,\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ada0c2d5e74e1805777a330c39a3c1c0fb6
77f876387207a83c79bd7ed06775a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ada0c2d5e74e1805777a330c39a3c1c0fb677f876387207a83c79bd7ed06775a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:27Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-fpl55\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:31Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:31 crc kubenswrapper[4780]: I1210 10:45:31.755976 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:31 crc kubenswrapper[4780]: I1210 10:45:31.756023 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:31 crc kubenswrapper[4780]: I1210 10:45:31.756040 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:31 crc kubenswrapper[4780]: I1210 10:45:31.756058 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:31 crc kubenswrapper[4780]: I1210 10:45:31.756068 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:31Z","lastTransitionTime":"2025-12-10T10:45:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:45:31 crc kubenswrapper[4780]: I1210 10:45:31.758459 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"da4d38a3-53c0-417d-a86f-3496714bd352\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:56Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:56Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7f0f1f88e79f81b145ad50cc2b0a6e66de12f33c6819aaa356c8e6f7808384f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:44:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d6c4b099607f36efe13488a53c66715ba756493eaa11612cef18b8174b7b48ef\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:44:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a352e1e0c2a6fcb27ed2d53b1952586d56239ac801471f41d319d4b9390f7334\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartC
ount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:44:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f71a2483b5040f737133b92baa6ea162f365c492a0d860a50ba8eabd7ca23519\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f71a2483b5040f737133b92baa6ea162f365c492a0d860a50ba8eabd7ca23519\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-10T10:45:21Z\\\",\\\"message\\\":\\\"file observer\\\\nW1210 10:45:20.821547 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1210 10:45:20.821865 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1210 10:45:20.824662 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-536389068/tls.crt::/tmp/serving-cert-536389068/tls.key\\\\\\\"\\\\nI1210 10:45:21.107127 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1210 10:45:21.109726 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1210 10:45:21.109750 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1210 10:45:21.109791 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1210 10:45:21.109797 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1210 10:45:21.115580 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1210 10:45:21.115608 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1210 10:45:21.115614 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1210 10:45:21.115621 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1210 10:45:21.115664 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1210 10:45:21.115672 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1210 10:45:21.115676 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1210 10:45:21.117097 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1210 10:45:21.119004 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:14Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://531ca3c69106ee33211f11eba4a6fcbcf6cf5b8b6ee16193b5a118999537d2e0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:00Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6a7d241fa7fd864088c8bfa29a4457b38b3e78a6846872d6f52aeebb0de99c58\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6a7d241fa7fd864088c8bfa29a4457b38b3e78a6846872d6f52aeebb0de99c58\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:44:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:44:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:44:56Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:31Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:31 crc kubenswrapper[4780]: I1210 10:45:31.776587 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: 
[iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:31Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:31 crc kubenswrapper[4780]: I1210 10:45:31.793257 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6bf1dca1-b191-4796-b326-baac53e84045\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://439376b6f14a03790c4d49523ce4f21e3ee73ee0a81954f873409cf956048f53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sh92h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://57ec3f3a4da992b4288db7d6633da18e479fdc56b10c503a2a44368671882f0d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sh92h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:27Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-xhdr5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:31Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:31 crc kubenswrapper[4780]: I1210 10:45:31.808246 4780 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:31Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:31 crc kubenswrapper[4780]: I1210 10:45:31.821638 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://acd480054c5d19efdbcce679b2246ce2972593aa710b1074a2fa90006f351c66\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7c28d1629c1d97373d475f14e54585e81a67114d5a69e21be9d1e750040d6d5c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:31Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:31 crc kubenswrapper[4780]: I1210 10:45:31.833266 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-msm77" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c1046aa3-9bf1-4013-8e8d-5629f08ed5e2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:30Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:30Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:30Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2ttwk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:30Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-msm77\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:31Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:31 crc kubenswrapper[4780]: I1210 10:45:31.858492 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:31 crc kubenswrapper[4780]: I1210 10:45:31.858541 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:31 crc kubenswrapper[4780]: I1210 10:45:31.858554 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:31 crc kubenswrapper[4780]: I1210 10:45:31.858574 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:31 crc kubenswrapper[4780]: I1210 10:45:31.858586 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:31Z","lastTransitionTime":"2025-12-10T10:45:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady 
message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:31 crc kubenswrapper[4780]: I1210 10:45:31.963377 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:31 crc kubenswrapper[4780]: I1210 10:45:31.963486 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:31 crc kubenswrapper[4780]: I1210 10:45:31.963521 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:31 crc kubenswrapper[4780]: I1210 10:45:31.963576 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:31 crc kubenswrapper[4780]: I1210 10:45:31.963606 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:31Z","lastTransitionTime":"2025-12-10T10:45:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:32 crc kubenswrapper[4780]: I1210 10:45:32.066257 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:32 crc kubenswrapper[4780]: I1210 10:45:32.066299 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:32 crc kubenswrapper[4780]: I1210 10:45:32.066311 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:32 crc kubenswrapper[4780]: I1210 10:45:32.066327 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:32 crc kubenswrapper[4780]: I1210 10:45:32.066337 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:32Z","lastTransitionTime":"2025-12-10T10:45:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:45:32 crc kubenswrapper[4780]: I1210 10:45:32.169778 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:32 crc kubenswrapper[4780]: I1210 10:45:32.169837 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:32 crc kubenswrapper[4780]: I1210 10:45:32.169853 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:32 crc kubenswrapper[4780]: I1210 10:45:32.169900 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:32 crc kubenswrapper[4780]: I1210 10:45:32.169940 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:32Z","lastTransitionTime":"2025-12-10T10:45:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:32 crc kubenswrapper[4780]: I1210 10:45:32.273755 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:32 crc kubenswrapper[4780]: I1210 10:45:32.273830 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:32 crc kubenswrapper[4780]: I1210 10:45:32.273845 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:32 crc kubenswrapper[4780]: I1210 10:45:32.273870 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:32 crc kubenswrapper[4780]: I1210 10:45:32.273884 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:32Z","lastTransitionTime":"2025-12-10T10:45:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:32 crc kubenswrapper[4780]: I1210 10:45:32.378056 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:32 crc kubenswrapper[4780]: I1210 10:45:32.378117 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:32 crc kubenswrapper[4780]: I1210 10:45:32.378137 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:32 crc kubenswrapper[4780]: I1210 10:45:32.378183 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:32 crc kubenswrapper[4780]: I1210 10:45:32.378199 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:32Z","lastTransitionTime":"2025-12-10T10:45:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:45:32 crc kubenswrapper[4780]: I1210 10:45:32.480569 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:32 crc kubenswrapper[4780]: I1210 10:45:32.480617 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:32 crc kubenswrapper[4780]: I1210 10:45:32.480628 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:32 crc kubenswrapper[4780]: I1210 10:45:32.480649 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:32 crc kubenswrapper[4780]: I1210 10:45:32.480664 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:32Z","lastTransitionTime":"2025-12-10T10:45:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:32 crc kubenswrapper[4780]: I1210 10:45:32.498078 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" event={"ID":"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49","Type":"ContainerStarted","Data":"8ca741d2df061bc77e981eca8a555ba59082c6db0357093490908a1b27054340"} Dec 10 10:45:32 crc kubenswrapper[4780]: I1210 10:45:32.507031 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-fpl55" event={"ID":"cc22221d-0c02-4e8c-8314-c2e6d9290b5e","Type":"ContainerStarted","Data":"cafa00280d671b48ed3628917793412a20b9d3be93a80d9a7775003081aea1ab"} Dec 10 10:45:32 crc kubenswrapper[4780]: I1210 10:45:32.507127 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-fpl55" event={"ID":"cc22221d-0c02-4e8c-8314-c2e6d9290b5e","Type":"ContainerStarted","Data":"dbc444116b8592021f7d6ba0387d473521cba7c09ca78384be81f688af54554f"} Dec 10 10:45:32 crc kubenswrapper[4780]: I1210 10:45:32.520561 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"da4d38a3-53c0-417d-a86f-3496714bd352\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:56Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:56Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7f0f1f88e79f81b145ad50cc2b0a6e66de12f33c6819aaa356c8e6f7808384f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:44:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d6c4b099607f36efe13488a53c66715ba756493eaa11612cef18b8174b7b48ef\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:44:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a352e1e0c2a6fcb27ed2d53b1952586d56239ac801471f41d319d4b9390f7334\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:44:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f71a2483b5040f737133b92baa6ea162f365c492a0d860a50ba8eabd7ca23519\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f71a2483b5040f737133b92baa6ea162f365c492a0d860a50ba8eabd7ca23519\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-10T10:45:21Z\\\",\\\"message\\\":\\\"file observer\\\\nW1210 10:45:20.821547 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1210 10:45:20.821865 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1210 10:45:20.824662 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-536389068/tls.crt::/tmp/serving-cert-536389068/tls.key\\\\\\\"\\\\nI1210 10:45:21.107127 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1210 10:45:21.109726 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1210 10:45:21.109750 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1210 10:45:21.109791 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1210 10:45:21.109797 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1210 10:45:21.115580 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1210 10:45:21.115608 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1210 10:45:21.115614 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1210 10:45:21.115621 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1210 10:45:21.115664 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1210 10:45:21.115672 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1210 10:45:21.115676 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1210 10:45:21.117097 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1210 10:45:21.119004 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:14Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://531ca3c69106ee33211f11eba4a6fcbcf6cf5b8b6ee16193b5a118999537d2e0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:00Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6a7d241fa7fd864088c8bfa29a4457b38b3e78a6846872d6f52aeebb0de99c58\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6a7d241fa7fd864088c8bfa29a4457b38b3e78a6846872d6f52aeebb0de99c58\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:44:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:44:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:44:56Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:32Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:32 crc kubenswrapper[4780]: I1210 10:45:32.535930 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8ca741d2df061bc77e981eca8a555ba59082c6db0357093490908a1b27054340\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:32Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:32 crc kubenswrapper[4780]: I1210 10:45:32.553354 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6bf1dca1-b191-4796-b326-baac53e84045\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://439376b6f14a03790c4d49523ce4f21e3ee73ee0a81954f873409cf956048f53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sh92h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://57ec3f3a4da992b4288db7d6633da18e479fdc56b10c503a2a44368671882f0d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sh92h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:27Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-xhdr5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:32Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:32 crc kubenswrapper[4780]: I1210 10:45:32.579407 4780 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:32Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:32 crc kubenswrapper[4780]: I1210 10:45:32.583959 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:32 crc kubenswrapper[4780]: I1210 10:45:32.584005 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:32 crc kubenswrapper[4780]: I1210 10:45:32.584015 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:32 crc kubenswrapper[4780]: I1210 10:45:32.584052 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:32 crc kubenswrapper[4780]: I1210 10:45:32.584071 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:32Z","lastTransitionTime":"2025-12-10T10:45:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:45:32 crc kubenswrapper[4780]: I1210 10:45:32.603547 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://acd480054c5d19efdbcce679b2246ce2972593aa710b1074a2fa90006f351c66\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7c28d1629c1d97373d475f14e54585e81a67114d5a69e21be9d1e750040d6d5c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:32Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:32 crc kubenswrapper[4780]: I1210 10:45:32.623096 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-msm77" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c1046aa3-9bf1-4013-8e8d-5629f08ed5e2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:30Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:30Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:30Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2ttwk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:30Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-msm77\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:32Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:32 crc kubenswrapper[4780]: I1210 10:45:32.647211 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://738eca4a4800d0b83e165aaad1b57939c0fca3335dbf043de168bcc240b2c32c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:32Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:32 crc kubenswrapper[4780]: I1210 10:45:32.666189 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located 
when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:32Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:32 crc kubenswrapper[4780]: I1210 10:45:32.687781 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:32 crc kubenswrapper[4780]: I1210 10:45:32.687825 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:32 crc kubenswrapper[4780]: I1210 10:45:32.687836 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:32 crc kubenswrapper[4780]: I1210 10:45:32.687857 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:32 crc kubenswrapper[4780]: I1210 10:45:32.687872 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:32Z","lastTransitionTime":"2025-12-10T10:45:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:45:32 crc kubenswrapper[4780]: I1210 10:45:32.690099 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-8cwb7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"deadb49b-61b8-435f-8168-d7bd3c01b5ad\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b46e1864cd02c0ab49ad21329dedf09667d52948a3f59eac7f32302cc0b48bda\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r67pg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126
.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:27Z\\\"}}\" for pod \"openshift-multus\"/\"multus-8cwb7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:32Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:32 crc kubenswrapper[4780]: I1210 10:45:32.738286 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-rwfxn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e522fb8-b104-4f14-a3a2-628fbe0ef36c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with incomplete status: [cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5855e463a188d72cb8eac92bd97b9b2f91550a9a5437d7df83405bbe7a104f5d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5855e463a188d72cb8eac92bd97b9b2f91550a9a5437d7df83405bbe7a104f5d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reaso
n\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:27Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-rwfxn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:32Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:32 crc 
kubenswrapper[4780]: I1210 10:45:32.753637 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:32Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:32 crc kubenswrapper[4780]: I1210 10:45:32.768980 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-2lx8w" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8a3251eb-408c-42f1-b74d-261cb45eab71\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://414f485a56b9fb2fbea2bbc9687a588aabda6239cad44f8116ec680597dbe667\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lgkg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:27Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-2lx8w\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:32Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:32 crc kubenswrapper[4780]: I1210 10:45:32.795323 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-fpl55" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cc22221d-0c02-4e8c-8314-c2e6d9290b5e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":
\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"rea
dOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ada0c2d5e74e1805777a330c39a3c1c0fb677f876387207a83c79bd7ed06775a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ada0c2d5e74e1805777a330c39a3c1c0fb677f876387207a83c79bd7ed06775a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:27
Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-fpl55\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:32Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:32 crc kubenswrapper[4780]: I1210 10:45:32.819942 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:32 crc kubenswrapper[4780]: I1210 10:45:32.820005 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:32 crc kubenswrapper[4780]: I1210 10:45:32.820028 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:32 crc kubenswrapper[4780]: I1210 10:45:32.820051 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:32 crc kubenswrapper[4780]: I1210 10:45:32.820066 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:32Z","lastTransitionTime":"2025-12-10T10:45:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:32 crc kubenswrapper[4780]: I1210 10:45:32.931025 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:32 crc kubenswrapper[4780]: I1210 10:45:32.931681 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:32 crc kubenswrapper[4780]: I1210 10:45:32.931703 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:32 crc kubenswrapper[4780]: I1210 10:45:32.931733 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:32 crc kubenswrapper[4780]: I1210 10:45:32.931787 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:32Z","lastTransitionTime":"2025-12-10T10:45:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:32 crc kubenswrapper[4780]: I1210 10:45:32.958034 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 10 10:45:32 crc kubenswrapper[4780]: E1210 10:45:32.958212 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 10 10:45:32 crc kubenswrapper[4780]: I1210 10:45:32.958686 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 10 10:45:32 crc kubenswrapper[4780]: E1210 10:45:32.958764 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 10 10:45:32 crc kubenswrapper[4780]: I1210 10:45:32.958816 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 10 10:45:32 crc kubenswrapper[4780]: E1210 10:45:32.958878 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 10 10:45:33 crc kubenswrapper[4780]: I1210 10:45:33.034906 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:33 crc kubenswrapper[4780]: I1210 10:45:33.034971 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:33 crc kubenswrapper[4780]: I1210 10:45:33.034983 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:33 crc kubenswrapper[4780]: I1210 10:45:33.035002 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:33 crc kubenswrapper[4780]: I1210 10:45:33.035014 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:33Z","lastTransitionTime":"2025-12-10T10:45:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:45:33 crc kubenswrapper[4780]: I1210 10:45:33.140255 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:33 crc kubenswrapper[4780]: I1210 10:45:33.140330 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:33 crc kubenswrapper[4780]: I1210 10:45:33.140380 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:33 crc kubenswrapper[4780]: I1210 10:45:33.140401 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:33 crc kubenswrapper[4780]: I1210 10:45:33.140411 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:33Z","lastTransitionTime":"2025-12-10T10:45:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:33 crc kubenswrapper[4780]: I1210 10:45:33.243131 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:33 crc kubenswrapper[4780]: I1210 10:45:33.243469 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:33 crc kubenswrapper[4780]: I1210 10:45:33.243580 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:33 crc kubenswrapper[4780]: I1210 10:45:33.243701 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:33 crc kubenswrapper[4780]: I1210 10:45:33.243789 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:33Z","lastTransitionTime":"2025-12-10T10:45:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:33 crc kubenswrapper[4780]: I1210 10:45:33.366390 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:33 crc kubenswrapper[4780]: I1210 10:45:33.366452 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:33 crc kubenswrapper[4780]: I1210 10:45:33.366471 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:33 crc kubenswrapper[4780]: I1210 10:45:33.366494 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:33 crc kubenswrapper[4780]: I1210 10:45:33.366507 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:33Z","lastTransitionTime":"2025-12-10T10:45:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:45:33 crc kubenswrapper[4780]: I1210 10:45:33.470189 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:33 crc kubenswrapper[4780]: I1210 10:45:33.470238 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:33 crc kubenswrapper[4780]: I1210 10:45:33.470249 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:33 crc kubenswrapper[4780]: I1210 10:45:33.470268 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:33 crc kubenswrapper[4780]: I1210 10:45:33.470280 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:33Z","lastTransitionTime":"2025-12-10T10:45:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:33 crc kubenswrapper[4780]: I1210 10:45:33.544008 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-rwfxn" event={"ID":"5e522fb8-b104-4f14-a3a2-628fbe0ef36c","Type":"ContainerStarted","Data":"4a16db7b744311d27339440bc46ebd09ba4264c5892ff205c6125af2a127e67e"} Dec 10 10:45:33 crc kubenswrapper[4780]: I1210 10:45:33.546132 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/node-ca-msm77" event={"ID":"c1046aa3-9bf1-4013-8e8d-5629f08ed5e2","Type":"ContainerStarted","Data":"6a59aa8cd7a70e094ff80b0ae626aaabb77fb7ffdcffa1320945909c32da0704"} Dec 10 10:45:33 crc kubenswrapper[4780]: I1210 10:45:33.560345 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-msm77" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c1046aa3-9bf1-4013-8e8d-5629f08ed5e2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:30Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:30Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:30Z\\\",\\\"message\\\":\\\"containers with unready status: 
[node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2ttwk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:30Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-msm77\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:33Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:33 crc kubenswrapper[4780]: I1210 10:45:33.573997 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:33 crc kubenswrapper[4780]: I1210 10:45:33.574047 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:33 crc kubenswrapper[4780]: I1210 10:45:33.574057 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:33 crc kubenswrapper[4780]: I1210 10:45:33.574094 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:33 crc kubenswrapper[4780]: I1210 10:45:33.574106 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:33Z","lastTransitionTime":"2025-12-10T10:45:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:45:33 crc kubenswrapper[4780]: I1210 10:45:33.593134 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:33Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:33 crc kubenswrapper[4780]: I1210 10:45:33.611480 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://acd480054c5d19efdbcce679b2246ce2972593aa710b1074a2fa90006f351c66\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7c28d1629c1d97373d475f14e54585e81a67114d5a69e21be9d1e750040d6d5c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:33Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:33 crc kubenswrapper[4780]: I1210 10:45:33.639706 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://738eca4a4800d0b83e165aaad1b57939c0fca3335dbf043de168bcc240b2c32c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:33Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:33 crc kubenswrapper[4780]: I1210 10:45:33.674526 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located 
when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:33Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:33 crc kubenswrapper[4780]: I1210 10:45:33.687080 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:33 crc kubenswrapper[4780]: I1210 10:45:33.687130 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:33 crc kubenswrapper[4780]: I1210 10:45:33.687144 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:33 crc kubenswrapper[4780]: I1210 10:45:33.687163 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:33 crc kubenswrapper[4780]: I1210 10:45:33.687173 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:33Z","lastTransitionTime":"2025-12-10T10:45:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:45:33 crc kubenswrapper[4780]: I1210 10:45:33.691136 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-8cwb7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"deadb49b-61b8-435f-8168-d7bd3c01b5ad\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b46e1864cd02c0ab49ad21329dedf09667d52948a3f59eac7f32302cc0b48bda\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r67pg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126
.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:27Z\\\"}}\" for pod \"openshift-multus\"/\"multus-8cwb7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:33Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:33 crc kubenswrapper[4780]: I1210 10:45:33.706699 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-rwfxn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e522fb8-b104-4f14-a3a2-628fbe0ef36c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with incomplete status: [cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5855e463a188d72cb8eac92bd97b9b2f91550a9a5437d7df83405bbe7a104f5d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5855e463a188d72cb8eac92bd97b9b2f91550a9a5437d7df83405bbe7a104f5d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4a16db7b744311d27339440bc46ebd09ba4264c5892ff205c6125af2a127e67e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64
b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:27Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-rwfxn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call 
webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:33Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:33 crc kubenswrapper[4780]: I1210 10:45:33.718380 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:33Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:33 crc kubenswrapper[4780]: I1210 10:45:33.745561 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-2lx8w" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8a3251eb-408c-42f1-b74d-261cb45eab71\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://414f485a56b9fb2fbea2bbc9687a588aabda6239cad44f8116ec680597dbe667\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lgkg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:27Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-2lx8w\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:33Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:33 crc kubenswrapper[4780]: I1210 10:45:33.766362 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-fpl55" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cc22221d-0c02-4e8c-8314-c2e6d9290b5e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":
\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"rea
dOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ada0c2d5e74e1805777a330c39a3c1c0fb677f876387207a83c79bd7ed06775a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ada0c2d5e74e1805777a330c39a3c1c0fb677f876387207a83c79bd7ed06775a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:27
Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-fpl55\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:33Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:33 crc kubenswrapper[4780]: I1210 10:45:33.790254 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:33 crc kubenswrapper[4780]: I1210 10:45:33.790306 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:33 crc kubenswrapper[4780]: I1210 10:45:33.790318 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:33 crc kubenswrapper[4780]: I1210 10:45:33.790337 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:33 crc kubenswrapper[4780]: I1210 10:45:33.790352 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:33Z","lastTransitionTime":"2025-12-10T10:45:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:33 crc kubenswrapper[4780]: I1210 10:45:33.797324 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"da4d38a3-53c0-417d-a86f-3496714bd352\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:56Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:56Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7f0f1f88e79f81b145ad50cc2b0a6e66de12f33c6819aaa356c8e6f7808384f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:44:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d6c4b099607f36efe13488a53c66715ba756493eaa11612cef18b8174b7b48ef\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:44:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a352e1e0c2a6fcb27ed2d53b1952586d56239ac801471f41d319d4b9390f7334\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:44:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f71a2483b5040f737133b92baa6ea162f365c492a0d860a50ba8eabd7ca23519\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f71a2483b5040f737133b92baa6ea162f365c492a0d860a50ba8eabd7ca23519\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-10T10:45:21Z\\\",\\\"message\\\":\\\"file observer\\\\nW1210 10:45:20.821547 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1210 10:45:20.821865 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1210 10:45:20.824662 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-536389068/tls.crt::/tmp/serving-cert-536389068/tls.key\\\\\\\"\\\\nI1210 10:45:21.107127 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1210 10:45:21.109726 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1210 10:45:21.109750 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1210 10:45:21.109791 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1210 10:45:21.109797 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1210 10:45:21.115580 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1210 10:45:21.115608 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1210 10:45:21.115614 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1210 10:45:21.115621 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1210 10:45:21.115664 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1210 10:45:21.115672 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1210 10:45:21.115676 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1210 10:45:21.117097 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1210 10:45:21.119004 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:14Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://531ca3c69106ee33211f11eba4a6fcbcf6cf5b8b6ee16193b5a118999537d2e0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:00Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6a7d241fa7fd864088c8bfa29a4457b38b3e78a6846872d6f52aeebb0de99c58\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6a7d241fa7fd864088c8bfa29a4457b38b3e78a6846872d6f52aeebb0de99c58\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:44:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:44:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:44:56Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:33Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:33 crc kubenswrapper[4780]: I1210 10:45:33.811951 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8ca741d2df061bc77e981eca8a555ba59082c6db0357093490908a1b27054340\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:33Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:33 crc kubenswrapper[4780]: I1210 10:45:33.824352 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6bf1dca1-b191-4796-b326-baac53e84045\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://439376b6f14a03790c4d49523ce4f21e3ee73ee0a81954f873409cf956048f53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sh92h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://57ec3f3a4da992b4288db7d6633da18e479fdc56b10c503a2a44368671882f0d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sh92h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:27Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-xhdr5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:33Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:33 crc kubenswrapper[4780]: I1210 10:45:33.837158 4780 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:33Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:33 crc kubenswrapper[4780]: I1210 10:45:33.850547 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://acd480054c5d19efdbcce679b2246ce2972593aa710b1074a2fa90006f351c66\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7c28d1629c1d97373d475f14e54585e81a67114d5a69e21be9d1e750040d6d5c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:33Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:33 crc kubenswrapper[4780]: I1210 10:45:33.863463 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-msm77" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c1046aa3-9bf1-4013-8e8d-5629f08ed5e2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6a59aa8cd7a70e094ff80b0ae626aaabb77fb7ffdcffa1320945909c32da0704\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2ttwk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:30Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-msm77\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:33Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:33 crc kubenswrapper[4780]: I1210 10:45:33.875838 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://738eca4a4800d0b83e165aaad1b57939c0fca3335dbf043de168bcc240b2c32c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:33Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:33 crc kubenswrapper[4780]: I1210 10:45:33.891004 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located 
when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:33Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:33 crc kubenswrapper[4780]: I1210 10:45:33.893239 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:33 crc kubenswrapper[4780]: I1210 10:45:33.893271 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:33 crc kubenswrapper[4780]: I1210 10:45:33.893283 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:33 crc kubenswrapper[4780]: I1210 10:45:33.893302 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:33 crc kubenswrapper[4780]: I1210 10:45:33.893314 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:33Z","lastTransitionTime":"2025-12-10T10:45:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:45:33 crc kubenswrapper[4780]: I1210 10:45:33.907447 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-8cwb7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"deadb49b-61b8-435f-8168-d7bd3c01b5ad\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b46e1864cd02c0ab49ad21329dedf09667d52948a3f59eac7f32302cc0b48bda\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r67pg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126
.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:27Z\\\"}}\" for pod \"openshift-multus\"/\"multus-8cwb7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:33Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:33 crc kubenswrapper[4780]: I1210 10:45:33.924868 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-rwfxn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e522fb8-b104-4f14-a3a2-628fbe0ef36c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with incomplete status: [cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5855e463a188d72cb8eac92bd97b9b2f91550a9a5437d7df83405bbe7a104f5d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5855e463a188d72cb8eac92bd97b9b2f91550a9a5437d7df83405bbe7a104f5d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4a16db7b744311d27339440bc46ebd09ba4264c5892ff205c6125af2a127e67e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64
b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:27Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-rwfxn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call 
webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:33Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:33 crc kubenswrapper[4780]: I1210 10:45:33.954094 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:33Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:33 crc kubenswrapper[4780]: I1210 10:45:33.980017 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-2lx8w" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8a3251eb-408c-42f1-b74d-261cb45eab71\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://414f485a56b9fb2fbea2bbc9687a588aabda6239cad44f8116ec680597dbe667\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lgkg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:27Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-2lx8w\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:33Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:33 crc kubenswrapper[4780]: I1210 10:45:33.996316 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:33 crc kubenswrapper[4780]: I1210 10:45:33.996356 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:33 crc kubenswrapper[4780]: I1210 10:45:33.996369 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:33 crc kubenswrapper[4780]: I1210 10:45:33.996386 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:33 crc kubenswrapper[4780]: I1210 10:45:33.996400 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:33Z","lastTransitionTime":"2025-12-10T10:45:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: 
no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:34 crc kubenswrapper[4780]: I1210 10:45:34.009612 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-fpl55" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cc22221d-0c02-4e8c-8314-c2e6d9290b5e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"
ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",
\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"c
ri-o://ada0c2d5e74e1805777a330c39a3c1c0fb677f876387207a83c79bd7ed06775a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ada0c2d5e74e1805777a330c39a3c1c0fb677f876387207a83c79bd7ed06775a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:27Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-fpl55\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:34Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:34 crc kubenswrapper[4780]: I1210 10:45:34.025947 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"da4d38a3-53c0-417d-a86f-3496714bd352\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:56Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:56Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7f0f1f88e79f81b145ad50cc2b0a6e66de12f33c6819aaa356c8e6f7808384f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:44:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d6c4b099607f36efe13488a53c66715ba756493eaa11612cef18b8174b7b48ef\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:44:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a352e1e0c2a6fcb27ed2d53b1952586d56239ac801471f41d319d4b9390f7334\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:44:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f71a2483b5040f737133b92baa6ea162f365c492a0d860a50ba8eabd7ca23519\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f71a2483b5040f737133b92baa6ea162f365c492a0d860a50ba8eabd7ca23519\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-10T10:45:21Z\\\",\\\"message\\\":\\\"file observer\\\\nW1210 10:45:20.821547 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1210 10:45:20.821865 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1210 10:45:20.824662 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-536389068/tls.crt::/tmp/serving-cert-536389068/tls.key\\\\\\\"\\\\nI1210 10:45:21.107127 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1210 10:45:21.109726 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1210 10:45:21.109750 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1210 10:45:21.109791 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1210 10:45:21.109797 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1210 10:45:21.115580 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1210 10:45:21.115608 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1210 10:45:21.115614 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1210 10:45:21.115621 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1210 10:45:21.115664 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1210 10:45:21.115672 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1210 10:45:21.115676 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1210 10:45:21.117097 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1210 10:45:21.119004 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:14Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://531ca3c69106ee33211f11eba4a6fcbcf6cf5b8b6ee16193b5a118999537d2e0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:00Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6a7d241fa7fd864088c8bfa29a4457b38b3e78a6846872d6f52aeebb0de99c58\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6a7d241fa7fd864088c8bfa29a4457b38b3e78a6846872d6f52aeebb0de99c58\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:44:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:44:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:44:56Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:34Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:34 crc kubenswrapper[4780]: I1210 10:45:34.040174 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8ca741d2df061bc77e981eca8a555ba59082c6db0357093490908a1b27054340\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:34Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:34 crc kubenswrapper[4780]: I1210 10:45:34.053243 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6bf1dca1-b191-4796-b326-baac53e84045\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://439376b6f14a03790c4d49523ce4f21e3ee73ee0a81954f873409cf956048f53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sh92h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://57ec3f3a4da992b4288db7d6633da18e479fdc56b10c503a2a44368671882f0d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sh92h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:27Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-xhdr5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:34Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:34 crc kubenswrapper[4780]: I1210 10:45:34.099604 4780 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:34 crc kubenswrapper[4780]: I1210 10:45:34.099650 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:34 crc kubenswrapper[4780]: I1210 10:45:34.099662 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:34 crc kubenswrapper[4780]: I1210 10:45:34.099680 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:34 crc kubenswrapper[4780]: I1210 10:45:34.099692 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:34Z","lastTransitionTime":"2025-12-10T10:45:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:34 crc kubenswrapper[4780]: I1210 10:45:34.202078 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:34 crc kubenswrapper[4780]: I1210 10:45:34.202192 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:34 crc kubenswrapper[4780]: I1210 10:45:34.202211 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:34 crc kubenswrapper[4780]: I1210 10:45:34.202227 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:34 crc kubenswrapper[4780]: I1210 10:45:34.202238 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:34Z","lastTransitionTime":"2025-12-10T10:45:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:34 crc kubenswrapper[4780]: I1210 10:45:34.305299 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:34 crc kubenswrapper[4780]: I1210 10:45:34.305350 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:34 crc kubenswrapper[4780]: I1210 10:45:34.305359 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:34 crc kubenswrapper[4780]: I1210 10:45:34.305377 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:34 crc kubenswrapper[4780]: I1210 10:45:34.305389 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:34Z","lastTransitionTime":"2025-12-10T10:45:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:45:34 crc kubenswrapper[4780]: I1210 10:45:34.407952 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:34 crc kubenswrapper[4780]: I1210 10:45:34.407985 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:34 crc kubenswrapper[4780]: I1210 10:45:34.407995 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:34 crc kubenswrapper[4780]: I1210 10:45:34.408009 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:34 crc kubenswrapper[4780]: I1210 10:45:34.408022 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:34Z","lastTransitionTime":"2025-12-10T10:45:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:34 crc kubenswrapper[4780]: I1210 10:45:34.509944 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:34 crc kubenswrapper[4780]: I1210 10:45:34.509986 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:34 crc kubenswrapper[4780]: I1210 10:45:34.509998 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:34 crc kubenswrapper[4780]: I1210 10:45:34.510019 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:34 crc kubenswrapper[4780]: I1210 10:45:34.510031 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:34Z","lastTransitionTime":"2025-12-10T10:45:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:34 crc kubenswrapper[4780]: I1210 10:45:34.613067 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:34 crc kubenswrapper[4780]: I1210 10:45:34.613115 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:34 crc kubenswrapper[4780]: I1210 10:45:34.613127 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:34 crc kubenswrapper[4780]: I1210 10:45:34.613147 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:34 crc kubenswrapper[4780]: I1210 10:45:34.613160 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:34Z","lastTransitionTime":"2025-12-10T10:45:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:45:34 crc kubenswrapper[4780]: I1210 10:45:34.719964 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:34 crc kubenswrapper[4780]: I1210 10:45:34.720071 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:34 crc kubenswrapper[4780]: I1210 10:45:34.720091 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:34 crc kubenswrapper[4780]: I1210 10:45:34.720120 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:34 crc kubenswrapper[4780]: I1210 10:45:34.720139 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:34Z","lastTransitionTime":"2025-12-10T10:45:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:34 crc kubenswrapper[4780]: I1210 10:45:34.823271 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:34 crc kubenswrapper[4780]: I1210 10:45:34.823303 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:34 crc kubenswrapper[4780]: I1210 10:45:34.823312 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:34 crc kubenswrapper[4780]: I1210 10:45:34.823327 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:34 crc kubenswrapper[4780]: I1210 10:45:34.823336 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:34Z","lastTransitionTime":"2025-12-10T10:45:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:34 crc kubenswrapper[4780]: I1210 10:45:34.927669 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:34 crc kubenswrapper[4780]: I1210 10:45:34.927763 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:34 crc kubenswrapper[4780]: I1210 10:45:34.927774 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:34 crc kubenswrapper[4780]: I1210 10:45:34.927793 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:34 crc kubenswrapper[4780]: I1210 10:45:34.927803 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:34Z","lastTransitionTime":"2025-12-10T10:45:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:45:34 crc kubenswrapper[4780]: I1210 10:45:34.958108 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 10 10:45:34 crc kubenswrapper[4780]: I1210 10:45:34.958108 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 10 10:45:34 crc kubenswrapper[4780]: E1210 10:45:34.958507 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 10 10:45:34 crc kubenswrapper[4780]: E1210 10:45:34.958563 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 10 10:45:34 crc kubenswrapper[4780]: I1210 10:45:34.958121 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 10 10:45:34 crc kubenswrapper[4780]: E1210 10:45:34.958665 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 10 10:45:35 crc kubenswrapper[4780]: I1210 10:45:35.364162 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 10 10:45:35 crc kubenswrapper[4780]: I1210 10:45:35.364376 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 10 10:45:35 crc kubenswrapper[4780]: I1210 10:45:35.364423 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 10 10:45:35 crc kubenswrapper[4780]: I1210 10:45:35.364469 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 10 10:45:35 crc kubenswrapper[4780]: I1210 10:45:35.364501 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 10 10:45:35 crc kubenswrapper[4780]: E1210 10:45:35.364669 4780 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Dec 10 10:45:35 crc kubenswrapper[4780]: E1210 10:45:35.364693 4780 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Dec 10 10:45:35 crc kubenswrapper[4780]: E1210 10:45:35.364713 4780 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Dec 10 10:45:35 crc kubenswrapper[4780]: E1210 10:45:35.364786 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-12-10 10:45:43.364767279 +0000 UTC m=+48.218160722 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Dec 10 10:45:35 crc kubenswrapper[4780]: E1210 10:45:35.364846 4780 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Dec 10 10:45:35 crc kubenswrapper[4780]: E1210 10:45:35.364881 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-12-10 10:45:43.364872441 +0000 UTC m=+48.218265894 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Dec 10 10:45:35 crc kubenswrapper[4780]: E1210 10:45:35.364960 4780 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Dec 10 10:45:35 crc kubenswrapper[4780]: E1210 10:45:35.364999 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-12-10 10:45:43.364991274 +0000 UTC m=+48.218384707 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Dec 10 10:45:35 crc kubenswrapper[4780]: E1210 10:45:35.365057 4780 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Dec 10 10:45:35 crc kubenswrapper[4780]: E1210 10:45:35.365068 4780 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Dec 10 10:45:35 crc kubenswrapper[4780]: E1210 10:45:35.365085 4780 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Dec 10 10:45:35 crc kubenswrapper[4780]: E1210 10:45:35.365109 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. 
No retries permitted until 2025-12-10 10:45:43.365101907 +0000 UTC m=+48.218495350 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Dec 10 10:45:35 crc kubenswrapper[4780]: E1210 10:45:35.367057 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-10 10:45:43.367040806 +0000 UTC m=+48.220434259 (durationBeforeRetry 8s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:45:35 crc kubenswrapper[4780]: I1210 10:45:35.374107 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:35 crc kubenswrapper[4780]: I1210 10:45:35.374449 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:35 crc kubenswrapper[4780]: I1210 10:45:35.374630 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:35 crc kubenswrapper[4780]: I1210 10:45:35.374746 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:35 crc kubenswrapper[4780]: I1210 10:45:35.374846 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:35Z","lastTransitionTime":"2025-12-10T10:45:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:45:35 crc kubenswrapper[4780]: I1210 10:45:35.478296 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:35 crc kubenswrapper[4780]: I1210 10:45:35.478347 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:35 crc kubenswrapper[4780]: I1210 10:45:35.478392 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:35 crc kubenswrapper[4780]: I1210 10:45:35.478425 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:35 crc kubenswrapper[4780]: I1210 10:45:35.478444 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:35Z","lastTransitionTime":"2025-12-10T10:45:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:35 crc kubenswrapper[4780]: I1210 10:45:35.580981 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:35 crc kubenswrapper[4780]: I1210 10:45:35.581034 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:35 crc kubenswrapper[4780]: I1210 10:45:35.581049 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:35 crc kubenswrapper[4780]: I1210 10:45:35.581084 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:35 crc kubenswrapper[4780]: I1210 10:45:35.581097 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:35Z","lastTransitionTime":"2025-12-10T10:45:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:35 crc kubenswrapper[4780]: I1210 10:45:35.683933 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:35 crc kubenswrapper[4780]: I1210 10:45:35.684349 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:35 crc kubenswrapper[4780]: I1210 10:45:35.684369 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:35 crc kubenswrapper[4780]: I1210 10:45:35.684387 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:35 crc kubenswrapper[4780]: I1210 10:45:35.684396 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:35Z","lastTransitionTime":"2025-12-10T10:45:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:45:35 crc kubenswrapper[4780]: I1210 10:45:35.803511 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:35 crc kubenswrapper[4780]: I1210 10:45:35.803557 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:35 crc kubenswrapper[4780]: I1210 10:45:35.803566 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:35 crc kubenswrapper[4780]: I1210 10:45:35.803588 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:35 crc kubenswrapper[4780]: I1210 10:45:35.803601 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:35Z","lastTransitionTime":"2025-12-10T10:45:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:35 crc kubenswrapper[4780]: I1210 10:45:35.907434 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:35 crc kubenswrapper[4780]: I1210 10:45:35.907473 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:35 crc kubenswrapper[4780]: I1210 10:45:35.907481 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:35 crc kubenswrapper[4780]: I1210 10:45:35.907496 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:35 crc kubenswrapper[4780]: I1210 10:45:35.907506 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:35Z","lastTransitionTime":"2025-12-10T10:45:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:45:35 crc kubenswrapper[4780]: I1210 10:45:35.974728 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-rwfxn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e522fb8-b104-4f14-a3a2-628fbe0ef36c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with incomplete status: [cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5855e463a188d72cb8eac92bd97b9b2f91550a9a5437d7df83405bbe7a104f5d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5855e463a188d72cb8eac92bd97b9b2f91550a9a5437d7df83405bbe7a104f5d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets
/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4a16db7b744311d27339440bc46ebd09ba4264c5892ff205c6125af2a127e67e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entry
point\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:27Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-rwfxn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:35Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:36 crc kubenswrapper[4780]: I1210 10:45:36.009392 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:36 crc kubenswrapper[4780]: I1210 10:45:36.009445 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:36 crc kubenswrapper[4780]: I1210 10:45:36.009456 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:36 crc kubenswrapper[4780]: I1210 10:45:36.009475 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:36 crc kubenswrapper[4780]: I1210 10:45:36.009491 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:36Z","lastTransitionTime":"2025-12-10T10:45:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:45:36 crc kubenswrapper[4780]: I1210 10:45:36.073680 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://738eca4a4800d0b83e165aaad1b57939c0fca3335dbf043de168bcc240b2c32c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:35Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:36 crc kubenswrapper[4780]: I1210 10:45:36.256341 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:36 crc kubenswrapper[4780]: I1210 10:45:36.256377 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:36 crc kubenswrapper[4780]: I1210 10:45:36.256387 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:36 crc kubenswrapper[4780]: I1210 10:45:36.256402 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:36 crc kubenswrapper[4780]: I1210 10:45:36.256413 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:36Z","lastTransitionTime":"2025-12-10T10:45:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:45:36 crc kubenswrapper[4780]: I1210 10:45:36.275891 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:36Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:36 crc kubenswrapper[4780]: I1210 10:45:36.290111 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-8cwb7" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"deadb49b-61b8-435f-8168-d7bd3c01b5ad\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b46e1864cd02c0ab49ad21329dedf09667d52948a3f59eac7f32302cc0b48bda\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r67pg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:27Z\\\"}}\" for pod \"openshift-multus\"/\"multus-8cwb7\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:36Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:36 crc kubenswrapper[4780]: I1210 10:45:36.305224 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:36Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:36 crc kubenswrapper[4780]: I1210 10:45:36.318255 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-2lx8w" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8a3251eb-408c-42f1-b74d-261cb45eab71\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://414f485a56b9fb2fbea2bbc9687a588aabda6239cad44f8116ec680597dbe667\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lgkg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:27Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-2lx8w\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:36Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:36 crc kubenswrapper[4780]: I1210 10:45:36.345482 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-fpl55" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cc22221d-0c02-4e8c-8314-c2e6d9290b5e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":
\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"rea
dOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ada0c2d5e74e1805777a330c39a3c1c0fb677f876387207a83c79bd7ed06775a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ada0c2d5e74e1805777a330c39a3c1c0fb677f876387207a83c79bd7ed06775a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:27
Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-fpl55\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:36Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:36 crc kubenswrapper[4780]: I1210 10:45:36.359778 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:36 crc kubenswrapper[4780]: I1210 10:45:36.359854 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:36 crc kubenswrapper[4780]: I1210 10:45:36.359864 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:36 crc kubenswrapper[4780]: I1210 10:45:36.359899 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:36 crc kubenswrapper[4780]: I1210 10:45:36.359910 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:36Z","lastTransitionTime":"2025-12-10T10:45:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:36 crc kubenswrapper[4780]: I1210 10:45:36.366879 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"da4d38a3-53c0-417d-a86f-3496714bd352\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:56Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:56Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7f0f1f88e79f81b145ad50cc2b0a6e66de12f33c6819aaa356c8e6f7808384f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:44:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d6c4b099607f36efe13488a53c66715ba756493eaa11612cef18b8174b7b48ef\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:44:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a352e1e0c2a6fcb27ed2d53b1952586d56239ac801471f41d319d4b9390f7334\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:44:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f71a2483b5040f737133b92baa6ea162f365c492a0d860a50ba8eabd7ca23519\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f71a2483b5040f737133b92baa6ea162f365c492a0d860a50ba8eabd7ca23519\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-10T10:45:21Z\\\",\\\"message\\\":\\\"file observer\\\\nW1210 10:45:20.821547 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1210 10:45:20.821865 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1210 10:45:20.824662 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-536389068/tls.crt::/tmp/serving-cert-536389068/tls.key\\\\\\\"\\\\nI1210 10:45:21.107127 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1210 10:45:21.109726 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1210 10:45:21.109750 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1210 10:45:21.109791 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1210 10:45:21.109797 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1210 10:45:21.115580 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1210 10:45:21.115608 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1210 10:45:21.115614 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1210 10:45:21.115621 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1210 10:45:21.115664 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1210 10:45:21.115672 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1210 10:45:21.115676 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1210 10:45:21.117097 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1210 10:45:21.119004 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:14Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://531ca3c69106ee33211f11eba4a6fcbcf6cf5b8b6ee16193b5a118999537d2e0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:00Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6a7d241fa7fd864088c8bfa29a4457b38b3e78a6846872d6f52aeebb0de99c58\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6a7d241fa7fd864088c8bfa29a4457b38b3e78a6846872d6f52aeebb0de99c58\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:44:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:44:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:44:56Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:36Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:36 crc kubenswrapper[4780]: I1210 10:45:36.382057 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8ca741d2df061bc77e981eca8a555ba59082c6db0357093490908a1b27054340\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:36Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:36 crc kubenswrapper[4780]: I1210 10:45:36.396588 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6bf1dca1-b191-4796-b326-baac53e84045\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://439376b6f14a03790c4d49523ce4f21e3ee73ee0a81954f873409cf956048f53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sh92h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://57ec3f3a4da992b4288db7d6633da18e479fdc56b10c503a2a44368671882f0d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sh92h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:27Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-xhdr5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:36Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:36 crc kubenswrapper[4780]: I1210 10:45:36.413968 4780 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://acd480054c5d19efdbcce679b2246ce2972593aa710b1074a2fa90006f351c66\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7c28d1629c1d97373d475f14e54585e81a67114d5a69e21be9d1e750040d6d5c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:36Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:36 crc kubenswrapper[4780]: I1210 10:45:36.427800 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-msm77" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c1046aa3-9bf1-4013-8e8d-5629f08ed5e2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6a59aa8cd7a70e094ff80b0ae626aaabb77fb7ffdcffa1320945909c32da0704\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2ttwk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:30Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-msm77\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:36Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:36 crc kubenswrapper[4780]: I1210 10:45:36.457520 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:36Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:36 crc kubenswrapper[4780]: I1210 10:45:36.462717 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:36 crc kubenswrapper[4780]: I1210 10:45:36.462754 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:36 crc kubenswrapper[4780]: I1210 10:45:36.462780 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:36 crc kubenswrapper[4780]: I1210 10:45:36.462805 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:36 crc kubenswrapper[4780]: I1210 10:45:36.462821 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:36Z","lastTransitionTime":"2025-12-10T10:45:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:45:36 crc kubenswrapper[4780]: I1210 10:45:36.558821 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-fpl55" event={"ID":"cc22221d-0c02-4e8c-8314-c2e6d9290b5e","Type":"ContainerStarted","Data":"df663a4e7ac68c8967db1167ea07b83862e9d52aa13b3f10e51934bb18e7397e"} Dec 10 10:45:36 crc kubenswrapper[4780]: I1210 10:45:36.560538 4780 generic.go:334] "Generic (PLEG): container finished" podID="5e522fb8-b104-4f14-a3a2-628fbe0ef36c" containerID="4a16db7b744311d27339440bc46ebd09ba4264c5892ff205c6125af2a127e67e" exitCode=0 Dec 10 10:45:36 crc kubenswrapper[4780]: I1210 10:45:36.560596 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-rwfxn" event={"ID":"5e522fb8-b104-4f14-a3a2-628fbe0ef36c","Type":"ContainerDied","Data":"4a16db7b744311d27339440bc46ebd09ba4264c5892ff205c6125af2a127e67e"} Dec 10 10:45:36 crc kubenswrapper[4780]: I1210 10:45:36.564354 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:36 crc kubenswrapper[4780]: I1210 10:45:36.564384 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:36 crc kubenswrapper[4780]: I1210 10:45:36.564394 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:36 crc kubenswrapper[4780]: I1210 10:45:36.564408 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:36 crc kubenswrapper[4780]: I1210 10:45:36.564419 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:36Z","lastTransitionTime":"2025-12-10T10:45:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:45:36 crc kubenswrapper[4780]: I1210 10:45:36.594578 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-fpl55" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cc22221d-0c02-4e8c-8314-c2e6d9290b5e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":fa
lse,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\
\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ada0c2d5e74e1805777a330c39a3c1c0fb677f876387207a83
c79bd7ed06775a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ada0c2d5e74e1805777a330c39a3c1c0fb677f876387207a83c79bd7ed06775a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:27Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-fpl55\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:36Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:36 crc kubenswrapper[4780]: I1210 10:45:36.610913 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:36Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:36 crc kubenswrapper[4780]: I1210 10:45:36.624158 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-2lx8w" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8a3251eb-408c-42f1-b74d-261cb45eab71\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://414f485a56b9fb2fbea2bbc9687a588aabda6239cad44f8116ec680597dbe667\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lgkg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:27Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-2lx8w\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2025-12-10T10:45:36Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:36 crc kubenswrapper[4780]: I1210 10:45:36.661885 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6bf1dca1-b191-4796-b326-baac53e84045\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://439376b6f14a03790c4d49523ce4f21e3ee73ee0a81954f873409cf956048f53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sh92h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://57ec3f3a4da992b4288db7d6633da18e479fdc56b10c503a2a44368671882f0d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sh92h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:27Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-xhdr5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:36Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:36 crc kubenswrapper[4780]: I1210 10:45:36.669684 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:36 crc kubenswrapper[4780]: I1210 10:45:36.669906 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:36 crc kubenswrapper[4780]: I1210 10:45:36.669963 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:36 crc kubenswrapper[4780]: I1210 10:45:36.669986 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:36 crc kubenswrapper[4780]: I1210 10:45:36.669999 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:36Z","lastTransitionTime":"2025-12-10T10:45:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:36 crc kubenswrapper[4780]: I1210 10:45:36.679577 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"da4d38a3-53c0-417d-a86f-3496714bd352\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:56Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:56Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7f0f1f88e79f81b145ad50cc2b0a6e66de12f33c6819aaa356c8e6f7808384f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:44:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d6c4b099607f36efe13488a53c66715ba756493eaa11612cef18b8174b7b48ef\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:44:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a352e1e0c2a6fcb27ed2d53b1952586d56239ac801471f41d319d4b9390f7334\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:44:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f71a2483b5040f737133b92baa6ea162f365c492a0d860a50ba8eabd7ca23519\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f71a2483b5040f737133b92baa6ea162f365c492a0d860a50ba8eabd7ca23519\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-10T10:45:21Z\\\",\\\"message\\\":\\\"file observer\\\\nW1210 10:45:20.821547 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1210 10:45:20.821865 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1210 10:45:20.824662 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-536389068/tls.crt::/tmp/serving-cert-536389068/tls.key\\\\\\\"\\\\nI1210 10:45:21.107127 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1210 10:45:21.109726 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1210 10:45:21.109750 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1210 10:45:21.109791 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1210 10:45:21.109797 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1210 10:45:21.115580 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1210 10:45:21.115608 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1210 10:45:21.115614 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1210 10:45:21.115621 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1210 10:45:21.115664 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1210 10:45:21.115672 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1210 10:45:21.115676 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1210 10:45:21.117097 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1210 10:45:21.119004 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:14Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://531ca3c69106ee33211f11eba4a6fcbcf6cf5b8b6ee16193b5a118999537d2e0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:00Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6a7d241fa7fd864088c8bfa29a4457b38b3e78a6846872d6f52aeebb0de99c58\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6a7d241fa7fd864088c8bfa29a4457b38b3e78a6846872d6f52aeebb0de99c58\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:44:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:44:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:44:56Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:36Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:36 crc kubenswrapper[4780]: I1210 10:45:36.695296 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8ca741d2df061bc77e981eca8a555ba59082c6db0357093490908a1b27054340\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:36Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:36 crc kubenswrapper[4780]: I1210 10:45:36.710757 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:36Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:36 crc kubenswrapper[4780]: I1210 10:45:36.772074 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://acd480054c5d19efdbcce679b2246ce2972593aa710b1074a2fa90006f351c66\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7c28d1629c1d97373d475f14e54585e81a67114d5a69e21be9d1e750040d6d5c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"m
ountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:36Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:36 crc kubenswrapper[4780]: I1210 10:45:36.775225 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:36 crc kubenswrapper[4780]: I1210 10:45:36.775261 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:36 crc kubenswrapper[4780]: I1210 10:45:36.775271 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:36 crc kubenswrapper[4780]: I1210 10:45:36.775285 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:36 crc kubenswrapper[4780]: I1210 10:45:36.775295 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:36Z","lastTransitionTime":"2025-12-10T10:45:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:45:36 crc kubenswrapper[4780]: I1210 10:45:36.783531 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-msm77" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c1046aa3-9bf1-4013-8e8d-5629f08ed5e2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6a59aa8cd7a70e094ff80b0ae626aaabb77fb7ffdcffa1320945909c32da0704\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2ttwk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:30Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-msm77\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:36Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:36 crc kubenswrapper[4780]: I1210 10:45:36.798655 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:36Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:36 crc kubenswrapper[4780]: I1210 10:45:36.814868 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-8cwb7" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"deadb49b-61b8-435f-8168-d7bd3c01b5ad\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b46e1864cd02c0ab49ad21329dedf09667d52948a3f59eac7f32302cc0b48bda\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r67pg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:27Z\\\"}}\" for pod \"openshift-multus\"/\"multus-8cwb7\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:36Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:36 crc kubenswrapper[4780]: I1210 10:45:36.831353 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-rwfxn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e522fb8-b104-4f14-a3a2-628fbe0ef36c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with incomplete status: [bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5855e463a188d72cb8eac92bd97b9b2f91550a9a5437d7df83405bbe7a104f5d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5855e463a188d72cb8eac92bd97b9b2f91550a9a5437d7df83405bbe7a104f5d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin
\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4a16db7b744311d27339440bc46ebd09ba4264c5892ff205c6125af2a127e67e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4a16db7b744311d27339440bc46ebd09ba4264c5892ff205c6125af2a127e67e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}
,{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:27Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-rwfxn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:36Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:36 crc kubenswrapper[4780]: I1210 10:45:36.878243 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:36 crc kubenswrapper[4780]: I1210 10:45:36.878305 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:36 crc kubenswrapper[4780]: I1210 10:45:36.878322 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:36 crc kubenswrapper[4780]: I1210 10:45:36.878352 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:36 crc kubenswrapper[4780]: I1210 10:45:36.878369 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:36Z","lastTransitionTime":"2025-12-10T10:45:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:45:36 crc kubenswrapper[4780]: I1210 10:45:36.903176 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://738eca4a4800d0b83e165aaad1b57939c0fca3335dbf043de168bcc240b2c32c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:36Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:36 crc kubenswrapper[4780]: I1210 10:45:36.957779 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 10 10:45:36 crc kubenswrapper[4780]: I1210 10:45:36.957978 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 10 10:45:36 crc kubenswrapper[4780]: E1210 10:45:36.958061 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 10 10:45:36 crc kubenswrapper[4780]: I1210 10:45:36.958138 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 10 10:45:36 crc kubenswrapper[4780]: E1210 10:45:36.958181 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 10 10:45:36 crc kubenswrapper[4780]: E1210 10:45:36.958453 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 10 10:45:36 crc kubenswrapper[4780]: I1210 10:45:36.984482 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:36 crc kubenswrapper[4780]: I1210 10:45:36.984536 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:36 crc kubenswrapper[4780]: I1210 10:45:36.984546 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:36 crc kubenswrapper[4780]: I1210 10:45:36.984564 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:36 crc kubenswrapper[4780]: I1210 10:45:36.984573 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:36Z","lastTransitionTime":"2025-12-10T10:45:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:37 crc kubenswrapper[4780]: I1210 10:45:37.087330 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:37 crc kubenswrapper[4780]: I1210 10:45:37.087372 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:37 crc kubenswrapper[4780]: I1210 10:45:37.087384 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:37 crc kubenswrapper[4780]: I1210 10:45:37.087410 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:37 crc kubenswrapper[4780]: I1210 10:45:37.087424 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:37Z","lastTransitionTime":"2025-12-10T10:45:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:45:37 crc kubenswrapper[4780]: I1210 10:45:37.214570 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:37 crc kubenswrapper[4780]: I1210 10:45:37.214641 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:37 crc kubenswrapper[4780]: I1210 10:45:37.214664 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:37 crc kubenswrapper[4780]: I1210 10:45:37.214740 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:37 crc kubenswrapper[4780]: I1210 10:45:37.214766 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:37Z","lastTransitionTime":"2025-12-10T10:45:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:37 crc kubenswrapper[4780]: I1210 10:45:37.317997 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:37 crc kubenswrapper[4780]: I1210 10:45:37.318036 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:37 crc kubenswrapper[4780]: I1210 10:45:37.318047 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:37 crc kubenswrapper[4780]: I1210 10:45:37.318064 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:37 crc kubenswrapper[4780]: I1210 10:45:37.318077 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:37Z","lastTransitionTime":"2025-12-10T10:45:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:37 crc kubenswrapper[4780]: I1210 10:45:37.421740 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:37 crc kubenswrapper[4780]: I1210 10:45:37.421792 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:37 crc kubenswrapper[4780]: I1210 10:45:37.421802 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:37 crc kubenswrapper[4780]: I1210 10:45:37.421822 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:37 crc kubenswrapper[4780]: I1210 10:45:37.421833 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:37Z","lastTransitionTime":"2025-12-10T10:45:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:45:37 crc kubenswrapper[4780]: I1210 10:45:37.524800 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:37 crc kubenswrapper[4780]: I1210 10:45:37.524837 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:37 crc kubenswrapper[4780]: I1210 10:45:37.524846 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:37 crc kubenswrapper[4780]: I1210 10:45:37.524863 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:37 crc kubenswrapper[4780]: I1210 10:45:37.524875 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:37Z","lastTransitionTime":"2025-12-10T10:45:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:37 crc kubenswrapper[4780]: I1210 10:45:37.572420 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-fpl55" event={"ID":"cc22221d-0c02-4e8c-8314-c2e6d9290b5e","Type":"ContainerStarted","Data":"547474a5e2f6912cd7f0fd6c25cdaf68ff49af2ba46c782bf7dc81e8052dbcfc"} Dec 10 10:45:37 crc kubenswrapper[4780]: I1210 10:45:37.573128 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-fpl55" Dec 10 10:45:37 crc kubenswrapper[4780]: I1210 10:45:37.573214 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-fpl55" Dec 10 10:45:37 crc kubenswrapper[4780]: I1210 10:45:37.577798 4780 generic.go:334] "Generic (PLEG): container finished" podID="5e522fb8-b104-4f14-a3a2-628fbe0ef36c" containerID="fcbeee4d4e545150d725187049a89aa3d35ddd8f7767a6af0ea84a385454c316" exitCode=0 Dec 10 10:45:37 crc kubenswrapper[4780]: I1210 10:45:37.577888 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-rwfxn" event={"ID":"5e522fb8-b104-4f14-a3a2-628fbe0ef36c","Type":"ContainerDied","Data":"fcbeee4d4e545150d725187049a89aa3d35ddd8f7767a6af0ea84a385454c316"} Dec 10 10:45:37 crc kubenswrapper[4780]: I1210 10:45:37.591492 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:37Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:37 crc kubenswrapper[4780]: I1210 10:45:37.766038 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:37 crc kubenswrapper[4780]: I1210 10:45:37.766370 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:37 crc kubenswrapper[4780]: I1210 10:45:37.766484 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:37 crc kubenswrapper[4780]: I1210 10:45:37.766663 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:37 crc kubenswrapper[4780]: I1210 10:45:37.766750 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:37Z","lastTransitionTime":"2025-12-10T10:45:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:45:37 crc kubenswrapper[4780]: I1210 10:45:37.766232 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://acd480054c5d19efdbcce679b2246ce2972593aa710b1074a2fa90006f351c66\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7c28d1629c1d97373d475f14e54585e81a67114d5a69e21be9d1e750040d6d5c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:37Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:37 crc kubenswrapper[4780]: I1210 10:45:37.771238 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-fpl55" Dec 10 10:45:37 crc 
kubenswrapper[4780]: I1210 10:45:37.783980 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-msm77" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c1046aa3-9bf1-4013-8e8d-5629f08ed5e2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6a59aa8cd7a70e094ff80b0ae626aaabb77fb7ffdcffa1320945909c32da0704\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2ttwk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:30Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-msm77\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:37Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:37 crc kubenswrapper[4780]: I1210 10:45:37.805903 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://738eca4a4800d0b83e165aaad1b57939c0fca3335dbf043de168bcc240b2c32c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:37Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:37 crc kubenswrapper[4780]: I1210 10:45:37.827431 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located 
when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:37Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:37 crc kubenswrapper[4780]: I1210 10:45:37.849259 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-8cwb7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"deadb49b-61b8-435f-8168-d7bd3c01b5ad\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b46e1864cd02c0ab49ad21329dedf09667d52948a3f59eac7f32302cc0b48bda\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cn
i/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r67pg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:27Z\\\"}}\" for pod \"openshift-multus\"/\"multus-8cwb7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:37Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:37 crc kubenswrapper[4780]: I1210 10:45:37.869175 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-rwfxn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e522fb8-b104-4f14-a3a2-628fbe0ef36c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with incomplete status: [bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5855e463a188d72cb8eac92bd97b9b2f91550a9a5437d7df83405bbe7a104f5d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5855e463a188d72cb8eac92bd97b9b2f91550a9a5437d7df83405bbe7a104f5d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4a16db7b744311d27339440bc46ebd09ba4264c5892ff205c6125af2a127e67e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4a16db7b744311d27339440bc46ebd09ba4264c5892ff205c6125af2a127e67e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-
10T10:45:27Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-rwfxn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:37Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:37 crc kubenswrapper[4780]: I1210 10:45:37.871715 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:37 crc kubenswrapper[4780]: I1210 10:45:37.871747 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:37 crc kubenswrapper[4780]: I1210 10:45:37.871755 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:37 crc kubenswrapper[4780]: I1210 10:45:37.871781 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:37 crc kubenswrapper[4780]: I1210 10:45:37.871794 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:37Z","lastTransitionTime":"2025-12-10T10:45:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:37 crc kubenswrapper[4780]: I1210 10:45:37.887032 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:37Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:37 crc kubenswrapper[4780]: I1210 10:45:37.902566 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-2lx8w" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8a3251eb-408c-42f1-b74d-261cb45eab71\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://414f485a56b9fb2fbea2bbc9687a588aabda6239cad44f8116ec680597dbe667\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lgkg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:27Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-2lx8w\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2025-12-10T10:45:37Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:37 crc kubenswrapper[4780]: I1210 10:45:37.922254 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-fpl55" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cc22221d-0c02-4e8c-8314-c2e6d9290b5e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: [nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: [nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ef0cc518de07a141be124545f2e547c630329ff507b0e0e71e4dee9b4be5d89a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ce7c3983123be2f92944322c2cf554e67cf80a51981415d9872bfc2db9f1ed3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"
Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cafa00280d671b48ed3628917793412a20b9d3be93a80d9a7775003081aea1ab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dbc444116b8592021f7d6ba0387d473521cba7c09ca78384be81f688af54554f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://782bcb7eb8341b73ff1c32c5af00aaefa2a513e5d1672f9f7df13cd05467131c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c39c3d56584d0939bfa9be938be09db5c93ddf50e0a56cacaa6e30c7b79475a1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa
41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://547474a5e2f6912cd7f0fd6c25cdaf68ff49af2ba46c782bf7dc81e8052dbcfc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\
",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://df663a4e7ac68c8967db1167ea07b83862e9d52aa13b3f10e51934bb18e7397e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ada0c2d5e74e1805777a330c39a3c1c0fb677f876387207a83c79bd7ed06775a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ada0c2d5e74e1805777a330c39a3c1c0fb677f876387207a83c79bd7ed06775a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:27Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-fpl55\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:37Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:37 crc kubenswrapper[4780]: I1210 10:45:37.944298 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"da4d38a3-53c0-417d-a86f-3496714bd352\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:56Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:56Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7f0f1f88e79f81b145ad50cc2b0a6e66de12f33c6819aaa356c8e6f7808384f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:44:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d6c4b099607f36efe13488a53c66715ba756493eaa11612cef18b8174b7b48ef\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:44:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a352e1e0c2a6fcb27ed2d53b1952586d56239ac801471f41d319d4b9390f7334\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:44:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"m
ountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f71a2483b5040f737133b92baa6ea162f365c492a0d860a50ba8eabd7ca23519\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f71a2483b5040f737133b92baa6ea162f365c492a0d860a50ba8eabd7ca23519\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-10T10:45:21Z\\\",\\\"message\\\":\\\"file observer\\\\nW1210 10:45:20.821547 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1210 10:45:20.821865 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1210 10:45:20.824662 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-536389068/tls.crt::/tmp/serving-cert-536389068/tls.key\\\\\\\"\\\\nI1210 10:45:21.107127 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1210 10:45:21.109726 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1210 10:45:21.109750 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1210 10:45:21.109791 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1210 10:45:21.109797 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1210 10:45:21.115580 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1210 10:45:21.115608 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1210 10:45:21.115614 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1210 10:45:21.115621 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1210 10:45:21.115664 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1210 10:45:21.115672 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1210 10:45:21.115676 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1210 10:45:21.117097 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1210 10:45:21.119004 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:14Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://531ca3c69106ee33211f11eba4a6fcbcf6cf5b8b6ee16193b5a118999537d2e0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:00Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6a7d241fa7fd864088c8bfa29a4457b38b3e78a6846872d6f52aeebb0de99c58\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6a7d241fa7fd864088c8bfa29a4457b38b3e78a6846872d6f52aeebb0de99c58\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:44:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:44:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:44:56Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:37Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:37 crc kubenswrapper[4780]: I1210 10:45:37.963242 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8ca741d2df061bc77e981eca8a555ba59082c6db0357093490908a1b27054340\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:37Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:37 crc kubenswrapper[4780]: I1210 10:45:37.977063 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6bf1dca1-b191-4796-b326-baac53e84045\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://439376b6f14a03790c4d49523ce4f21e3ee73ee0a81954f873409cf956048f53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sh92h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://57ec3f3a4da992b4288db7d6633da18e479fdc56b10c503a2a44368671882f0d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sh92h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:27Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-xhdr5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:37Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:37 crc kubenswrapper[4780]: I1210 10:45:37.977880 4780 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:37 crc kubenswrapper[4780]: I1210 10:45:37.977935 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:37 crc kubenswrapper[4780]: I1210 10:45:37.977950 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:37 crc kubenswrapper[4780]: I1210 10:45:37.977968 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:37 crc kubenswrapper[4780]: I1210 10:45:37.977978 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:37Z","lastTransitionTime":"2025-12-10T10:45:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:37 crc kubenswrapper[4780]: I1210 10:45:37.995539 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://738eca4a4800d0b83e165aaad1b57939c0fca3335dbf043de168bcc240b2c32c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:37Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:38 crc kubenswrapper[4780]: I1210 10:45:38.013480 4780 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:38Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:38 crc kubenswrapper[4780]: I1210 10:45:38.027833 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-8cwb7" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"deadb49b-61b8-435f-8168-d7bd3c01b5ad\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b46e1864cd02c0ab49ad21329dedf09667d52948a3f59eac7f32302cc0b48bda\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r67pg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:27Z\\\"}}\" for pod \"openshift-multus\"/\"multus-8cwb7\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:38Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:38 crc kubenswrapper[4780]: I1210 10:45:38.043451 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-rwfxn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e522fb8-b104-4f14-a3a2-628fbe0ef36c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with incomplete status: [routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5855e463a188d72cb8eac92bd97b9b2f91550a9a5437d7df83405bbe7a104f5d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5855e463a188d72cb8eac92bd97b9b2f91550a9a5437d7df83405bbe7a104f5d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\
":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4a16db7b744311d27339440bc46ebd09ba4264c5892ff205c6125af2a127e67e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4a16db7b744311d27339440bc46ebd09ba4264c5892ff205c6125af2a127e67e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fcbeee4d4e545150d725187049a89aa3d35ddd8f7767a6af0ea84a385454c316\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fcbeee4d4e545150d725187049a89aa3d35ddd8f7767a6af0ea84a385454c316\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary
-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:27Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-rwfxn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:38Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:38 crc kubenswrapper[4780]: I1210 10:45:38.057856 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-2lx8w" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8a3251eb-408c-42f1-b74d-261cb45eab71\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://414f485a56b9fb2fbea2bbc9687a588aabda6239cad44f8116ec680597dbe667\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lgkg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:27Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-2lx8w\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:38Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:38 crc kubenswrapper[4780]: I1210 10:45:38.079449 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-fpl55" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cc22221d-0c02-4e8c-8314-c2e6d9290b5e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: [nbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: [nbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ef0cc518de07a141be124545f2e547c630329ff507b0e0e71e4dee9b4be5d89a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ce7c3983123be2f92944322c2cf554e67cf80a51981415d9872bfc2db9f1ed3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cafa00280d671b48ed3628917793412a20b9d3be93a80d9a7775003081aea1ab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\
"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dbc444116b8592021f7d6ba0387d473521cba7c09ca78384be81f688af54554f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://782bcb7eb8341b73ff1c32c5af00aaefa2a513e5d1672f9f7df13cd05467131c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c39c3d56584d0939bfa9be938be09db5c93ddf50e0a56cacaa6e30c7b79475a1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-sock
et\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://547474a5e2f6912cd7f0fd6c25cdaf68ff49af2ba46c782bf7dc81e8052dbcfc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://df663a4e7ac68c8967db1167ea07b83862e9d52aa13b3f10e51934bb18e7397e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mou
ntPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ada0c2d5e74e1805777a330c39a3c1c0fb677f876387207a83c79bd7ed06775a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ada0c2d5e74e1805777a330c39a3c1c0fb677f876387207a83c79bd7ed06775a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:27Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-fpl55\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:38Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:38 crc kubenswrapper[4780]: I1210 10:45:38.081835 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:38 crc kubenswrapper[4780]: I1210 10:45:38.081875 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:38 crc kubenswrapper[4780]: I1210 10:45:38.081887 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:38 crc kubenswrapper[4780]: I1210 10:45:38.081931 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:38 crc kubenswrapper[4780]: I1210 10:45:38.081951 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:38Z","lastTransitionTime":"2025-12-10T10:45:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:45:38 crc kubenswrapper[4780]: I1210 10:45:38.092948 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:38Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:38 crc kubenswrapper[4780]: I1210 10:45:38.104906 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8ca741d2df061bc77e981eca8a555ba59082c6db0357093490908a1b27054340\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:38Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:38 crc kubenswrapper[4780]: I1210 10:45:38.117844 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6bf1dca1-b191-4796-b326-baac53e84045\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://439376b6f14a03790c4d49523ce4f21e3ee73ee0a81954f873409cf956048f53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sh92h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://57ec3f3a4da992b4288db7d6633da18e479fdc56b10c503a2a44368671882f0d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sh92h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:27Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-xhdr5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:38Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:38 crc kubenswrapper[4780]: I1210 10:45:38.133949 4780 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"da4d38a3-53c0-417d-a86f-3496714bd352\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:56Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:56Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7f0f1f88e79f81b145ad50cc2b0a6e66de12f33c6819aaa356c8e6f7808384f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:44:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d6c4b099607f36efe13488a53c66715ba756493eaa11612cef18b8174b7b48ef\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:44:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a352e1e0c2a6fcb27ed2d53b1952586d56239ac801471f41d319d4b9390f7334\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:44:58Z\\\"}},\\\"volumeMounts\\\":[{\\\
"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f71a2483b5040f737133b92baa6ea162f365c492a0d860a50ba8eabd7ca23519\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f71a2483b5040f737133b92baa6ea162f365c492a0d860a50ba8eabd7ca23519\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-10T10:45:21Z\\\",\\\"message\\\":\\\"file observer\\\\nW1210 10:45:20.821547 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1210 10:45:20.821865 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1210 10:45:20.824662 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-536389068/tls.crt::/tmp/serving-cert-536389068/tls.key\\\\\\\"\\\\nI1210 10:45:21.107127 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1210 10:45:21.109726 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1210 10:45:21.109750 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1210 10:45:21.109791 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1210 10:45:21.109797 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1210 10:45:21.115580 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1210 10:45:21.115608 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1210 10:45:21.115614 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1210 10:45:21.115621 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1210 10:45:21.115664 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1210 10:45:21.115672 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1210 10:45:21.115676 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1210 10:45:21.117097 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1210 10:45:21.119004 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:14Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://531ca3c69106ee33211f11eba4a6fcbcf6cf5b8b6ee16193b5a118999537d2e0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:00Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6a7d241fa7fd864088c8bfa29a4457b38b3e78a6846872d6f52aeebb0de99c58\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6a7d241fa7fd864088c8bfa29a4457b38b3e78a6846872d6f52aeebb0de99c58\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:44:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:44:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:44:56Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:38Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:38 crc kubenswrapper[4780]: I1210 10:45:38.149793 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:38Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:38 crc kubenswrapper[4780]: I1210 10:45:38.166885 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://acd480054c5d19efdbcce679b2246ce2972593aa710b1074a2fa90006f351c66\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7c28d1629c1d97373d475f14e54585e81a67114d5a69e21be9d1e750040d6d5c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imag
eID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:38Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:38 crc kubenswrapper[4780]: I1210 10:45:38.182069 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-msm77" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c1046aa3-9bf1-4013-8e8d-5629f08ed5e2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6a59aa8cd7a70e094ff80b0ae626aaabb77fb7ffdcffa1320945909c32da0704\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2ttwk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:30Z\\\"}}\" for pod 
\"openshift-image-registry\"/\"node-ca-msm77\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:38Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:38 crc kubenswrapper[4780]: I1210 10:45:38.186105 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:38 crc kubenswrapper[4780]: I1210 10:45:38.186187 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:38 crc kubenswrapper[4780]: I1210 10:45:38.186198 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:38 crc kubenswrapper[4780]: I1210 10:45:38.186223 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:38 crc kubenswrapper[4780]: I1210 10:45:38.186241 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:38Z","lastTransitionTime":"2025-12-10T10:45:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:38 crc kubenswrapper[4780]: I1210 10:45:38.290344 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:38 crc kubenswrapper[4780]: I1210 10:45:38.290416 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:38 crc kubenswrapper[4780]: I1210 10:45:38.290429 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:38 crc kubenswrapper[4780]: I1210 10:45:38.290456 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:38 crc kubenswrapper[4780]: I1210 10:45:38.290477 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:38Z","lastTransitionTime":"2025-12-10T10:45:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:45:38 crc kubenswrapper[4780]: I1210 10:45:38.393435 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:38 crc kubenswrapper[4780]: I1210 10:45:38.393464 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:38 crc kubenswrapper[4780]: I1210 10:45:38.393473 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:38 crc kubenswrapper[4780]: I1210 10:45:38.393487 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:38 crc kubenswrapper[4780]: I1210 10:45:38.393496 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:38Z","lastTransitionTime":"2025-12-10T10:45:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:38 crc kubenswrapper[4780]: I1210 10:45:38.495788 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:38 crc kubenswrapper[4780]: I1210 10:45:38.495834 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:38 crc kubenswrapper[4780]: I1210 10:45:38.495847 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:38 crc kubenswrapper[4780]: I1210 10:45:38.495867 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:38 crc kubenswrapper[4780]: I1210 10:45:38.495892 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:38Z","lastTransitionTime":"2025-12-10T10:45:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:38 crc kubenswrapper[4780]: I1210 10:45:38.553095 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-6m7tj"] Dec 10 10:45:38 crc kubenswrapper[4780]: I1210 10:45:38.554256 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-6m7tj" Dec 10 10:45:38 crc kubenswrapper[4780]: I1210 10:45:38.557468 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-control-plane-dockercfg-gs7dd" Dec 10 10:45:38 crc kubenswrapper[4780]: I1210 10:45:38.557741 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-control-plane-metrics-cert" Dec 10 10:45:38 crc kubenswrapper[4780]: I1210 10:45:38.570938 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:38Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:38 crc kubenswrapper[4780]: I1210 10:45:38.581750 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-fpl55" Dec 10 10:45:38 crc kubenswrapper[4780]: I1210 10:45:38.582905 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://acd480054c5d19efdbcce679b2246ce2972593aa710b1074a2fa90006f351c66\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7c28d1629c1d97373d475f14e54585e81a67114d5a69e21be9d1e750040d6d5c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\
\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:38Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:38 crc kubenswrapper[4780]: I1210 10:45:38.593620 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-msm77" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c1046aa3-9bf1-4013-8e8d-5629f08ed5e2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6a59aa8cd7a70e094ff80b0ae626aaabb77fb7ffdcffa1320945909c32da0704\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2ttwk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:30Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-msm77\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2025-12-10T10:45:38Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:38 crc kubenswrapper[4780]: I1210 10:45:38.598148 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:38 crc kubenswrapper[4780]: I1210 10:45:38.598697 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:38 crc kubenswrapper[4780]: I1210 10:45:38.598715 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:38 crc kubenswrapper[4780]: I1210 10:45:38.598737 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:38 crc kubenswrapper[4780]: I1210 10:45:38.598751 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:38Z","lastTransitionTime":"2025-12-10T10:45:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:38 crc kubenswrapper[4780]: I1210 10:45:38.606542 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://738eca4a4800d0b83e165aaad1b57939c0fca3335dbf043de168bcc240b2c32c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:38Z is after 
2025-08-24T17:21:41Z" Dec 10 10:45:38 crc kubenswrapper[4780]: I1210 10:45:38.620438 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:38Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:38 crc kubenswrapper[4780]: I1210 10:45:38.626660 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-fpl55" Dec 10 10:45:38 crc kubenswrapper[4780]: I1210 10:45:38.634956 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-8cwb7" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"deadb49b-61b8-435f-8168-d7bd3c01b5ad\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b46e1864cd02c0ab49ad21329dedf09667d52948a3f59eac7f32302cc0b48bda\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r67pg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:27Z\\\"}}\" for pod \"openshift-multus\"/\"multus-8cwb7\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:38Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:38 crc kubenswrapper[4780]: I1210 10:45:38.653966 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-rwfxn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e522fb8-b104-4f14-a3a2-628fbe0ef36c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with incomplete status: [routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5855e463a188d72cb8eac92bd97b9b2f91550a9a5437d7df83405bbe7a104f5d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5855e463a188d72cb8eac92bd97b9b2f91550a9a5437d7df83405bbe7a104f5d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\
":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4a16db7b744311d27339440bc46ebd09ba4264c5892ff205c6125af2a127e67e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4a16db7b744311d27339440bc46ebd09ba4264c5892ff205c6125af2a127e67e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fcbeee4d4e545150d725187049a89aa3d35ddd8f7767a6af0ea84a385454c316\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fcbeee4d4e545150d725187049a89aa3d35ddd8f7767a6af0ea84a385454c316\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary
-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:27Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-rwfxn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:38Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:38 crc kubenswrapper[4780]: I1210 10:45:38.670613 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:38Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:38 crc kubenswrapper[4780]: I1210 10:45:38.671554 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/a78da08f-1ec1-4cc7-af55-d527da423778-env-overrides\") pod \"ovnkube-control-plane-749d76644c-6m7tj\" (UID: \"a78da08f-1ec1-4cc7-af55-d527da423778\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-6m7tj" Dec 10 10:45:38 crc kubenswrapper[4780]: I1210 10:45:38.671763 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/a78da08f-1ec1-4cc7-af55-d527da423778-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-749d76644c-6m7tj\" (UID: \"a78da08f-1ec1-4cc7-af55-d527da423778\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-6m7tj" Dec 10 10:45:38 crc kubenswrapper[4780]: I1210 10:45:38.672168 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/a78da08f-1ec1-4cc7-af55-d527da423778-ovnkube-config\") pod \"ovnkube-control-plane-749d76644c-6m7tj\" (UID: \"a78da08f-1ec1-4cc7-af55-d527da423778\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-6m7tj" Dec 10 10:45:38 crc kubenswrapper[4780]: I1210 10:45:38.672303 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jvtxj\" (UniqueName: \"kubernetes.io/projected/a78da08f-1ec1-4cc7-af55-d527da423778-kube-api-access-jvtxj\") pod \"ovnkube-control-plane-749d76644c-6m7tj\" (UID: \"a78da08f-1ec1-4cc7-af55-d527da423778\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-6m7tj" Dec 10 10:45:38 crc kubenswrapper[4780]: I1210 10:45:38.685554 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-2lx8w" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8a3251eb-408c-42f1-b74d-261cb45eab71\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://414f485a56b9fb2fbea2bbc9687a588aabda6239cad44f8116ec680597dbe667\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lgkg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:27Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-2lx8w\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:38Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:38 crc kubenswrapper[4780]: I1210 10:45:38.703052 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:38 crc kubenswrapper[4780]: I1210 10:45:38.703116 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:38 crc kubenswrapper[4780]: I1210 10:45:38.703131 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:38 crc kubenswrapper[4780]: I1210 10:45:38.703155 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:38 crc kubenswrapper[4780]: I1210 10:45:38.703171 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:38Z","lastTransitionTime":"2025-12-10T10:45:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: 
no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:38 crc kubenswrapper[4780]: I1210 10:45:38.707126 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-fpl55" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cc22221d-0c02-4e8c-8314-c2e6d9290b5e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: [nbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: [nbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ef0cc518de07a141be124545f2e547c630329ff507b0e0e71e4dee9b4be5d89a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ce7c3983123be2f92944322c2cf554e67cf80a51981415d9872bfc2db9f1ed3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":tru
e,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cafa00280d671b48ed3628917793412a20b9d3be93a80d9a7775003081aea1ab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dbc444116b8592021f7d6ba0387d473521cba7c09ca78384be81f688af54554f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://782bcb7eb8341b73ff1c32c5af00aaefa2a513e5d1672f9f7df13cd05467131c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c39c3d56584d0939bfa9be938be09db5c93ddf50e0a56cacaa6e30c7b79475a1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919
d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://547474a5e2f6912cd7f0fd6c25cdaf68ff49af2ba46c782bf7dc81e8052dbcfc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath
\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://df663a4e7ac68c8967db1167ea07b83862e9d52aa13b3f10e51934bb18e7397e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ada0c2d5e74e1805777a330c39a3c1c0fb677f876387207a83c79bd7ed06775a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ada0c2d5e74e1805777a330c39a3c1c0fb677f876387207a83c79bd7ed06775a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:27Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-fpl55\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:38Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:38 crc kubenswrapper[4780]: I1210 10:45:38.727187 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-6m7tj" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a78da08f-1ec1-4cc7-af55-d527da423778\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jvtxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jvtxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:38Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-6m7tj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:38Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:38 crc kubenswrapper[4780]: I1210 10:45:38.744539 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch 
status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"da4d38a3-53c0-417d-a86f-3496714bd352\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:56Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:56Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7f0f1f88e79f81b145ad50cc2b0a6e66de12f33c6819aaa356c8e6f7808384f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:44:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d6c4b099607f36efe13488a53c66715ba756493eaa11612cef18b8174b7b48ef\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:44:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a352e1e0c2a6fcb27ed2d53b1952586d56239ac801471f41d319d4b9390f7334\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:44:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}
,{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f71a2483b5040f737133b92baa6ea162f365c492a0d860a50ba8eabd7ca23519\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f71a2483b5040f737133b92baa6ea162f365c492a0d860a50ba8eabd7ca23519\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-10T10:45:21Z\\\",\\\"message\\\":\\\"file observer\\\\nW1210 10:45:20.821547 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1210 10:45:20.821865 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1210 10:45:20.824662 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-536389068/tls.crt::/tmp/serving-cert-536389068/tls.key\\\\\\\"\\\\nI1210 10:45:21.107127 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1210 10:45:21.109726 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1210 10:45:21.109750 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1210 10:45:21.109791 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1210 10:45:21.109797 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1210 10:45:21.115580 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1210 10:45:21.115608 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1210 10:45:21.115614 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1210 10:45:21.115621 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1210 10:45:21.115664 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1210 10:45:21.115672 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1210 10:45:21.115676 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1210 10:45:21.117097 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1210 10:45:21.119004 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:14Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://531ca3c69106ee33211f11eba4a6fcbcf6cf5b8b6ee16193b5a118999537d2e0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:00Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6a7d241fa7fd864088c8bfa29a4457b38b3e78a6846872d6f52aeebb0de99c58\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6a7d241fa7fd864088c8bfa29a4457b38b3e78a6846872d6f52aeebb0de99c58\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:44:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:44:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:44:56Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:38Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:38 crc kubenswrapper[4780]: I1210 10:45:38.759680 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8ca741d2df061bc77e981eca8a555ba59082c6db0357093490908a1b27054340\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:38Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:38 crc kubenswrapper[4780]: I1210 10:45:38.773474 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6bf1dca1-b191-4796-b326-baac53e84045\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://439376b6f14a03790c4d49523ce4f21e3ee73ee0a81954f873409cf956048f53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sh92h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://57ec3f3a4da992b4288db7d6633da18e479fdc56b10c503a2a44368671882f0d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sh92h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:27Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-xhdr5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:38Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:38 crc kubenswrapper[4780]: I1210 10:45:38.773890 4780 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/a78da08f-1ec1-4cc7-af55-d527da423778-ovnkube-config\") pod \"ovnkube-control-plane-749d76644c-6m7tj\" (UID: \"a78da08f-1ec1-4cc7-af55-d527da423778\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-6m7tj" Dec 10 10:45:38 crc kubenswrapper[4780]: I1210 10:45:38.774065 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jvtxj\" (UniqueName: \"kubernetes.io/projected/a78da08f-1ec1-4cc7-af55-d527da423778-kube-api-access-jvtxj\") pod \"ovnkube-control-plane-749d76644c-6m7tj\" (UID: \"a78da08f-1ec1-4cc7-af55-d527da423778\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-6m7tj" Dec 10 10:45:38 crc kubenswrapper[4780]: I1210 10:45:38.774116 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/a78da08f-1ec1-4cc7-af55-d527da423778-env-overrides\") pod \"ovnkube-control-plane-749d76644c-6m7tj\" (UID: \"a78da08f-1ec1-4cc7-af55-d527da423778\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-6m7tj" Dec 10 10:45:38 crc kubenswrapper[4780]: I1210 10:45:38.774190 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/a78da08f-1ec1-4cc7-af55-d527da423778-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-749d76644c-6m7tj\" (UID: \"a78da08f-1ec1-4cc7-af55-d527da423778\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-6m7tj" Dec 10 10:45:38 crc kubenswrapper[4780]: I1210 10:45:38.775885 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/a78da08f-1ec1-4cc7-af55-d527da423778-env-overrides\") pod \"ovnkube-control-plane-749d76644c-6m7tj\" (UID: \"a78da08f-1ec1-4cc7-af55-d527da423778\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-6m7tj" Dec 10 10:45:38 crc kubenswrapper[4780]: I1210 10:45:38.776476 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/a78da08f-1ec1-4cc7-af55-d527da423778-ovnkube-config\") pod \"ovnkube-control-plane-749d76644c-6m7tj\" (UID: \"a78da08f-1ec1-4cc7-af55-d527da423778\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-6m7tj" Dec 10 10:45:38 crc kubenswrapper[4780]: I1210 10:45:38.785878 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/a78da08f-1ec1-4cc7-af55-d527da423778-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-749d76644c-6m7tj\" (UID: \"a78da08f-1ec1-4cc7-af55-d527da423778\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-6m7tj" Dec 10 10:45:38 crc kubenswrapper[4780]: I1210 10:45:38.790353 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:38Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:38 crc kubenswrapper[4780]: I1210 10:45:38.823826 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jvtxj\" (UniqueName: \"kubernetes.io/projected/a78da08f-1ec1-4cc7-af55-d527da423778-kube-api-access-jvtxj\") pod \"ovnkube-control-plane-749d76644c-6m7tj\" (UID: \"a78da08f-1ec1-4cc7-af55-d527da423778\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-6m7tj" Dec 10 10:45:38 crc kubenswrapper[4780]: I1210 10:45:38.825739 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:38 crc kubenswrapper[4780]: I1210 10:45:38.825796 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:38 crc kubenswrapper[4780]: I1210 10:45:38.825807 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:38 crc kubenswrapper[4780]: I1210 10:45:38.825831 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:38 crc kubenswrapper[4780]: I1210 10:45:38.825843 4780 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:38Z","lastTransitionTime":"2025-12-10T10:45:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:38 crc kubenswrapper[4780]: I1210 10:45:38.837980 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://acd480054c5d19efdbcce679b2246ce2972593aa710b1074a2fa90006f351c66\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7c28d1629c1d97373d475f14e54585e81a67114d5a69e21be9d1e750040d6d5c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify 
certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:38Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:38 crc kubenswrapper[4780]: I1210 10:45:38.853469 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-msm77" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c1046aa3-9bf1-4013-8e8d-5629f08ed5e2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6a59aa8cd7a70e094ff80b0ae626aaabb77fb7ffdcffa1320945909c32da0704\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2ttwk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:30Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-msm77\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:38Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:38 crc kubenswrapper[4780]: I1210 10:45:38.870243 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-6m7tj" Dec 10 10:45:38 crc kubenswrapper[4780]: I1210 10:45:38.871955 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:38Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:38 crc kubenswrapper[4780]: W1210 10:45:38.896088 4780 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poda78da08f_1ec1_4cc7_af55_d527da423778.slice/crio-ab4b704d6d5944fe2784e0e892ab9483726fa334ad8fd7228020f2741f56500f WatchSource:0}: Error finding container ab4b704d6d5944fe2784e0e892ab9483726fa334ad8fd7228020f2741f56500f: Status 404 returned error can't find the container with id ab4b704d6d5944fe2784e0e892ab9483726fa334ad8fd7228020f2741f56500f Dec 10 10:45:38 crc kubenswrapper[4780]: I1210 10:45:38.896342 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-8cwb7" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"deadb49b-61b8-435f-8168-d7bd3c01b5ad\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b46e1864cd02c0ab49ad21329dedf09667d52948a3f59eac7f32302cc0b48bda\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r67pg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:27Z\\\"}}\" for pod \"openshift-multus\"/\"multus-8cwb7\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:38Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:38 crc kubenswrapper[4780]: I1210 10:45:38.907493 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:38 crc kubenswrapper[4780]: I1210 10:45:38.907550 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:38 crc kubenswrapper[4780]: I1210 10:45:38.907567 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:38 crc kubenswrapper[4780]: I1210 10:45:38.907589 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:38 crc kubenswrapper[4780]: I1210 10:45:38.907647 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:38Z","lastTransitionTime":"2025-12-10T10:45:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:38 crc kubenswrapper[4780]: I1210 10:45:38.919347 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-rwfxn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e522fb8-b104-4f14-a3a2-628fbe0ef36c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with incomplete status: [routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5855e463a188d72cb8eac92bd97b9b2f91550a9a5437d7df83405bbe7a104f5d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5855e463a188d72cb8eac92bd97b9b2f91550a9a5437d7df83405bbe7a104f5d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4a16db7b744311d27339440bc46ebd09ba4264c5892ff205c6125af2a127e67e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4a16db7b744311d27339440bc46ebd09ba4264c5892ff205c6125af2a127e67e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fcbeee4d4e545150d725187049a89aa3d35ddd8f7767a6af0ea84a385454c316\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fcbeee4d4e545150d725187049a89aa3d35ddd8f7767a6af0ea84a385454c316\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/
cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:27Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-rwfxn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:38Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:38 crc kubenswrapper[4780]: E1210 10:45:38.924169 4780 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:45:38Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:38Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:45:38Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:38Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:45:38Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:38Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:45:38Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:38Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"90085e8a-5ea9-4564-85e4-5635b00d094d\\\",\\\"systemUUID\\\":\\\"0182e509-70c5-4f26-9ad3-610230bb601e\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:38Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:38 crc kubenswrapper[4780]: I1210 10:45:38.930494 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:38 crc kubenswrapper[4780]: I1210 10:45:38.930553 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Dec 10 10:45:38 crc kubenswrapper[4780]: I1210 10:45:38.930573 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:38 crc kubenswrapper[4780]: I1210 10:45:38.930598 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:38 crc kubenswrapper[4780]: I1210 10:45:38.930614 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:38Z","lastTransitionTime":"2025-12-10T10:45:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:38 crc kubenswrapper[4780]: I1210 10:45:38.939453 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://738eca4a4800d0b83e165aaad1b57939c0fca3335dbf043de168bcc240b2c32c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:38Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:38 crc kubenswrapper[4780]: E1210 10:45:38.944895 4780 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status 
\"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:45:38Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:38Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:45:38Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:38Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:45:38Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:38Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:45:38Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:38Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae
669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-r
elease-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-
art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"90085e8a-5ea9-4564-85e4-5635b00d094d\\\",\\\"systemUUID\\\":\\\"0182e509-70c5-4f26-9ad3-610230bb601e\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:38Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:38 crc kubenswrapper[4780]: I1210 10:45:38.949842 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:38 crc kubenswrapper[4780]: I1210 10:45:38.949942 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:38 crc kubenswrapper[4780]: I1210 10:45:38.949960 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:38 crc kubenswrapper[4780]: I1210 10:45:38.949980 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:38 crc kubenswrapper[4780]: I1210 10:45:38.949994 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:38Z","lastTransitionTime":"2025-12-10T10:45:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:38 crc kubenswrapper[4780]: I1210 10:45:38.957993 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 10 10:45:38 crc kubenswrapper[4780]: I1210 10:45:38.958082 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 10 10:45:38 crc kubenswrapper[4780]: I1210 10:45:38.958189 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 10 10:45:38 crc kubenswrapper[4780]: E1210 10:45:38.958177 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 10 10:45:38 crc kubenswrapper[4780]: E1210 10:45:38.958327 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 10 10:45:38 crc kubenswrapper[4780]: E1210 10:45:38.958681 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 10 10:45:39 crc kubenswrapper[4780]: E1210 10:45:39.018589 4780 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:45:38Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:38Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:45:38Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:38Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:45:38Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:38Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:45:38Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:38Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"90085e8a-5ea9-4564-85e4-5635b00d094d\\\",\\\"systemUUID\\\":\\\"0182e509-70c5-4f26-9ad3-610230bb601e\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:38Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:39 crc kubenswrapper[4780]: I1210 10:45:39.019114 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-fpl55" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"cc22221d-0c02-4e8c-8314-c2e6d9290b5e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ef0cc518de07a141be124545f2e547c630329ff507b0e0e71e4dee9b4be5d89a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ce7c3983123be2f92944322c2cf554e67cf80a51981415d9872bfc2db9f1ed3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cafa00280d671b48ed3628917793412a20b9d3be93a80d9a7775003081aea1ab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dbc444116b8592021f7d6ba0387d473521cba7c09ca78384be81f688af54554f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://782bcb7eb8341b73ff1c32c5af00aaefa2a513e5d1672f9f7df13cd05467131c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c39c3d56584d0939bfa9be938be09db5c93ddf50e0a56cacaa6e30c7b79475a1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://547474a5e2f6912cd7f0fd6c25cdaf68ff49af2ba46c782bf7dc81e8052dbcfc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"D
isabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://df663a4e7ac68c8967db1167ea07b83862e9d52aa13b3f10e51934bb18e7397e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ada0c2d5e74e1805777a330c39a3c1c0fb677f876387207a83c79bd7ed06775a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ada0c2d5e74e1805777a330c39a3c1c0fb677f876387207a83c79bd7ed06775a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:27Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-fpl55\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:38Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:39 crc kubenswrapper[4780]: I1210 10:45:39.025803 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:39 crc kubenswrapper[4780]: I1210 10:45:39.025851 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:39 crc kubenswrapper[4780]: I1210 10:45:39.025862 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:39 crc kubenswrapper[4780]: I1210 10:45:39.025882 4780 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeNotReady" Dec 10 10:45:39 crc kubenswrapper[4780]: I1210 10:45:39.025893 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:39Z","lastTransitionTime":"2025-12-10T10:45:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:39 crc kubenswrapper[4780]: I1210 10:45:39.032280 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-6m7tj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a78da08f-1ec1-4cc7-af55-d527da423778\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy 
ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jvtxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jvtxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:38Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-6m7tj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:39Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:39 crc kubenswrapper[4780]: E1210 10:45:39.043335 4780 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"message\\\":\\\"kubelet has no disk 
pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeByt
es\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-a
rt-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"90085e8a-5ea9-4564-85e4-5635b00d094d\\\",\\\"systemUUID\\\":\\\"0
182e509-70c5-4f26-9ad3-610230bb601e\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:39Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:39 crc kubenswrapper[4780]: I1210 10:45:39.047232 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:39 crc kubenswrapper[4780]: I1210 10:45:39.047269 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:39 crc kubenswrapper[4780]: I1210 10:45:39.047282 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:39 crc kubenswrapper[4780]: I1210 10:45:39.047302 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:39 crc kubenswrapper[4780]: I1210 10:45:39.047315 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:39Z","lastTransitionTime":"2025-12-10T10:45:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:39 crc kubenswrapper[4780]: I1210 10:45:39.050480 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:39Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:39 crc kubenswrapper[4780]: E1210 10:45:39.061607 4780 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"90085e8a-5ea9-4564-85e4-5635b00d094d\\\",\\\"systemUUID\\\":\\\"0182e509-70c5-4f26-9ad3-610230bb601e\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:39Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:39 crc kubenswrapper[4780]: E1210 10:45:39.061786 4780 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Dec 10 10:45:39 crc kubenswrapper[4780]: I1210 10:45:39.063909 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Dec 10 10:45:39 crc kubenswrapper[4780]: I1210 10:45:39.063961 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:39 crc kubenswrapper[4780]: I1210 10:45:39.063975 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:39 crc kubenswrapper[4780]: I1210 10:45:39.063992 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:39 crc kubenswrapper[4780]: I1210 10:45:39.064007 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:39Z","lastTransitionTime":"2025-12-10T10:45:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:39 crc kubenswrapper[4780]: I1210 10:45:39.069901 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-2lx8w" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8a3251eb-408c-42f1-b74d-261cb45eab71\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://414f485a56b9fb2fbea2bbc9687a588aabda6239cad44f8116ec680597dbe667\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lgkg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:27Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-2lx8w\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: 
failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:39Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:39 crc kubenswrapper[4780]: I1210 10:45:39.086847 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6bf1dca1-b191-4796-b326-baac53e84045\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://439376b6f14a03790c4d49523ce4f21e3ee73ee0a81954f873409cf956048f53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sh92h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://57ec3f3a4da992b4288db7d6633da18e479fdc56b10c503a2a44368671882f0d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sh92h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:27Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-xhdr5\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:39Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:39 crc kubenswrapper[4780]: I1210 10:45:39.166662 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:39 crc kubenswrapper[4780]: I1210 10:45:39.166701 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:39 crc kubenswrapper[4780]: I1210 10:45:39.166712 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:39 crc kubenswrapper[4780]: I1210 10:45:39.166734 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:39 crc kubenswrapper[4780]: I1210 10:45:39.166747 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:39Z","lastTransitionTime":"2025-12-10T10:45:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:39 crc kubenswrapper[4780]: I1210 10:45:39.187192 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"da4d38a3-53c0-417d-a86f-3496714bd352\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:56Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:56Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7f0f1f88e79f81b145ad50cc2b0a6e66de12f33c6819aaa356c8e6f7808384f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:44:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d6c4b099607f36efe13488a53c66715ba756493eaa11612cef18b8174b7b48ef\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:44:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a352e1e0c2a6fcb27ed2d53b1952586d56239ac801471f41d319d4b9390f7334\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:44:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f71a2483b5040f737133b92baa6ea162f365c492a0d860a50ba8eabd7ca23519\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f71a2483b5040f737133b92baa6ea162f365c492a0d860a50ba8eabd7ca23519\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-10T10:45:21Z\\\",\\\"message\\\":\\\"file observer\\\\nW1210 10:45:20.821547 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1210 10:45:20.821865 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1210 10:45:20.824662 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-536389068/tls.crt::/tmp/serving-cert-536389068/tls.key\\\\\\\"\\\\nI1210 10:45:21.107127 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1210 10:45:21.109726 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1210 10:45:21.109750 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1210 10:45:21.109791 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1210 10:45:21.109797 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1210 10:45:21.115580 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1210 10:45:21.115608 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1210 10:45:21.115614 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1210 10:45:21.115621 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1210 10:45:21.115664 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1210 10:45:21.115672 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1210 10:45:21.115676 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1210 10:45:21.117097 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1210 10:45:21.119004 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:14Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://531ca3c69106ee33211f11eba4a6fcbcf6cf5b8b6ee16193b5a118999537d2e0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:00Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6a7d241fa7fd864088c8bfa29a4457b38b3e78a6846872d6f52aeebb0de99c58\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6a7d241fa7fd864088c8bfa29a4457b38b3e78a6846872d6f52aeebb0de99c58\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:44:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:44:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:44:56Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:39Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:39 crc kubenswrapper[4780]: I1210 10:45:39.206865 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8ca741d2df061bc77e981eca8a555ba59082c6db0357093490908a1b27054340\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:39Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:39 crc kubenswrapper[4780]: I1210 10:45:39.269519 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:39 crc kubenswrapper[4780]: I1210 10:45:39.269582 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:39 crc kubenswrapper[4780]: I1210 10:45:39.269594 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:39 crc kubenswrapper[4780]: I1210 10:45:39.269630 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:39 crc kubenswrapper[4780]: I1210 10:45:39.269643 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:39Z","lastTransitionTime":"2025-12-10T10:45:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:45:39 crc kubenswrapper[4780]: I1210 10:45:39.374280 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:39 crc kubenswrapper[4780]: I1210 10:45:39.374347 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:39 crc kubenswrapper[4780]: I1210 10:45:39.374361 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:39 crc kubenswrapper[4780]: I1210 10:45:39.374571 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:39 crc kubenswrapper[4780]: I1210 10:45:39.374583 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:39Z","lastTransitionTime":"2025-12-10T10:45:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:39 crc kubenswrapper[4780]: I1210 10:45:39.478991 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:39 crc kubenswrapper[4780]: I1210 10:45:39.479045 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:39 crc kubenswrapper[4780]: I1210 10:45:39.479055 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:39 crc kubenswrapper[4780]: I1210 10:45:39.479071 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:39 crc kubenswrapper[4780]: I1210 10:45:39.479082 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:39Z","lastTransitionTime":"2025-12-10T10:45:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:39 crc kubenswrapper[4780]: I1210 10:45:39.609831 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:39 crc kubenswrapper[4780]: I1210 10:45:39.609915 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:39 crc kubenswrapper[4780]: I1210 10:45:39.609949 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:39 crc kubenswrapper[4780]: I1210 10:45:39.610014 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:39 crc kubenswrapper[4780]: I1210 10:45:39.610034 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:39Z","lastTransitionTime":"2025-12-10T10:45:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:45:39 crc kubenswrapper[4780]: I1210 10:45:39.613615 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-6m7tj" event={"ID":"a78da08f-1ec1-4cc7-af55-d527da423778","Type":"ContainerStarted","Data":"920fb034d3d8ce6bf28b128e33093aa5daa724b15c51c7e208ed94ccc2f4840e"} Dec 10 10:45:39 crc kubenswrapper[4780]: I1210 10:45:39.613715 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-6m7tj" event={"ID":"a78da08f-1ec1-4cc7-af55-d527da423778","Type":"ContainerStarted","Data":"ab4b704d6d5944fe2784e0e892ab9483726fa334ad8fd7228020f2741f56500f"} Dec 10 10:45:39 crc kubenswrapper[4780]: I1210 10:45:39.618290 4780 generic.go:334] "Generic (PLEG): container finished" podID="5e522fb8-b104-4f14-a3a2-628fbe0ef36c" containerID="3d60cbe3cc8e817975ed6843aefdbfe3233e8857a6458d77322a623ed858ca18" exitCode=0 Dec 10 10:45:39 crc kubenswrapper[4780]: I1210 10:45:39.618327 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-rwfxn" event={"ID":"5e522fb8-b104-4f14-a3a2-628fbe0ef36c","Type":"ContainerDied","Data":"3d60cbe3cc8e817975ed6843aefdbfe3233e8857a6458d77322a623ed858ca18"} Dec 10 10:45:39 crc kubenswrapper[4780]: I1210 10:45:39.642374 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:39Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:39 crc kubenswrapper[4780]: I1210 10:45:39.658110 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-2lx8w" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8a3251eb-408c-42f1-b74d-261cb45eab71\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://414f485a56b9fb2fbea2bbc9687a588aabda6239cad44f8116ec680597dbe667\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lgkg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:27Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-2lx8w\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2025-12-10T10:45:39Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:39 crc kubenswrapper[4780]: I1210 10:45:39.681778 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-fpl55" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cc22221d-0c02-4e8c-8314-c2e6d9290b5e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ef0cc518de07a141be124545f2e547c630329ff507b0e0e71e4dee9b4be5d89a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ce7c3983123be2f92944322c2cf554e67cf80a51981415d9872bfc2db9f1ed3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\
"containerID\\\":\\\"cri-o://cafa00280d671b48ed3628917793412a20b9d3be93a80d9a7775003081aea1ab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dbc444116b8592021f7d6ba0387d473521cba7c09ca78384be81f688af54554f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://782bcb7eb8341b73ff1c32c5af00aaefa2a513e5d1672f9f7df13cd05467131c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c39c3d56584d0939bfa9be938be09db5c93ddf50e0a56cacaa6e30c7b79475a1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID
\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://547474a5e2f6912cd7f0fd6c25cdaf68ff49af2ba46c782bf7dc81e8052dbcfc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ov
nkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://df663a4e7ac68c8967db1167ea07b83862e9d52aa13b3f10e51934bb18e7397e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ada0c2d5e74e1805777a330c39a3c1c0fb677f876387207a83c79bd7ed06775a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ada0c2d5e74e1805777a330c39a3c1c0fb677f876387207a83c79bd7ed06775a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:27Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-fpl55\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:39Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:39 crc kubenswrapper[4780]: I1210 10:45:39.697996 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-6m7tj" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a78da08f-1ec1-4cc7-af55-d527da423778\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jvtxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jvtxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:38Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-6m7tj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:39Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:39 crc kubenswrapper[4780]: I1210 10:45:39.714132 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:39 crc 
kubenswrapper[4780]: I1210 10:45:39.714239 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:39 crc kubenswrapper[4780]: I1210 10:45:39.714256 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:39 crc kubenswrapper[4780]: I1210 10:45:39.714309 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:39 crc kubenswrapper[4780]: I1210 10:45:39.714330 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:39Z","lastTransitionTime":"2025-12-10T10:45:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:39 crc kubenswrapper[4780]: I1210 10:45:39.896871 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"da4d38a3-53c0-417d-a86f-3496714bd352\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:56Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:56Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7f0f1f88e79f81b145ad50cc2b0a6e66de12f33c6819aaa356c8e6f7808384f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:44:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d6c4b099607f36efe13488a53c66715ba756493eaa11612cef18b8174b7b48ef\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:44:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a352e1e0c2a6fcb27ed2d53b1952586d56239ac801471f41d319d4b9390f7334\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:44:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f71a2483b5040f737133b92baa6ea162f365c492a0d860a50ba8eabd7ca23519\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f71a2483b5040f737133b92baa6ea162f365c492a0d860a50ba8eabd7ca23519\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-10T10:45:21Z\\\",\\\"message\\\":\\\"file observer\\\\nW1210 10:45:20.821547 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1210 10:45:20.821865 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1210 10:45:20.824662 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-536389068/tls.crt::/tmp/serving-cert-536389068/tls.key\\\\\\\"\\\\nI1210 10:45:21.107127 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1210 10:45:21.109726 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1210 10:45:21.109750 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1210 10:45:21.109791 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1210 10:45:21.109797 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1210 10:45:21.115580 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1210 10:45:21.115608 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1210 10:45:21.115614 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1210 10:45:21.115621 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1210 10:45:21.115664 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1210 10:45:21.115672 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1210 10:45:21.115676 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1210 10:45:21.117097 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1210 10:45:21.119004 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:14Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://531ca3c69106ee33211f11eba4a6fcbcf6cf5b8b6ee16193b5a118999537d2e0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:00Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6a7d241fa7fd864088c8bfa29a4457b38b3e78a6846872d6f52aeebb0de99c58\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6a7d241fa7fd864088c8bfa29a4457b38b3e78a6846872d6f52aeebb0de99c58\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:44:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:44:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:44:56Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:39Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:39 crc kubenswrapper[4780]: I1210 10:45:39.907146 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:39 crc kubenswrapper[4780]: I1210 10:45:39.907227 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:39 crc kubenswrapper[4780]: I1210 10:45:39.907243 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:39 crc kubenswrapper[4780]: I1210 10:45:39.907366 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:39 crc kubenswrapper[4780]: I1210 10:45:39.907438 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:39Z","lastTransitionTime":"2025-12-10T10:45:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:39 crc kubenswrapper[4780]: I1210 10:45:39.925811 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8ca741d2df061bc77e981eca8a555ba59082c6db0357093490908a1b27054340\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:39Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:39 crc kubenswrapper[4780]: I1210 10:45:39.945243 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6bf1dca1-b191-4796-b326-baac53e84045\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://439376b6f14a03790c4d49523ce4f21e3ee73ee0a81954f873409cf956048f53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sh92h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://57ec3f3a4da992b4288db7d6633da18e479fdc56b10c503a2a44368671882f0d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sh92h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:27Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-xhdr5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:39Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:39 crc kubenswrapper[4780]: I1210 10:45:39.962352 4780 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:39Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:39 crc kubenswrapper[4780]: I1210 10:45:39.982682 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://acd480054c5d19efdbcce679b2246ce2972593aa710b1074a2fa90006f351c66\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7c28d1629c1d97373d475f14e54585e81a67114d5a69e21be9d1e750040d6d5c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:39Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:39 crc kubenswrapper[4780]: I1210 10:45:39.999220 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-msm77" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c1046aa3-9bf1-4013-8e8d-5629f08ed5e2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6a59aa8cd7a70e094ff80b0ae626aaabb77fb7ffdcffa1320945909c32da0704\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2ttwk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:30Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-msm77\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:39Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:40 crc kubenswrapper[4780]: I1210 10:45:40.011942 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:40 crc kubenswrapper[4780]: I1210 10:45:40.011993 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:40 crc kubenswrapper[4780]: I1210 10:45:40.012007 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:40 crc kubenswrapper[4780]: I1210 10:45:40.012027 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:40 crc kubenswrapper[4780]: I1210 10:45:40.012040 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:40Z","lastTransitionTime":"2025-12-10T10:45:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:40 crc kubenswrapper[4780]: I1210 10:45:40.016190 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://738eca4a4800d0b83e165aaad1b57939c0fca3335dbf043de168bcc240b2c32c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:40Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:40 crc kubenswrapper[4780]: I1210 10:45:40.047783 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:40Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:40 crc kubenswrapper[4780]: I1210 10:45:40.063718 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-8cwb7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"deadb49b-61b8-435f-8168-d7bd3c01b5ad\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b46e1864cd02c0ab49ad21329dedf09667d52948a3f59eac7f32302cc0b48bda\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"syste
m-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r67pg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:27Z\\\"}}\" for pod \"openshift-multus\"/\"multus-8cwb7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:40Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:40 crc kubenswrapper[4780]: I1210 10:45:40.087559 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-rwfxn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e522fb8-b104-4f14-a3a2-628fbe0ef36c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5855e463a188d72cb8eac92bd97b9b2f91550a9a5437d7df83405bbe7a104f5d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5855e463a188d72cb8eac92bd97b9b2f91550a9a5437d7df83405bbe7a104f5d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4a16db7b744311d27339440bc46ebd09ba4264c5892ff205c6125af2a127e67e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4a16db7b744311d27339440bc46ebd09ba4264c5892ff205c6125af2a127e67e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fcbeee4d4e545150d725187049a89aa3d35ddd8f7767a6af0ea84a385454c316\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fcbeee4d4e545150d725187049a89aa3d35ddd8f7767a6af0ea84a385454c316\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3d60cbe3cc8e817975ed6843aefdbfe3233e8857a6458d77322a623ed858ca18\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3d60cbe3cc8e817975ed6843aefdbfe3233e8857a6458d77322a623ed858ca18\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disa
bled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:27Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-rwfxn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:40Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:40 crc kubenswrapper[4780]: I1210 10:45:40.114864 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:40 crc kubenswrapper[4780]: I1210 10:45:40.114956 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:40 crc kubenswrapper[4780]: I1210 10:45:40.114971 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:40 crc kubenswrapper[4780]: I1210 10:45:40.114997 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:40 crc kubenswrapper[4780]: I1210 10:45:40.115010 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:40Z","lastTransitionTime":"2025-12-10T10:45:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:40 crc kubenswrapper[4780]: I1210 10:45:40.196466 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/network-metrics-daemon-46s5p"] Dec 10 10:45:40 crc kubenswrapper[4780]: I1210 10:45:40.198103 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-46s5p" Dec 10 10:45:40 crc kubenswrapper[4780]: E1210 10:45:40.198249 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-46s5p" podUID="24187953-1dc5-48d7-b00c-1e5876604b6b" Dec 10 10:45:40 crc kubenswrapper[4780]: I1210 10:45:40.205846 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/24187953-1dc5-48d7-b00c-1e5876604b6b-metrics-certs\") pod \"network-metrics-daemon-46s5p\" (UID: \"24187953-1dc5-48d7-b00c-1e5876604b6b\") " pod="openshift-multus/network-metrics-daemon-46s5p" Dec 10 10:45:40 crc kubenswrapper[4780]: I1210 10:45:40.205899 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jg69p\" (UniqueName: \"kubernetes.io/projected/24187953-1dc5-48d7-b00c-1e5876604b6b-kube-api-access-jg69p\") pod \"network-metrics-daemon-46s5p\" (UID: \"24187953-1dc5-48d7-b00c-1e5876604b6b\") " pod="openshift-multus/network-metrics-daemon-46s5p" Dec 10 10:45:40 crc kubenswrapper[4780]: I1210 10:45:40.222558 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:40 crc kubenswrapper[4780]: I1210 10:45:40.222628 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:40 crc kubenswrapper[4780]: I1210 10:45:40.222640 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:40 crc kubenswrapper[4780]: I1210 10:45:40.222660 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:40 crc kubenswrapper[4780]: I1210 10:45:40.222679 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:40Z","lastTransitionTime":"2025-12-10T10:45:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:45:40 crc kubenswrapper[4780]: I1210 10:45:40.227089 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-rwfxn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e522fb8-b104-4f14-a3a2-628fbe0ef36c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5855e463a188d72cb8eac92bd97b9b2f91550a9a5437d7df83405bbe7a104f5d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5855e463a188d72cb8eac92bd97b9b2f91550a9a5437d7df83405bbe7a104f5d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4a16db7b744311d27339440bc46ebd09ba4264c5892ff205c6125af2a127e67e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4a16db7b744311d27339440bc46ebd09ba4264c5892ff205c6125af2a127e67e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fcbeee4d4e545150d725187049a89aa3d35ddd8f7767a6af0ea84a385454c316\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fcbeee4d4e545150d725187049a89aa3d35ddd8f7767a6af0ea84a385454c316\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3d60cbe3cc8e817975ed6843aefdbfe3233e8857a6458d77322a623ed858ca18\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3d60cbe3cc8e817975ed6843aefdbfe3233e8857a6458d77322a623ed858ca18\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:
45:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:27Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-rwfxn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:40Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:40 crc kubenswrapper[4780]: I1210 10:45:40.307605 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/24187953-1dc5-48d7-b00c-1e5876604b6b-metrics-certs\") pod \"network-metrics-daemon-46s5p\" (UID: \"24187953-1dc5-48d7-b00c-1e5876604b6b\") " pod="openshift-multus/network-metrics-daemon-46s5p" Dec 10 10:45:40 crc kubenswrapper[4780]: I1210 10:45:40.307695 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jg69p\" (UniqueName: \"kubernetes.io/projected/24187953-1dc5-48d7-b00c-1e5876604b6b-kube-api-access-jg69p\") pod \"network-metrics-daemon-46s5p\" (UID: \"24187953-1dc5-48d7-b00c-1e5876604b6b\") " pod="openshift-multus/network-metrics-daemon-46s5p" Dec 10 10:45:40 crc 
kubenswrapper[4780]: E1210 10:45:40.308306 4780 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Dec 10 10:45:40 crc kubenswrapper[4780]: E1210 10:45:40.308669 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/24187953-1dc5-48d7-b00c-1e5876604b6b-metrics-certs podName:24187953-1dc5-48d7-b00c-1e5876604b6b nodeName:}" failed. No retries permitted until 2025-12-10 10:45:40.808547186 +0000 UTC m=+45.661940629 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/24187953-1dc5-48d7-b00c-1e5876604b6b-metrics-certs") pod "network-metrics-daemon-46s5p" (UID: "24187953-1dc5-48d7-b00c-1e5876604b6b") : object "openshift-multus"/"metrics-daemon-secret" not registered Dec 10 10:45:40 crc kubenswrapper[4780]: I1210 10:45:40.357206 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:40 crc kubenswrapper[4780]: I1210 10:45:40.357267 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:40 crc kubenswrapper[4780]: I1210 10:45:40.357278 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:40 crc kubenswrapper[4780]: I1210 10:45:40.357314 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:40 crc kubenswrapper[4780]: I1210 10:45:40.357333 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:40Z","lastTransitionTime":"2025-12-10T10:45:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:45:40 crc kubenswrapper[4780]: I1210 10:45:40.361343 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://738eca4a4800d0b83e165aaad1b57939c0fca3335dbf043de168bcc240b2c32c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:40Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:40 crc kubenswrapper[4780]: I1210 10:45:40.373131 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jg69p\" (UniqueName: \"kubernetes.io/projected/24187953-1dc5-48d7-b00c-1e5876604b6b-kube-api-access-jg69p\") pod \"network-metrics-daemon-46s5p\" (UID: \"24187953-1dc5-48d7-b00c-1e5876604b6b\") " pod="openshift-multus/network-metrics-daemon-46s5p" Dec 10 10:45:40 crc kubenswrapper[4780]: I1210 10:45:40.381218 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:40Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:40 crc kubenswrapper[4780]: I1210 10:45:40.396588 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-8cwb7" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"deadb49b-61b8-435f-8168-d7bd3c01b5ad\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b46e1864cd02c0ab49ad21329dedf09667d52948a3f59eac7f32302cc0b48bda\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r67pg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:27Z\\\"}}\" for pod \"openshift-multus\"/\"multus-8cwb7\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:40Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:40 crc kubenswrapper[4780]: I1210 10:45:40.410388 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-46s5p" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"24187953-1dc5-48d7-b00c-1e5876604b6b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:40Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:40Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jg69p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jg69p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:40Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-46s5p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:40Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:40 crc 
kubenswrapper[4780]: I1210 10:45:40.430320 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:40Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:40 crc kubenswrapper[4780]: I1210 10:45:40.445125 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-2lx8w" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8a3251eb-408c-42f1-b74d-261cb45eab71\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://414f485a56b9fb2fbea2bbc9687a588aabda6239cad44f8116ec680597dbe667\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lgkg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:27Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-2lx8w\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:40Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:40 crc kubenswrapper[4780]: I1210 10:45:40.461751 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:40 crc kubenswrapper[4780]: I1210 10:45:40.461819 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:40 crc kubenswrapper[4780]: I1210 10:45:40.461835 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:40 crc kubenswrapper[4780]: I1210 10:45:40.461863 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:40 crc kubenswrapper[4780]: I1210 10:45:40.461878 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:40Z","lastTransitionTime":"2025-12-10T10:45:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: 
no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:40 crc kubenswrapper[4780]: I1210 10:45:40.478157 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-fpl55" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cc22221d-0c02-4e8c-8314-c2e6d9290b5e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ef0cc518de07a141be124545f2e547c630329ff507b0e0e71e4dee9b4be5d89a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ce7c3983123be2f92944322c2cf554e67cf80a51981415d9872bfc2db9f1ed3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recu
rsiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cafa00280d671b48ed3628917793412a20b9d3be93a80d9a7775003081aea1ab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dbc444116b8592021f7d6ba0387d473521cba7c09ca78384be81f688af54554f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://782bcb7eb8341b73ff1c32c5af00aaefa2a513e5d1672f9f7df13cd05467131c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c39c3d56584d0939bfa9be938be09db5c93ddf50e0a56cacaa6e30c7b79475a1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d7732574532
65a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://547474a5e2f6912cd7f0fd6c25cdaf68ff49af2ba46c782bf7dc81e8052dbcfc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/r
un/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://df663a4e7ac68c8967db1167ea07b83862e9d52aa13b3f10e51934bb18e7397e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ada0c2d5e74e1805777a330c39a3c1c0fb677f876387207a83c79bd7ed06775a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ada0c2d5e74e1805777a330c39a3c1c0fb677f876387207a83c79bd7ed06775a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:27Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-fpl55\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:40Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:40 crc kubenswrapper[4780]: I1210 10:45:40.493154 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-6m7tj" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a78da08f-1ec1-4cc7-af55-d527da423778\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jvtxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jvtxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:38Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-6m7tj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:40Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:40 crc kubenswrapper[4780]: I1210 10:45:40.512140 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch 
status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"da4d38a3-53c0-417d-a86f-3496714bd352\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:56Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:56Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7f0f1f88e79f81b145ad50cc2b0a6e66de12f33c6819aaa356c8e6f7808384f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:44:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d6c4b099607f36efe13488a53c66715ba756493eaa11612cef18b8174b7b48ef\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:44:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a352e1e0c2a6fcb27ed2d53b1952586d56239ac801471f41d319d4b9390f7334\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:44:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}
,{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f71a2483b5040f737133b92baa6ea162f365c492a0d860a50ba8eabd7ca23519\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f71a2483b5040f737133b92baa6ea162f365c492a0d860a50ba8eabd7ca23519\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-10T10:45:21Z\\\",\\\"message\\\":\\\"file observer\\\\nW1210 10:45:20.821547 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1210 10:45:20.821865 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1210 10:45:20.824662 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-536389068/tls.crt::/tmp/serving-cert-536389068/tls.key\\\\\\\"\\\\nI1210 10:45:21.107127 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1210 10:45:21.109726 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1210 10:45:21.109750 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1210 10:45:21.109791 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1210 10:45:21.109797 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1210 10:45:21.115580 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1210 10:45:21.115608 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1210 10:45:21.115614 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1210 10:45:21.115621 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1210 10:45:21.115664 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1210 10:45:21.115672 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1210 10:45:21.115676 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1210 10:45:21.117097 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1210 10:45:21.119004 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:14Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://531ca3c69106ee33211f11eba4a6fcbcf6cf5b8b6ee16193b5a118999537d2e0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:00Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6a7d241fa7fd864088c8bfa29a4457b38b3e78a6846872d6f52aeebb0de99c58\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6a7d241fa7fd864088c8bfa29a4457b38b3e78a6846872d6f52aeebb0de99c58\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:44:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:44:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:44:56Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:40Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:40 crc kubenswrapper[4780]: I1210 10:45:40.525852 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8ca741d2df061bc77e981eca8a555ba59082c6db0357093490908a1b27054340\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:40Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:40 crc kubenswrapper[4780]: I1210 10:45:40.543103 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6bf1dca1-b191-4796-b326-baac53e84045\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://439376b6f14a03790c4d49523ce4f21e3ee73ee0a81954f873409cf956048f53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sh92h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://57ec3f3a4da992b4288db7d6633da18e479fdc56b10c503a2a44368671882f0d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sh92h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:27Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-xhdr5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:40Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:40 crc kubenswrapper[4780]: I1210 10:45:40.557978 4780 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://acd480054c5d19efdbcce679b2246ce2972593aa710b1074a2fa90006f351c66\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7c28d1629c1d97373d475f14e54585e81a67114d5a69e21be9d1e750040d6d5c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:40Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:40 crc kubenswrapper[4780]: I1210 10:45:40.565014 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:40 crc kubenswrapper[4780]: I1210 10:45:40.565066 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:40 
crc kubenswrapper[4780]: I1210 10:45:40.565083 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:40 crc kubenswrapper[4780]: I1210 10:45:40.565105 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:40 crc kubenswrapper[4780]: I1210 10:45:40.565121 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:40Z","lastTransitionTime":"2025-12-10T10:45:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:40 crc kubenswrapper[4780]: I1210 10:45:40.576723 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-msm77" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c1046aa3-9bf1-4013-8e8d-5629f08ed5e2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6a59aa8cd7a70e094ff80b0ae626aaabb77fb7ffdcffa1320945909c32da0704\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2ttwk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:30Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-msm77\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:40Z is after 2025-08-24T17:21:41Z" 
Dec 10 10:45:40 crc kubenswrapper[4780]: I1210 10:45:40.626569 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:40Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:40 crc kubenswrapper[4780]: I1210 10:45:40.668099 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:40 crc kubenswrapper[4780]: I1210 10:45:40.668141 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:40 crc kubenswrapper[4780]: I1210 10:45:40.668151 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:40 crc kubenswrapper[4780]: I1210 10:45:40.668170 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:40 crc kubenswrapper[4780]: I1210 10:45:40.668181 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:40Z","lastTransitionTime":"2025-12-10T10:45:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:45:40 crc kubenswrapper[4780]: I1210 10:45:40.771486 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:40 crc kubenswrapper[4780]: I1210 10:45:40.771552 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:40 crc kubenswrapper[4780]: I1210 10:45:40.771575 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:40 crc kubenswrapper[4780]: I1210 10:45:40.771596 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:40 crc kubenswrapper[4780]: I1210 10:45:40.771608 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:40Z","lastTransitionTime":"2025-12-10T10:45:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:40 crc kubenswrapper[4780]: I1210 10:45:40.816754 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/24187953-1dc5-48d7-b00c-1e5876604b6b-metrics-certs\") pod \"network-metrics-daemon-46s5p\" (UID: \"24187953-1dc5-48d7-b00c-1e5876604b6b\") " pod="openshift-multus/network-metrics-daemon-46s5p" Dec 10 10:45:40 crc kubenswrapper[4780]: E1210 10:45:40.817043 4780 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Dec 10 10:45:40 crc kubenswrapper[4780]: E1210 10:45:40.817195 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/24187953-1dc5-48d7-b00c-1e5876604b6b-metrics-certs podName:24187953-1dc5-48d7-b00c-1e5876604b6b nodeName:}" failed. No retries permitted until 2025-12-10 10:45:41.817161786 +0000 UTC m=+46.670555409 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/24187953-1dc5-48d7-b00c-1e5876604b6b-metrics-certs") pod "network-metrics-daemon-46s5p" (UID: "24187953-1dc5-48d7-b00c-1e5876604b6b") : object "openshift-multus"/"metrics-daemon-secret" not registered Dec 10 10:45:40 crc kubenswrapper[4780]: I1210 10:45:40.874147 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:40 crc kubenswrapper[4780]: I1210 10:45:40.874200 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:40 crc kubenswrapper[4780]: I1210 10:45:40.874212 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:40 crc kubenswrapper[4780]: I1210 10:45:40.874229 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:40 crc kubenswrapper[4780]: I1210 10:45:40.874239 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:40Z","lastTransitionTime":"2025-12-10T10:45:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:40 crc kubenswrapper[4780]: I1210 10:45:40.958479 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 10 10:45:40 crc kubenswrapper[4780]: I1210 10:45:40.958501 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 10 10:45:40 crc kubenswrapper[4780]: E1210 10:45:40.958701 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 10 10:45:40 crc kubenswrapper[4780]: I1210 10:45:40.958513 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 10 10:45:40 crc kubenswrapper[4780]: E1210 10:45:40.959130 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 10 10:45:40 crc kubenswrapper[4780]: E1210 10:45:40.959268 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 10 10:45:40 crc kubenswrapper[4780]: I1210 10:45:40.978366 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:40 crc kubenswrapper[4780]: I1210 10:45:40.978483 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:40 crc kubenswrapper[4780]: I1210 10:45:40.978498 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:40 crc kubenswrapper[4780]: I1210 10:45:40.978525 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:40 crc kubenswrapper[4780]: I1210 10:45:40.978538 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:40Z","lastTransitionTime":"2025-12-10T10:45:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:41 crc kubenswrapper[4780]: I1210 10:45:41.081881 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:41 crc kubenswrapper[4780]: I1210 10:45:41.081976 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:41 crc kubenswrapper[4780]: I1210 10:45:41.081988 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:41 crc kubenswrapper[4780]: I1210 10:45:41.082010 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:41 crc kubenswrapper[4780]: I1210 10:45:41.082040 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:41Z","lastTransitionTime":"2025-12-10T10:45:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:45:41 crc kubenswrapper[4780]: I1210 10:45:41.184858 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:41 crc kubenswrapper[4780]: I1210 10:45:41.184899 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:41 crc kubenswrapper[4780]: I1210 10:45:41.184908 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:41 crc kubenswrapper[4780]: I1210 10:45:41.184936 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:41 crc kubenswrapper[4780]: I1210 10:45:41.184945 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:41Z","lastTransitionTime":"2025-12-10T10:45:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:41 crc kubenswrapper[4780]: I1210 10:45:41.288330 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:41 crc kubenswrapper[4780]: I1210 10:45:41.288396 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:41 crc kubenswrapper[4780]: I1210 10:45:41.288412 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:41 crc kubenswrapper[4780]: I1210 10:45:41.288438 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:41 crc kubenswrapper[4780]: I1210 10:45:41.288454 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:41Z","lastTransitionTime":"2025-12-10T10:45:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:41 crc kubenswrapper[4780]: I1210 10:45:41.394331 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:41 crc kubenswrapper[4780]: I1210 10:45:41.394406 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:41 crc kubenswrapper[4780]: I1210 10:45:41.394429 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:41 crc kubenswrapper[4780]: I1210 10:45:41.394472 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:41 crc kubenswrapper[4780]: I1210 10:45:41.394488 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:41Z","lastTransitionTime":"2025-12-10T10:45:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:45:41 crc kubenswrapper[4780]: I1210 10:45:41.497296 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:41 crc kubenswrapper[4780]: I1210 10:45:41.497350 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:41 crc kubenswrapper[4780]: I1210 10:45:41.497370 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:41 crc kubenswrapper[4780]: I1210 10:45:41.497387 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:41 crc kubenswrapper[4780]: I1210 10:45:41.497398 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:41Z","lastTransitionTime":"2025-12-10T10:45:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:41 crc kubenswrapper[4780]: I1210 10:45:41.602692 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:41 crc kubenswrapper[4780]: I1210 10:45:41.602790 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:41 crc kubenswrapper[4780]: I1210 10:45:41.602838 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:41 crc kubenswrapper[4780]: I1210 10:45:41.602891 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:41 crc kubenswrapper[4780]: I1210 10:45:41.602963 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:41Z","lastTransitionTime":"2025-12-10T10:45:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:42 crc kubenswrapper[4780]: I1210 10:45:42.054446 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/24187953-1dc5-48d7-b00c-1e5876604b6b-metrics-certs\") pod \"network-metrics-daemon-46s5p\" (UID: \"24187953-1dc5-48d7-b00c-1e5876604b6b\") " pod="openshift-multus/network-metrics-daemon-46s5p" Dec 10 10:45:42 crc kubenswrapper[4780]: E1210 10:45:42.054689 4780 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Dec 10 10:45:42 crc kubenswrapper[4780]: E1210 10:45:42.054847 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/24187953-1dc5-48d7-b00c-1e5876604b6b-metrics-certs podName:24187953-1dc5-48d7-b00c-1e5876604b6b nodeName:}" failed. No retries permitted until 2025-12-10 10:45:44.054750295 +0000 UTC m=+48.908143738 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/24187953-1dc5-48d7-b00c-1e5876604b6b-metrics-certs") pod "network-metrics-daemon-46s5p" (UID: "24187953-1dc5-48d7-b00c-1e5876604b6b") : object "openshift-multus"/"metrics-daemon-secret" not registered Dec 10 10:45:42 crc kubenswrapper[4780]: I1210 10:45:42.055186 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-46s5p" Dec 10 10:45:42 crc kubenswrapper[4780]: E1210 10:45:42.055414 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-46s5p" podUID="24187953-1dc5-48d7-b00c-1e5876604b6b" Dec 10 10:45:42 crc kubenswrapper[4780]: I1210 10:45:42.061668 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:42 crc kubenswrapper[4780]: I1210 10:45:42.061712 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:42 crc kubenswrapper[4780]: I1210 10:45:42.061724 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:42 crc kubenswrapper[4780]: I1210 10:45:42.061741 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:42 crc kubenswrapper[4780]: I1210 10:45:42.061751 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:42Z","lastTransitionTime":"2025-12-10T10:45:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:42 crc kubenswrapper[4780]: I1210 10:45:42.074480 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 10 10:45:42 crc kubenswrapper[4780]: E1210 10:45:42.074596 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 10 10:45:42 crc kubenswrapper[4780]: I1210 10:45:42.224879 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-rwfxn" event={"ID":"5e522fb8-b104-4f14-a3a2-628fbe0ef36c","Type":"ContainerStarted","Data":"0a0b3aad440bea346393ff35f0fa9492282c2a69b4a4c170f56ca7af63666077"} Dec 10 10:45:42 crc kubenswrapper[4780]: I1210 10:45:42.224972 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-6m7tj" event={"ID":"a78da08f-1ec1-4cc7-af55-d527da423778","Type":"ContainerStarted","Data":"6a4509159a8aa8b24876c86f827f346063fd6cd2603c15242f6507b0a4afaff3"} Dec 10 10:45:42 crc kubenswrapper[4780]: I1210 10:45:42.227429 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:42 crc kubenswrapper[4780]: I1210 10:45:42.227491 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:42 crc kubenswrapper[4780]: I1210 10:45:42.227504 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:42 crc kubenswrapper[4780]: I1210 10:45:42.227597 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:42 crc kubenswrapper[4780]: I1210 10:45:42.227617 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:42Z","lastTransitionTime":"2025-12-10T10:45:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:42 crc kubenswrapper[4780]: I1210 10:45:42.233638 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:42Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:42 crc kubenswrapper[4780]: I1210 10:45:42.248194 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-2lx8w" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8a3251eb-408c-42f1-b74d-261cb45eab71\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://414f485a56b9fb2fbea2bbc9687a588aabda6239cad44f8116ec680597dbe667\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lgkg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:27Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-2lx8w\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2025-12-10T10:45:42Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:42 crc kubenswrapper[4780]: I1210 10:45:42.273766 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-fpl55" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cc22221d-0c02-4e8c-8314-c2e6d9290b5e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ef0cc518de07a141be124545f2e547c630329ff507b0e0e71e4dee9b4be5d89a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ce7c3983123be2f92944322c2cf554e67cf80a51981415d9872bfc2db9f1ed3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\
"containerID\\\":\\\"cri-o://cafa00280d671b48ed3628917793412a20b9d3be93a80d9a7775003081aea1ab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dbc444116b8592021f7d6ba0387d473521cba7c09ca78384be81f688af54554f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://782bcb7eb8341b73ff1c32c5af00aaefa2a513e5d1672f9f7df13cd05467131c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c39c3d56584d0939bfa9be938be09db5c93ddf50e0a56cacaa6e30c7b79475a1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID
\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://547474a5e2f6912cd7f0fd6c25cdaf68ff49af2ba46c782bf7dc81e8052dbcfc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ov
nkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://df663a4e7ac68c8967db1167ea07b83862e9d52aa13b3f10e51934bb18e7397e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ada0c2d5e74e1805777a330c39a3c1c0fb677f876387207a83c79bd7ed06775a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ada0c2d5e74e1805777a330c39a3c1c0fb677f876387207a83c79bd7ed06775a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:27Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-fpl55\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:42Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:42 crc kubenswrapper[4780]: I1210 10:45:42.320385 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-6m7tj" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a78da08f-1ec1-4cc7-af55-d527da423778\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://920fb034d3d8ce6bf28b128e33093aa5daa724b15c51c7e208ed94ccc2f4840e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jvtxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a4509159a8aa8b24876c86f827f346063fd6cd2603c15242f6507b0a4afaff3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jvtxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:38Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-6m7tj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:42Z is after 2025-08-24T17:21:41Z" Dec 10 
10:45:42 crc kubenswrapper[4780]: I1210 10:45:42.330412 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:42 crc kubenswrapper[4780]: I1210 10:45:42.330476 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:42 crc kubenswrapper[4780]: I1210 10:45:42.330487 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:42 crc kubenswrapper[4780]: I1210 10:45:42.330504 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:42 crc kubenswrapper[4780]: I1210 10:45:42.330514 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:42Z","lastTransitionTime":"2025-12-10T10:45:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:42 crc kubenswrapper[4780]: I1210 10:45:42.336670 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-46s5p" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"24187953-1dc5-48d7-b00c-1e5876604b6b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:40Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:40Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jg69p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jg69p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:40Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-46s5p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:42Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:42 crc kubenswrapper[4780]: I1210 10:45:42.353344 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"da4d38a3-53c0-417d-a86f-3496714bd352\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:56Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:56Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7f0f1f88e79f81b145ad50cc2b0a6e66de12f33c6819aaa356c8e6f7808384f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:44:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d6c4b099607f36efe13488a53c66715ba756493eaa11612cef18b8174b7b48ef\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:44:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a352e1e0c2a6fcb27ed2d53b1952586d56239ac801471f41d319d4b9390f7334\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:44:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f71a2483b5040f737133b92baa6ea162f365c492a0d860a50ba8eabd7ca23519\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f71a2483b5040f737133b92baa6ea162f365c492a0d860a50ba8eabd7ca23519\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-10T10:45:21Z\\\",\\\"message\\\":\\\"file observer\\\\nW1210 10:45:20.821547 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1210 10:45:20.821865 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1210 10:45:20.824662 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-536389068/tls.crt::/tmp/serving-cert-536389068/tls.key\\\\\\\"\\\\nI1210 10:45:21.107127 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1210 10:45:21.109726 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1210 10:45:21.109750 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1210 10:45:21.109791 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1210 10:45:21.109797 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1210 10:45:21.115580 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1210 10:45:21.115608 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1210 10:45:21.115614 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1210 10:45:21.115621 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1210 10:45:21.115664 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1210 10:45:21.115672 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1210 10:45:21.115676 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1210 10:45:21.117097 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1210 10:45:21.119004 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:14Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://531ca3c69106ee33211f11eba4a6fcbcf6cf5b8b6ee16193b5a118999537d2e0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:00Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6a7d241fa7fd864088c8bfa29a4457b38b3e78a6846872d6f52aeebb0de99c58\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6a7d241fa7fd864088c8bfa29a4457b38b3e78a6846872d6f52aeebb0de99c58\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:44:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:44:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:44:56Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:42Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:42 crc kubenswrapper[4780]: I1210 10:45:42.366057 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8ca741d2df061bc77e981eca8a555ba59082c6db0357093490908a1b27054340\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:42Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:42 crc kubenswrapper[4780]: I1210 10:45:42.382480 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6bf1dca1-b191-4796-b326-baac53e84045\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://439376b6f14a03790c4d49523ce4f21e3ee73ee0a81954f873409cf956048f53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sh92h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://57ec3f3a4da992b4288db7d6633da18e479fdc56b10c503a2a44368671882f0d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sh92h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:27Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-xhdr5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:42Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:42 crc kubenswrapper[4780]: I1210 10:45:42.399350 4780 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:42Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:42 crc kubenswrapper[4780]: I1210 10:45:42.418408 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://acd480054c5d19efdbcce679b2246ce2972593aa710b1074a2fa90006f351c66\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7c28d1629c1d97373d475f14e54585e81a67114d5a69e21be9d1e750040d6d5c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:42Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:42 crc kubenswrapper[4780]: I1210 10:45:42.433301 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-msm77" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c1046aa3-9bf1-4013-8e8d-5629f08ed5e2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6a59aa8cd7a70e094ff80b0ae626aaabb77fb7ffdcffa1320945909c32da0704\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2ttwk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:30Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-msm77\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:42Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:42 crc kubenswrapper[4780]: I1210 10:45:42.434165 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:42 crc kubenswrapper[4780]: I1210 10:45:42.434222 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:42 crc kubenswrapper[4780]: I1210 10:45:42.434247 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:42 crc kubenswrapper[4780]: I1210 10:45:42.434289 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:42 crc kubenswrapper[4780]: I1210 10:45:42.434313 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:42Z","lastTransitionTime":"2025-12-10T10:45:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:42 crc kubenswrapper[4780]: I1210 10:45:42.450147 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://738eca4a4800d0b83e165aaad1b57939c0fca3335dbf043de168bcc240b2c32c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:42Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:42 crc kubenswrapper[4780]: I1210 10:45:42.467327 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:42Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:42 crc kubenswrapper[4780]: I1210 10:45:42.483607 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-8cwb7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"deadb49b-61b8-435f-8168-d7bd3c01b5ad\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b46e1864cd02c0ab49ad21329dedf09667d52948a3f59eac7f32302cc0b48bda\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"syste
m-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r67pg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:27Z\\\"}}\" for pod \"openshift-multus\"/\"multus-8cwb7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:42Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:42 crc kubenswrapper[4780]: I1210 10:45:42.500343 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-rwfxn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e522fb8-b104-4f14-a3a2-628fbe0ef36c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5855e463a188d72cb8eac92bd97b9b2f91550a9a5437d7df83405bbe7a104f5d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5855e463a188d72cb8eac92bd97b9b2f91550a9a5437d7df83405bbe7a104f5d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4a16db7b744311d27339440bc46ebd09ba4264c5892ff205c6125af2a127e67e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4a16db7b744311d27339440bc46ebd09ba4264c5892ff205c6125af2a127e67e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fcbeee4d4e545150d725187049a89aa3d35ddd8f7767a6af0ea84a385454c316\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fcbeee4d4e545150d725187049a89aa3d35ddd8f7767a6af0ea84a385454c316\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3d60cbe3cc8e817975ed6843aefdbfe3233e8857a6458d77322a623ed858ca18\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3d60cbe3cc8e817975ed6843aefdbfe3233e8857a6458d77322a623ed858ca18\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disa
bled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:27Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-rwfxn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:42Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:42 crc kubenswrapper[4780]: I1210 10:45:42.514735 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:42Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:42 crc kubenswrapper[4780]: I1210 10:45:42.527348 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-2lx8w" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8a3251eb-408c-42f1-b74d-261cb45eab71\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://414f485a56b9fb2fbea2bbc9687a588aabda6239cad44f8116ec680597dbe667\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lgkg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:27Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-2lx8w\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2025-12-10T10:45:42Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:42 crc kubenswrapper[4780]: I1210 10:45:42.537108 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:42 crc kubenswrapper[4780]: I1210 10:45:42.537156 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:42 crc kubenswrapper[4780]: I1210 10:45:42.537168 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:42 crc kubenswrapper[4780]: I1210 10:45:42.537181 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:42 crc kubenswrapper[4780]: I1210 10:45:42.537190 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:42Z","lastTransitionTime":"2025-12-10T10:45:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:42 crc kubenswrapper[4780]: I1210 10:45:42.551605 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-fpl55" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cc22221d-0c02-4e8c-8314-c2e6d9290b5e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ef0cc518de07a141be124545f2e547c630329ff507b0e0e71e4dee9b4be5d89a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ce7c3983123be2f92944322c2cf554e67cf80a51981415d9872bfc2db9f1ed3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cafa00280d671b48ed3628917793412a20b9d3be93a80d9a7775003081aea1ab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dbc444116b8592021f7d6ba0387d473521cba7c09ca78384be81f688af54554f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://782bcb7eb8341b73ff1c32c5af00aaefa2a513e5d1672f9f7df13cd05467131c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c39c3d56584d0939bfa9be938be09db5c93ddf50e0a56cacaa6e30c7b79475a1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://547474a5e2f6912cd7f0fd6c25cdaf68ff49af2b
a46c782bf7dc81e8052dbcfc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://df663a4e7ac68c8967db1167ea07b83862e9d52aa13b3f10e51934bb18e7397e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccoun
t\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ada0c2d5e74e1805777a330c39a3c1c0fb677f876387207a83c79bd7ed06775a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ada0c2d5e74e1805777a330c39a3c1c0fb677f876387207a83c79bd7ed06775a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:27Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-fpl55\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:42Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:42 crc kubenswrapper[4780]: I1210 10:45:42.565648 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-6m7tj" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a78da08f-1ec1-4cc7-af55-d527da423778\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://920fb034d3d8ce6bf28b128e33093aa5daa724b15c51c7e208ed94ccc2f4840e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jvtxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a4509159a8aa8b24876c86f827f346063fd6cd2603c15242f6507b0a4afaff3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jvtxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:38Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-6m7tj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:42Z is after 2025-08-24T17:21:41Z" Dec 10 
10:45:42 crc kubenswrapper[4780]: I1210 10:45:42.576697 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-46s5p" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"24187953-1dc5-48d7-b00c-1e5876604b6b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:40Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:40Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jg69p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jg69p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:40Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-46s5p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:42Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:42 crc kubenswrapper[4780]: I1210 10:45:42.591857 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"da4d38a3-53c0-417d-a86f-3496714bd352\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:56Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:56Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7f0f1f88e79f81b145ad50cc2b0a6e66de12f33c6819aaa356c8e6f7808384f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:44:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d6c4b099607f36efe13488a53c66715ba756493eaa11612cef18b8174b7b48ef\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:44:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a352e1e0c2a6fcb27ed2d53b1952586d56239ac801471f41d319d4b9390f7334\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:44:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"m
ountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f71a2483b5040f737133b92baa6ea162f365c492a0d860a50ba8eabd7ca23519\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f71a2483b5040f737133b92baa6ea162f365c492a0d860a50ba8eabd7ca23519\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-10T10:45:21Z\\\",\\\"message\\\":\\\"file observer\\\\nW1210 10:45:20.821547 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1210 10:45:20.821865 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1210 10:45:20.824662 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-536389068/tls.crt::/tmp/serving-cert-536389068/tls.key\\\\\\\"\\\\nI1210 10:45:21.107127 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1210 10:45:21.109726 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1210 10:45:21.109750 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1210 10:45:21.109791 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1210 10:45:21.109797 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1210 10:45:21.115580 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1210 10:45:21.115608 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1210 10:45:21.115614 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1210 10:45:21.115621 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1210 10:45:21.115664 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1210 10:45:21.115672 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1210 10:45:21.115676 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1210 10:45:21.117097 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1210 10:45:21.119004 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:14Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://531ca3c69106ee33211f11eba4a6fcbcf6cf5b8b6ee16193b5a118999537d2e0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:00Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6a7d241fa7fd864088c8bfa29a4457b38b3e78a6846872d6f52aeebb0de99c58\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6a7d241fa7fd864088c8bfa29a4457b38b3e78a6846872d6f52aeebb0de99c58\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:44:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:44:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:44:56Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:42Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:42 crc kubenswrapper[4780]: I1210 10:45:42.604138 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8ca741d2df061bc77e981eca8a555ba59082c6db0357093490908a1b27054340\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:42Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:42 crc kubenswrapper[4780]: I1210 10:45:42.615951 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6bf1dca1-b191-4796-b326-baac53e84045\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://439376b6f14a03790c4d49523ce4f21e3ee73ee0a81954f873409cf956048f53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sh92h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://57ec3f3a4da992b4288db7d6633da18e479fdc56b10c503a2a44368671882f0d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sh92h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:27Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-xhdr5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:42Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:42 crc kubenswrapper[4780]: I1210 10:45:42.628120 4780 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:42Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:42 crc kubenswrapper[4780]: I1210 10:45:42.640869 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:42 crc kubenswrapper[4780]: I1210 10:45:42.640946 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:42 crc kubenswrapper[4780]: I1210 10:45:42.640960 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:42 crc kubenswrapper[4780]: I1210 10:45:42.640975 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:42 crc kubenswrapper[4780]: I1210 10:45:42.640986 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:42Z","lastTransitionTime":"2025-12-10T10:45:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:45:42 crc kubenswrapper[4780]: I1210 10:45:42.643824 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://acd480054c5d19efdbcce679b2246ce2972593aa710b1074a2fa90006f351c66\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7c28d1629c1d97373d475f14e54585e81a67114d5a69e21be9d1e750040d6d5c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:42Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:42 crc kubenswrapper[4780]: I1210 10:45:42.654771 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-msm77" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c1046aa3-9bf1-4013-8e8d-5629f08ed5e2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6a59aa8cd7a70e094ff80b0ae626aaabb77fb7ffdcffa1320945909c32da0704\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2ttwk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:30Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-msm77\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:42Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:42 crc kubenswrapper[4780]: I1210 10:45:42.675530 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://738eca4a4800d0b83e165aaad1b57939c0fca3335dbf043de168bcc240b2c32c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:42Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:42 crc kubenswrapper[4780]: I1210 10:45:42.691240 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located 
when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:42Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:43 crc kubenswrapper[4780]: I1210 10:45:43.033453 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 10 10:45:43 crc kubenswrapper[4780]: I1210 10:45:43.033943 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 10 10:45:43 crc kubenswrapper[4780]: E1210 10:45:43.033839 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 10 10:45:43 crc kubenswrapper[4780]: E1210 10:45:43.034133 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 10 10:45:43 crc kubenswrapper[4780]: I1210 10:45:43.039932 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:43 crc kubenswrapper[4780]: I1210 10:45:43.040001 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:43 crc kubenswrapper[4780]: I1210 10:45:43.040012 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:43 crc kubenswrapper[4780]: I1210 10:45:43.040028 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:43 crc kubenswrapper[4780]: I1210 10:45:43.040038 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:43Z","lastTransitionTime":"2025-12-10T10:45:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:43 crc kubenswrapper[4780]: I1210 10:45:43.045361 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-8cwb7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"deadb49b-61b8-435f-8168-d7bd3c01b5ad\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b46e1864cd02c0ab49ad21329dedf09667d52948a3f59eac7f32302cc0b48bda\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-paren
t\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r67pg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:27Z\\\"}}\" for pod \"openshift-multus\"/\"multus-8cwb7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:43Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:43 crc kubenswrapper[4780]: I1210 10:45:43.063980 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-rwfxn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e522fb8-b104-4f14-a3a2-628fbe0ef36c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5855e463a188d72cb8eac92bd97b9b2f91550a9a5437d7df83405bbe7a104f5d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5855e463a188d72cb8eac92bd97b9b2f91550a9a5437d7df83405bbe7a104f5d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4a16db7b744311d27339440bc46ebd09ba4264c5892ff205c6125af2a127e67e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4a16db7b744311d27339440bc46ebd09ba4264c5892ff205c6125af2a127e67e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fcbeee4d4e545150d725187049a89aa3d35ddd8f7767a6af0ea84a385454c316\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fcbeee4d4e545150d725187049a89aa3d35ddd8f7767a6af0ea84a385454c316\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3d60cbe3cc8e817975ed6843aefdbfe3233e8857a6458d77322a623ed858ca18\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3d60cbe3cc8e817975ed6843aefdbfe3233e8857a6458d77322a623ed858ca18\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0a0b3aad440bea346393ff35f0fa9492282c2a69b4a4c170f56ca7af63666077\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly
\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:27Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-rwfxn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:43Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:43 crc kubenswrapper[4780]: I1210 10:45:43.143364 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:43 crc kubenswrapper[4780]: I1210 10:45:43.143432 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:43 crc kubenswrapper[4780]: I1210 10:45:43.143447 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:43 crc kubenswrapper[4780]: I1210 10:45:43.143472 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:43 crc kubenswrapper[4780]: I1210 10:45:43.143485 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:43Z","lastTransitionTime":"2025-12-10T10:45:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:45:43 crc kubenswrapper[4780]: I1210 10:45:43.256574 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:43 crc kubenswrapper[4780]: I1210 10:45:43.256636 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:43 crc kubenswrapper[4780]: I1210 10:45:43.256647 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:43 crc kubenswrapper[4780]: I1210 10:45:43.256669 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:43 crc kubenswrapper[4780]: I1210 10:45:43.256681 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:43Z","lastTransitionTime":"2025-12-10T10:45:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:43 crc kubenswrapper[4780]: I1210 10:45:43.359530 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:43 crc kubenswrapper[4780]: I1210 10:45:43.359586 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:43 crc kubenswrapper[4780]: I1210 10:45:43.359598 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:43 crc kubenswrapper[4780]: I1210 10:45:43.359615 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:43 crc kubenswrapper[4780]: I1210 10:45:43.359628 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:43Z","lastTransitionTime":"2025-12-10T10:45:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:43 crc kubenswrapper[4780]: I1210 10:45:43.439379 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 10 10:45:43 crc kubenswrapper[4780]: E1210 10:45:43.439722 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-10 10:45:59.439674923 +0000 UTC m=+64.293068386 (durationBeforeRetry 16s). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:45:43 crc kubenswrapper[4780]: I1210 10:45:43.439894 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 10 10:45:43 crc kubenswrapper[4780]: I1210 10:45:43.440043 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 10 10:45:43 crc kubenswrapper[4780]: I1210 10:45:43.440134 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 10 10:45:43 crc kubenswrapper[4780]: I1210 10:45:43.440238 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 10 10:45:43 crc kubenswrapper[4780]: E1210 10:45:43.440261 4780 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Dec 10 10:45:43 crc kubenswrapper[4780]: E1210 10:45:43.440295 4780 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Dec 10 10:45:43 crc kubenswrapper[4780]: E1210 10:45:43.440333 4780 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Dec 10 10:45:43 crc kubenswrapper[4780]: E1210 10:45:43.440396 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-12-10 10:45:59.440382011 +0000 UTC m=+64.293775514 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Dec 10 10:45:43 crc kubenswrapper[4780]: E1210 10:45:43.440433 4780 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Dec 10 10:45:43 crc kubenswrapper[4780]: E1210 10:45:43.440550 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-12-10 10:45:59.440521954 +0000 UTC m=+64.293915427 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Dec 10 10:45:43 crc kubenswrapper[4780]: E1210 10:45:43.440573 4780 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Dec 10 10:45:43 crc kubenswrapper[4780]: E1210 10:45:43.440669 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-12-10 10:45:59.440641847 +0000 UTC m=+64.294035330 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Dec 10 10:45:43 crc kubenswrapper[4780]: E1210 10:45:43.440989 4780 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Dec 10 10:45:43 crc kubenswrapper[4780]: E1210 10:45:43.441008 4780 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Dec 10 10:45:43 crc kubenswrapper[4780]: E1210 10:45:43.441020 4780 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Dec 10 10:45:43 crc kubenswrapper[4780]: E1210 10:45:43.441078 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. 
No retries permitted until 2025-12-10 10:45:59.441063288 +0000 UTC m=+64.294456731 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Dec 10 10:45:43 crc kubenswrapper[4780]: I1210 10:45:43.462485 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:43 crc kubenswrapper[4780]: I1210 10:45:43.462536 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:43 crc kubenswrapper[4780]: I1210 10:45:43.462548 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:43 crc kubenswrapper[4780]: I1210 10:45:43.462567 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:43 crc kubenswrapper[4780]: I1210 10:45:43.462580 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:43Z","lastTransitionTime":"2025-12-10T10:45:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:43 crc kubenswrapper[4780]: I1210 10:45:43.565696 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:43 crc kubenswrapper[4780]: I1210 10:45:43.565730 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:43 crc kubenswrapper[4780]: I1210 10:45:43.565739 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:43 crc kubenswrapper[4780]: I1210 10:45:43.565754 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:43 crc kubenswrapper[4780]: I1210 10:45:43.565763 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:43Z","lastTransitionTime":"2025-12-10T10:45:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:45:43 crc kubenswrapper[4780]: I1210 10:45:43.670314 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:43 crc kubenswrapper[4780]: I1210 10:45:43.670377 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:43 crc kubenswrapper[4780]: I1210 10:45:43.670393 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:43 crc kubenswrapper[4780]: I1210 10:45:43.670416 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:43 crc kubenswrapper[4780]: I1210 10:45:43.670428 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:43Z","lastTransitionTime":"2025-12-10T10:45:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:43 crc kubenswrapper[4780]: I1210 10:45:43.773714 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:43 crc kubenswrapper[4780]: I1210 10:45:43.774043 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:43 crc kubenswrapper[4780]: I1210 10:45:43.774339 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:43 crc kubenswrapper[4780]: I1210 10:45:43.774558 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:43 crc kubenswrapper[4780]: I1210 10:45:43.774739 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:43Z","lastTransitionTime":"2025-12-10T10:45:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:43 crc kubenswrapper[4780]: I1210 10:45:43.877048 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:43 crc kubenswrapper[4780]: I1210 10:45:43.877293 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:43 crc kubenswrapper[4780]: I1210 10:45:43.877364 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:43 crc kubenswrapper[4780]: I1210 10:45:43.877429 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:43 crc kubenswrapper[4780]: I1210 10:45:43.877550 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:43Z","lastTransitionTime":"2025-12-10T10:45:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:45:43 crc kubenswrapper[4780]: I1210 10:45:43.959212 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-46s5p" Dec 10 10:45:43 crc kubenswrapper[4780]: I1210 10:45:43.959344 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 10 10:45:43 crc kubenswrapper[4780]: E1210 10:45:43.959469 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-46s5p" podUID="24187953-1dc5-48d7-b00c-1e5876604b6b" Dec 10 10:45:43 crc kubenswrapper[4780]: E1210 10:45:43.960259 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 10 10:45:43 crc kubenswrapper[4780]: I1210 10:45:43.960897 4780 scope.go:117] "RemoveContainer" containerID="f71a2483b5040f737133b92baa6ea162f365c492a0d860a50ba8eabd7ca23519" Dec 10 10:45:44 crc kubenswrapper[4780]: I1210 10:45:44.064246 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:44 crc kubenswrapper[4780]: I1210 10:45:44.064319 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:44 crc kubenswrapper[4780]: I1210 10:45:44.064332 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:44 crc kubenswrapper[4780]: I1210 10:45:44.064387 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:44 crc kubenswrapper[4780]: I1210 10:45:44.064411 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:44Z","lastTransitionTime":"2025-12-10T10:45:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:45:44 crc kubenswrapper[4780]: I1210 10:45:44.135075 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/24187953-1dc5-48d7-b00c-1e5876604b6b-metrics-certs\") pod \"network-metrics-daemon-46s5p\" (UID: \"24187953-1dc5-48d7-b00c-1e5876604b6b\") " pod="openshift-multus/network-metrics-daemon-46s5p" Dec 10 10:45:44 crc kubenswrapper[4780]: E1210 10:45:44.135264 4780 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Dec 10 10:45:44 crc kubenswrapper[4780]: E1210 10:45:44.135324 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/24187953-1dc5-48d7-b00c-1e5876604b6b-metrics-certs podName:24187953-1dc5-48d7-b00c-1e5876604b6b nodeName:}" failed. No retries permitted until 2025-12-10 10:45:48.135309131 +0000 UTC m=+52.988702574 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/24187953-1dc5-48d7-b00c-1e5876604b6b-metrics-certs") pod "network-metrics-daemon-46s5p" (UID: "24187953-1dc5-48d7-b00c-1e5876604b6b") : object "openshift-multus"/"metrics-daemon-secret" not registered Dec 10 10:45:44 crc kubenswrapper[4780]: I1210 10:45:44.168582 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:44 crc kubenswrapper[4780]: I1210 10:45:44.168855 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:44 crc kubenswrapper[4780]: I1210 10:45:44.168974 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:44 crc kubenswrapper[4780]: I1210 10:45:44.169086 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:44 crc kubenswrapper[4780]: I1210 10:45:44.169169 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:44Z","lastTransitionTime":"2025-12-10T10:45:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:45:44 crc kubenswrapper[4780]: I1210 10:45:44.271515 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:44 crc kubenswrapper[4780]: I1210 10:45:44.271580 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:44 crc kubenswrapper[4780]: I1210 10:45:44.271594 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:44 crc kubenswrapper[4780]: I1210 10:45:44.271611 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:44 crc kubenswrapper[4780]: I1210 10:45:44.271626 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:44Z","lastTransitionTime":"2025-12-10T10:45:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:44 crc kubenswrapper[4780]: I1210 10:45:44.397760 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:44 crc kubenswrapper[4780]: I1210 10:45:44.397798 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:44 crc kubenswrapper[4780]: I1210 10:45:44.397811 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:44 crc kubenswrapper[4780]: I1210 10:45:44.397828 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:44 crc kubenswrapper[4780]: I1210 10:45:44.397837 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:44Z","lastTransitionTime":"2025-12-10T10:45:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:44 crc kubenswrapper[4780]: I1210 10:45:44.501524 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:44 crc kubenswrapper[4780]: I1210 10:45:44.501597 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:44 crc kubenswrapper[4780]: I1210 10:45:44.501614 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:44 crc kubenswrapper[4780]: I1210 10:45:44.501684 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:44 crc kubenswrapper[4780]: I1210 10:45:44.501697 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:44Z","lastTransitionTime":"2025-12-10T10:45:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:45:44 crc kubenswrapper[4780]: I1210 10:45:44.592364 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Dec 10 10:45:44 crc kubenswrapper[4780]: I1210 10:45:44.639920 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:44 crc kubenswrapper[4780]: I1210 10:45:44.639967 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:44 crc kubenswrapper[4780]: I1210 10:45:44.639982 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:44 crc kubenswrapper[4780]: I1210 10:45:44.639999 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:44 crc kubenswrapper[4780]: I1210 10:45:44.640008 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:44Z","lastTransitionTime":"2025-12-10T10:45:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:44 crc kubenswrapper[4780]: I1210 10:45:44.643828 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-scheduler/openshift-kube-scheduler-crc"] Dec 10 10:45:44 crc kubenswrapper[4780]: I1210 10:45:44.668135 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"da4d38a3-53c0-417d-a86f-3496714bd352\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:56Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:56Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7f0f1f88e79f81b145ad50cc2b0a6e66de12f33c6819aaa356c8e6f7808384f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:44:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d6c4b099607f36efe13488a53c66715ba756493eaa11612cef18b8174b7b48ef\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:44:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a352e1e0c2a6fcb27ed2d53b1952586d56239ac801471f41d319d4b9390f7334\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:44:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f71a2483b5040f737133b92baa6ea162f365c492a0d860a50ba8eabd7ca23519\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f71a2483b5040f737133b92baa6ea162f365c492a0d860a50ba8eabd7ca23519\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-10T10:45:21Z\\\",\\\"message\\\":\\\"file observer\\\\nW1210 10:45:20.821547 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1210 10:45:20.821865 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1210 10:45:20.824662 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-536389068/tls.crt::/tmp/serving-cert-536389068/tls.key\\\\\\\"\\\\nI1210 10:45:21.107127 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1210 10:45:21.109726 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1210 10:45:21.109750 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1210 10:45:21.109791 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1210 10:45:21.109797 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1210 10:45:21.115580 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1210 10:45:21.115608 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1210 10:45:21.115614 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1210 10:45:21.115621 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1210 10:45:21.115664 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1210 10:45:21.115672 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1210 10:45:21.115676 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1210 10:45:21.117097 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1210 10:45:21.119004 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:14Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://531ca3c69106ee33211f11eba4a6fcbcf6cf5b8b6ee16193b5a118999537d2e0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:00Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6a7d241fa7fd864088c8bfa29a4457b38b3e78a6846872d6f52aeebb0de99c58\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6a7d241fa7fd864088c8bfa29a4457b38b3e78a6846872d6f52aeebb0de99c58\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:44:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:44:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:44:56Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:44Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:44 crc kubenswrapper[4780]: I1210 10:45:44.691751 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8ca741d2df061bc77e981eca8a555ba59082c6db0357093490908a1b27054340\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:44Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:44 crc kubenswrapper[4780]: I1210 10:45:44.712155 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6bf1dca1-b191-4796-b326-baac53e84045\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://439376b6f14a03790c4d49523ce4f21e3ee73ee0a81954f873409cf956048f53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sh92h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://57ec3f3a4da992b4288db7d6633da18e479fdc56b10c503a2a44368671882f0d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sh92h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:27Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-xhdr5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:44Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:44 crc kubenswrapper[4780]: I1210 10:45:44.736403 4780 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:44Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:44 crc kubenswrapper[4780]: I1210 10:45:44.742625 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:44 crc kubenswrapper[4780]: I1210 10:45:44.742868 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:44 crc kubenswrapper[4780]: I1210 10:45:44.742990 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:44 crc kubenswrapper[4780]: I1210 10:45:44.743129 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:44 crc kubenswrapper[4780]: I1210 10:45:44.743216 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:44Z","lastTransitionTime":"2025-12-10T10:45:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:45:44 crc kubenswrapper[4780]: I1210 10:45:44.826393 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://acd480054c5d19efdbcce679b2246ce2972593aa710b1074a2fa90006f351c66\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7c28d1629c1d97373d475f14e54585e81a67114d5a69e21be9d1e750040d6d5c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:44Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:44 crc kubenswrapper[4780]: I1210 10:45:44.839228 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-msm77" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c1046aa3-9bf1-4013-8e8d-5629f08ed5e2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6a59aa8cd7a70e094ff80b0ae626aaabb77fb7ffdcffa1320945909c32da0704\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2ttwk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:30Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-msm77\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:44Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:44 crc kubenswrapper[4780]: I1210 10:45:44.846465 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:44 crc kubenswrapper[4780]: I1210 10:45:44.846505 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:44 crc kubenswrapper[4780]: I1210 10:45:44.846519 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:44 crc kubenswrapper[4780]: I1210 10:45:44.846536 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:44 crc kubenswrapper[4780]: I1210 10:45:44.846547 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:44Z","lastTransitionTime":"2025-12-10T10:45:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:44 crc kubenswrapper[4780]: I1210 10:45:44.859326 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://738eca4a4800d0b83e165aaad1b57939c0fca3335dbf043de168bcc240b2c32c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:44Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:44 crc kubenswrapper[4780]: I1210 10:45:44.873468 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:44Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:44 crc kubenswrapper[4780]: I1210 10:45:44.890119 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-8cwb7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"deadb49b-61b8-435f-8168-d7bd3c01b5ad\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b46e1864cd02c0ab49ad21329dedf09667d52948a3f59eac7f32302cc0b48bda\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"syste
m-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r67pg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:27Z\\\"}}\" for pod \"openshift-multus\"/\"multus-8cwb7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:44Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:44 crc kubenswrapper[4780]: I1210 10:45:44.915247 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-rwfxn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e522fb8-b104-4f14-a3a2-628fbe0ef36c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5855e463a188d72cb8eac92bd97b9b2f91550a9a5437d7df83405bbe7a104f5d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5855e463a188d72cb8eac92bd97b9b2f91550a9a5437d7df83405bbe7a104f5d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4a16db7b744311d27339440bc46ebd09ba4264c5892ff205c6125af2a127e67e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4a16db7b744311d27339440bc46ebd09ba4264c5892ff205c6125af2a127e67e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fcbeee4d4e545150d725187049a89aa3d35ddd8f7767a6af0ea84a385454c316\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fcbeee4d4e545150d725187049a89aa3d35ddd8f7767a6af0ea84a385454c316\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3d60cbe3cc8e817975ed6843aefdbfe3233e8857a6458d77322a623ed858ca18\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3d60cbe3cc8e817975ed6843aefdbfe3233e8857a6458d77322a623ed858ca18\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0a0b3aad440bea346393ff35f0fa9492282c2a69b4a4c170f56ca7af63666077\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly
\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:27Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-rwfxn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:44Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:44 crc kubenswrapper[4780]: I1210 10:45:44.936775 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:44Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:44 crc kubenswrapper[4780]: I1210 10:45:44.950869 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:44 crc kubenswrapper[4780]: I1210 10:45:44.950913 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:44 crc kubenswrapper[4780]: I1210 10:45:44.950925 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:44 crc kubenswrapper[4780]: I1210 10:45:44.950958 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:44 crc kubenswrapper[4780]: I1210 10:45:44.951000 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:44Z","lastTransitionTime":"2025-12-10T10:45:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:45:44 crc kubenswrapper[4780]: I1210 10:45:44.952204 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-2lx8w" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8a3251eb-408c-42f1-b74d-261cb45eab71\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://414f485a56b9fb2fbea2bbc9687a588aabda6239cad44f8116ec680597dbe667\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lgkg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:27Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-2lx8w\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:44Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:44 crc kubenswrapper[4780]: I1210 10:45:44.958123 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 10 10:45:44 crc kubenswrapper[4780]: E1210 10:45:44.958237 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 10 10:45:44 crc kubenswrapper[4780]: I1210 10:45:44.958307 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 10 10:45:44 crc kubenswrapper[4780]: E1210 10:45:44.958371 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 10 10:45:44 crc kubenswrapper[4780]: I1210 10:45:44.972505 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-fpl55" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cc22221d-0c02-4e8c-8314-c2e6d9290b5e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ef0cc518de07a141be124545f2e547c630329ff507b0e0e71e4dee9b4be5d89a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ce7c3983123be2f92944322c2cf554e67cf80a51981415d9872bfc2db9f1ed3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cafa00280d671b48ed3628917793412a20b9d3be93a80d9a7775003081aea1ab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dbc444116b8592021f7d6ba0387d473521cba7c09ca78384be81f688af54554f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://782bcb7eb8341b73ff1c32c5af00aaefa2a513e5d1672f9f7df13cd05467131c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c39c3d56584d0939bfa9be938be09db5c93ddf50e0a56cacaa6e30c7b79475a1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://547474a5e2f6912cd7f0fd6c25cdaf68ff49af2b
a46c782bf7dc81e8052dbcfc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://df663a4e7ac68c8967db1167ea07b83862e9d52aa13b3f10e51934bb18e7397e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccoun
t\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ada0c2d5e74e1805777a330c39a3c1c0fb677f876387207a83c79bd7ed06775a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ada0c2d5e74e1805777a330c39a3c1c0fb677f876387207a83c79bd7ed06775a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:27Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-fpl55\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:44Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:44 crc kubenswrapper[4780]: I1210 10:45:44.985791 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-6m7tj" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a78da08f-1ec1-4cc7-af55-d527da423778\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://920fb034d3d8ce6bf28b128e33093aa5daa724b15c51c7e208ed94ccc2f4840e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jvtxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a4509159a8aa8b24876c86f827f346063fd6cd2603c15242f6507b0a4afaff3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jvtxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:38Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-6m7tj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:44Z is after 2025-08-24T17:21:41Z" Dec 10 
10:45:44 crc kubenswrapper[4780]: I1210 10:45:44.998992 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-46s5p" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"24187953-1dc5-48d7-b00c-1e5876604b6b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:40Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:40Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jg69p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jg69p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:40Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-46s5p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:44Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:45 crc kubenswrapper[4780]: I1210 10:45:45.054784 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:45 crc kubenswrapper[4780]: I1210 10:45:45.054835 4780 kubelet_node_status.go:724] "Recording event message 
for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:45 crc kubenswrapper[4780]: I1210 10:45:45.054845 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:45 crc kubenswrapper[4780]: I1210 10:45:45.054869 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:45 crc kubenswrapper[4780]: I1210 10:45:45.054879 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:45Z","lastTransitionTime":"2025-12-10T10:45:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:45 crc kubenswrapper[4780]: I1210 10:45:45.087436 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/1.log" Dec 10 10:45:45 crc kubenswrapper[4780]: I1210 10:45:45.088695 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"960f2d7c617f5d0b3281d431308985bb7419af77581a404554f849d22ffa1687"} Dec 10 10:45:45 crc kubenswrapper[4780]: I1210 10:45:45.089119 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 10 10:45:45 crc kubenswrapper[4780]: I1210 10:45:45.103174 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-msm77" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c1046aa3-9bf1-4013-8e8d-5629f08ed5e2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6a59aa8cd7a70e094ff80b0ae626aaabb77fb7ffdcffa1320945909c32da0704\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2ttwk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:30Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-msm77\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:45Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:45 crc kubenswrapper[4780]: I1210 10:45:45.117735 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"056c06c7-eb0f-4fb7-9f86-2884fd0d1e60\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b9592145faf65060693a3f1e14db253e10cd09c642ea0aa1a7682f5b06872f14\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:44:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://eccc5070135705c193e1020872bffc1b8aeb70dbe82f1bb520f36f012ca00703\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:44:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://72f3bd530fbcc66ed779ee9f6ef1c442f2697ca027aad5bd2473f42101d55528\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:44:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3970fa6bd2aa3e93a4473d78218e9dd494aa5f7056efbcfb123160bb63162192\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3970fa6bd2aa3e93a4473d78218e9dd494aa5f7056efbcfb123160bb63162192\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:44:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:44:57Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:44:56Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:45Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:45 crc kubenswrapper[4780]: I1210 10:45:45.157812 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:45 crc kubenswrapper[4780]: I1210 10:45:45.157866 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:45 crc kubenswrapper[4780]: I1210 10:45:45.157879 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:45 crc kubenswrapper[4780]: I1210 10:45:45.157903 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:45 crc kubenswrapper[4780]: I1210 10:45:45.157919 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:45Z","lastTransitionTime":"2025-12-10T10:45:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:45:45 crc kubenswrapper[4780]: I1210 10:45:45.158487 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:45Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:45 crc kubenswrapper[4780]: I1210 10:45:45.181781 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://acd480054c5d19efdbcce679b2246ce2972593aa710b1074a2fa90006f351c66\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7c28d1629c1d97373d475f14e54585e81a67114d5a69e21be9d1e750040d6d5c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:45Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:45 crc kubenswrapper[4780]: I1210 10:45:45.200115 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://738eca4a4800d0b83e165aaad1b57939c0fca3335dbf043de168bcc240b2c32c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:45Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:45 crc kubenswrapper[4780]: I1210 10:45:45.253315 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located 
when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:45Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:45 crc kubenswrapper[4780]: I1210 10:45:45.259826 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:45 crc kubenswrapper[4780]: I1210 10:45:45.259857 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:45 crc kubenswrapper[4780]: I1210 10:45:45.259866 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:45 crc kubenswrapper[4780]: I1210 10:45:45.259881 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:45 crc kubenswrapper[4780]: I1210 10:45:45.259889 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:45Z","lastTransitionTime":"2025-12-10T10:45:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:45:45 crc kubenswrapper[4780]: I1210 10:45:45.270172 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-8cwb7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"deadb49b-61b8-435f-8168-d7bd3c01b5ad\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b46e1864cd02c0ab49ad21329dedf09667d52948a3f59eac7f32302cc0b48bda\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r67pg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126
.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:27Z\\\"}}\" for pod \"openshift-multus\"/\"multus-8cwb7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:45Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:45 crc kubenswrapper[4780]: I1210 10:45:45.288082 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-rwfxn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e522fb8-b104-4f14-a3a2-628fbe0ef36c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5855e463a188d72cb8eac92bd97b9b2f91550a9a5437d7df83405bbe7a104f5d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5855e463a188d72cb8eac92bd97b9b2f91550a9a5437d7df83405bbe7a104f5d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4a16db7b744311d27339440bc46ebd09ba4264c5892ff205c6125af2a127e67e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4a16db7b744311d27339440bc46ebd09ba4264c5892ff205c6125af2a127e67e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fcbeee4d4e545150d725187049a89aa3d35ddd8f7767a6af0ea84a385454c316\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fcbeee4d4e545150d725187049a89aa3d35ddd8f7767a6af0ea84a385454c316\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3d60cbe3cc8e817975ed6843aefdbfe3233e8857a6458d77322a623ed858ca18\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3d60cbe3cc8e817975ed6843aefdbfe3233e8857a6458d77322a623ed858ca18\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0a0b3aad440bea346393ff35f0fa9492282c2a69b4a4c170f56ca7af63666077\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly
\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:27Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-rwfxn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:45Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:45 crc kubenswrapper[4780]: I1210 10:45:45.304960 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:45Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:45 crc kubenswrapper[4780]: I1210 10:45:45.316018 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-2lx8w" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8a3251eb-408c-42f1-b74d-261cb45eab71\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://414f485a56b9fb2fbea2bbc9687a588aabda6239cad44f8116ec680597dbe667\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lgkg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:27Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-2lx8w\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2025-12-10T10:45:45Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:45 crc kubenswrapper[4780]: I1210 10:45:45.339733 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-fpl55" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cc22221d-0c02-4e8c-8314-c2e6d9290b5e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ef0cc518de07a141be124545f2e547c630329ff507b0e0e71e4dee9b4be5d89a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ce7c3983123be2f92944322c2cf554e67cf80a51981415d9872bfc2db9f1ed3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\
"containerID\\\":\\\"cri-o://cafa00280d671b48ed3628917793412a20b9d3be93a80d9a7775003081aea1ab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dbc444116b8592021f7d6ba0387d473521cba7c09ca78384be81f688af54554f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://782bcb7eb8341b73ff1c32c5af00aaefa2a513e5d1672f9f7df13cd05467131c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c39c3d56584d0939bfa9be938be09db5c93ddf50e0a56cacaa6e30c7b79475a1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID
\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://547474a5e2f6912cd7f0fd6c25cdaf68ff49af2ba46c782bf7dc81e8052dbcfc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ov
nkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://df663a4e7ac68c8967db1167ea07b83862e9d52aa13b3f10e51934bb18e7397e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ada0c2d5e74e1805777a330c39a3c1c0fb677f876387207a83c79bd7ed06775a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ada0c2d5e74e1805777a330c39a3c1c0fb677f876387207a83c79bd7ed06775a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:27Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-fpl55\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:45Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:45 crc kubenswrapper[4780]: I1210 10:45:45.354880 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-6m7tj" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a78da08f-1ec1-4cc7-af55-d527da423778\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://920fb034d3d8ce6bf28b128e33093aa5daa724b15c51c7e208ed94ccc2f4840e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jvtxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a4509159a8aa8b24876c86f827f346063fd6cd2603c15242f6507b0a4afaff3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jvtxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:38Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-6m7tj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:45Z is after 2025-08-24T17:21:41Z" Dec 10 
10:45:45 crc kubenswrapper[4780]: I1210 10:45:45.363522 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:45 crc kubenswrapper[4780]: I1210 10:45:45.363555 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:45 crc kubenswrapper[4780]: I1210 10:45:45.363567 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:45 crc kubenswrapper[4780]: I1210 10:45:45.363583 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:45 crc kubenswrapper[4780]: I1210 10:45:45.363598 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:45Z","lastTransitionTime":"2025-12-10T10:45:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:45 crc kubenswrapper[4780]: I1210 10:45:45.370318 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-46s5p" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"24187953-1dc5-48d7-b00c-1e5876604b6b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:40Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:40Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jg69p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jg69p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:40Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-46s5p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:45Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:45 crc kubenswrapper[4780]: I1210 10:45:45.389791 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"da4d38a3-53c0-417d-a86f-3496714bd352\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:56Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:56Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7f0f1f88e79f81b145ad50cc2b0a6e66de12f33c6819aaa356c8e6f7808384f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:44:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d6c4b099607f36efe13488a53c66715ba756493eaa11612cef18b8174b7b48ef\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:44:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a352e1e0c2a6fcb27ed2d53b1952586d56239ac801471f41d319d4b9390f7334\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:44:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://960f2d7c617f5d0b3281d431308985bb7419af77581a404554f849d22ffa1687\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f71a2483b5040f737133b92baa6ea162f365c492a0d860a50ba8eabd7ca23519\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-10T10:45:21Z\\\",\\\"message\\\":\\\"file observer\\\\nW1210 10:45:20.821547 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1210 10:45:20.821865 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1210 10:45:20.824662 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-536389068/tls.crt::/tmp/serving-cert-536389068/tls.key\\\\\\\"\\\\nI1210 10:45:21.107127 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1210 10:45:21.109726 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1210 10:45:21.109750 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1210 10:45:21.109791 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1210 10:45:21.109797 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1210 10:45:21.115580 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1210 10:45:21.115608 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1210 10:45:21.115614 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1210 10:45:21.115621 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1210 10:45:21.115664 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1210 10:45:21.115672 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1210 10:45:21.115676 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1210 10:45:21.117097 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1210 10:45:21.119004 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:14Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://531ca3c69106ee33211f11eba4a6fcbcf6cf5b8b6ee16193b5a118999537d2e0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:00Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6a7d241fa7fd864088c8bfa29a4457b38b3e78a6846872d6f52aeebb0de99c58\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6a7d241fa7fd864088c8bfa29a4457b38b3e78a6846872d6f52aeebb0de99c58\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:44:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:44:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:44:56Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:45Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:45 crc kubenswrapper[4780]: I1210 10:45:45.406235 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8ca741d2df061bc77e981eca8a555ba59082c6db0357093490908a1b27054340\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:45Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:45 crc kubenswrapper[4780]: I1210 10:45:45.422670 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6bf1dca1-b191-4796-b326-baac53e84045\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://439376b6f14a03790c4d49523ce4f21e3ee73ee0a81954f873409cf956048f53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sh92h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://57ec3f3a4da992b4288db7d6633da18e479fdc56b10c503a2a44368671882f0d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sh92h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:27Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-xhdr5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:45Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:45 crc kubenswrapper[4780]: I1210 10:45:45.466559 4780 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:45 crc kubenswrapper[4780]: I1210 10:45:45.466602 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:45 crc kubenswrapper[4780]: I1210 10:45:45.466611 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:45 crc kubenswrapper[4780]: I1210 10:45:45.466626 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:45 crc kubenswrapper[4780]: I1210 10:45:45.466636 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:45Z","lastTransitionTime":"2025-12-10T10:45:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:45 crc kubenswrapper[4780]: I1210 10:45:45.569680 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:45 crc kubenswrapper[4780]: I1210 10:45:45.569725 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:45 crc kubenswrapper[4780]: I1210 10:45:45.569736 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:45 crc kubenswrapper[4780]: I1210 10:45:45.569754 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:45 crc kubenswrapper[4780]: I1210 10:45:45.569765 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:45Z","lastTransitionTime":"2025-12-10T10:45:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:45 crc kubenswrapper[4780]: I1210 10:45:45.674037 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:45 crc kubenswrapper[4780]: I1210 10:45:45.674104 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:45 crc kubenswrapper[4780]: I1210 10:45:45.674119 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:45 crc kubenswrapper[4780]: I1210 10:45:45.674138 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:45 crc kubenswrapper[4780]: I1210 10:45:45.674150 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:45Z","lastTransitionTime":"2025-12-10T10:45:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:45:45 crc kubenswrapper[4780]: I1210 10:45:45.777909 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:45 crc kubenswrapper[4780]: I1210 10:45:45.777967 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:45 crc kubenswrapper[4780]: I1210 10:45:45.777980 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:45 crc kubenswrapper[4780]: I1210 10:45:45.777994 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:45 crc kubenswrapper[4780]: I1210 10:45:45.778004 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:45Z","lastTransitionTime":"2025-12-10T10:45:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:45 crc kubenswrapper[4780]: I1210 10:45:45.881711 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:45 crc kubenswrapper[4780]: I1210 10:45:45.881758 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:45 crc kubenswrapper[4780]: I1210 10:45:45.881773 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:45 crc kubenswrapper[4780]: I1210 10:45:45.881794 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:45 crc kubenswrapper[4780]: I1210 10:45:45.881811 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:45Z","lastTransitionTime":"2025-12-10T10:45:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:45 crc kubenswrapper[4780]: I1210 10:45:45.958152 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 10 10:45:45 crc kubenswrapper[4780]: E1210 10:45:45.958316 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 10 10:45:45 crc kubenswrapper[4780]: I1210 10:45:45.958996 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-46s5p" Dec 10 10:45:45 crc kubenswrapper[4780]: E1210 10:45:45.959269 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-46s5p" podUID="24187953-1dc5-48d7-b00c-1e5876604b6b" Dec 10 10:45:45 crc kubenswrapper[4780]: I1210 10:45:45.976866 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"da4d38a3-53c0-417d-a86f-3496714bd352\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:56Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:56Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7f0f1f88e79f81b145ad50cc2b0a6e66de12f33c6819aaa356c8e6f7808384f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:44:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d6c4b099607f36efe13488a53c66715ba756493eaa11612cef18b8174b7b48ef\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:44:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"container
ID\\\":\\\"cri-o://a352e1e0c2a6fcb27ed2d53b1952586d56239ac801471f41d319d4b9390f7334\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:44:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://960f2d7c617f5d0b3281d431308985bb7419af77581a404554f849d22ffa1687\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f71a2483b5040f737133b92baa6ea162f365c492a0d860a50ba8eabd7ca23519\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-10T10:45:21Z\\\",\\\"message\\\":\\\"file observer\\\\nW1210 10:45:20.821547 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1210 10:45:20.821865 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1210 10:45:20.824662 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-536389068/tls.crt::/tmp/serving-cert-536389068/tls.key\\\\\\\"\\\\nI1210 10:45:21.107127 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1210 10:45:21.109726 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1210 10:45:21.109750 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1210 10:45:21.109791 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1210 10:45:21.109797 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1210 10:45:21.115580 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1210 10:45:21.115608 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1210 10:45:21.115614 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1210 10:45:21.115621 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1210 10:45:21.115664 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1210 10:45:21.115672 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1210 10:45:21.115676 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1210 10:45:21.117097 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1210 10:45:21.119004 1 cmd.go:182] pods 
\\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:14Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://531ca3c69106ee33211f11eba4a6fcbcf6cf5b8b6ee16193b5a118999537d2e0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:00Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6a7d241fa7fd864088c8bfa29a4457b38b3e78a6846872d6f52aeebb0de99c58\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6a7d241fa7fd864088c8bfa29a4457b38b3e78a6846872d6f52aeebb0de99c58\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:44:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:44:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:44:56Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:45Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:45 crc kubenswrapper[4780]: I1210 10:45:45.984580 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:45 crc kubenswrapper[4780]: I1210 10:45:45.984632 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:45 crc kubenswrapper[4780]: I1210 10:45:45.984645 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:45 crc kubenswrapper[4780]: I1210 10:45:45.984662 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:45 crc kubenswrapper[4780]: I1210 10:45:45.984675 4780 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:45Z","lastTransitionTime":"2025-12-10T10:45:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:45 crc kubenswrapper[4780]: I1210 10:45:45.996913 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8ca741d2df061bc77e981eca8a555ba59082c6db0357093490908a1b27054340\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:45Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:46 crc kubenswrapper[4780]: I1210 10:45:46.011243 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6bf1dca1-b191-4796-b326-baac53e84045\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://439376b6f14a03790c4d49523ce4f21e3ee73ee0a81954f873409cf956048f53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sh92h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://57ec3f3a4da992b4288db7d6633da18e479fdc56b10c503a2a44368671882f0d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sh92h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:27Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-xhdr5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:46Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:46 crc kubenswrapper[4780]: I1210 10:45:46.032890 4780 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"056c06c7-eb0f-4fb7-9f86-2884fd0d1e60\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b9592145faf65060693a3f1e14db253e10cd09c642ea0aa1a7682f5b06872f14\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:44:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://eccc5070135705c193e1020872bffc1b8aeb70dbe82f1bb520f36f012ca00703\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:44:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://72f3bd530fbcc66ed779ee9f6ef1c442f2697ca027aad5bd2473f42101d55528\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:44:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":
[{\\\"containerID\\\":\\\"cri-o://3970fa6bd2aa3e93a4473d78218e9dd494aa5f7056efbcfb123160bb63162192\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3970fa6bd2aa3e93a4473d78218e9dd494aa5f7056efbcfb123160bb63162192\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:44:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:44:57Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:44:56Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:46Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:46 crc kubenswrapper[4780]: I1210 10:45:46.048582 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:46Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:46 crc kubenswrapper[4780]: I1210 10:45:46.063877 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://acd480054c5d19efdbcce679b2246ce2972593aa710b1074a2fa90006f351c66\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7c28d1629c1d97373d475f14e54585e81a67114d5a69e21be9d1e750040d6d5c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"m
ountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:46Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:46 crc kubenswrapper[4780]: I1210 10:45:46.077988 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-msm77" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c1046aa3-9bf1-4013-8e8d-5629f08ed5e2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6a59aa8cd7a70e094ff80b0ae626aaabb77fb7ffdcffa1320945909c32da0704\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2ttwk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:30Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-msm77\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:46Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:46 crc kubenswrapper[4780]: I1210 10:45:46.088284 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Dec 10 10:45:46 crc kubenswrapper[4780]: I1210 10:45:46.088332 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:46 crc kubenswrapper[4780]: I1210 10:45:46.088345 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:46 crc kubenswrapper[4780]: I1210 10:45:46.088365 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:46 crc kubenswrapper[4780]: I1210 10:45:46.088394 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:46Z","lastTransitionTime":"2025-12-10T10:45:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:46 crc kubenswrapper[4780]: I1210 10:45:46.097084 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-rwfxn" event={"ID":"5e522fb8-b104-4f14-a3a2-628fbe0ef36c","Type":"ContainerDied","Data":"0a0b3aad440bea346393ff35f0fa9492282c2a69b4a4c170f56ca7af63666077"} Dec 10 10:45:46 crc kubenswrapper[4780]: I1210 10:45:46.097151 4780 generic.go:334] "Generic (PLEG): container finished" podID="5e522fb8-b104-4f14-a3a2-628fbe0ef36c" containerID="0a0b3aad440bea346393ff35f0fa9492282c2a69b4a4c170f56ca7af63666077" exitCode=0 Dec 10 10:45:46 crc kubenswrapper[4780]: I1210 10:45:46.098684 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://738eca4a4800d0b83e165aaad1b57939c0fca3335dbf043de168bcc240b2c32c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:46Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:46 crc kubenswrapper[4780]: I1210 10:45:46.115250 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located 
when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:46Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:46 crc kubenswrapper[4780]: I1210 10:45:46.136664 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-8cwb7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"deadb49b-61b8-435f-8168-d7bd3c01b5ad\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b46e1864cd02c0ab49ad21329dedf09667d52948a3f59eac7f32302cc0b48bda\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cn
i/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r67pg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:27Z\\\"}}\" for pod \"openshift-multus\"/\"multus-8cwb7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:46Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:46 crc kubenswrapper[4780]: I1210 10:45:46.154699 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-rwfxn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e522fb8-b104-4f14-a3a2-628fbe0ef36c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5855e463a188d72cb8eac92bd97b9b2f91550a9a5437d7df83405bbe7a104f5d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5855e463a188d72cb8eac92bd97b9b2f91550a9a5437d7df83405bbe7a104f5d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4a16db7b744311d27339440bc46ebd09ba4264c5892ff205c6125af2a127e67e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4a16db7b744311d27339440bc46ebd09ba4264c5892ff205c6125af2a127e67e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fcbeee4d4e545150d725187049a89aa3d35ddd8f7767a6af0ea84a385454c316\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fcbeee4d4e545150d725187049a89aa3d35ddd8f7767a6af0ea84a385454c316\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3d60cbe3cc8e817975ed6843aefdbfe3233e8857a6458d77322a623ed858ca18\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3d60cbe3cc8e817975ed6843aefdbfe3233e8857a6458d77322a623ed858ca18\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0a0b3aad440bea346393ff35f0fa9492282c2a69b4a4c170f56ca7af63666077\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly
\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:27Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-rwfxn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:46Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:46 crc kubenswrapper[4780]: I1210 10:45:46.172809 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:46Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:46 crc kubenswrapper[4780]: I1210 10:45:46.187139 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-2lx8w" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8a3251eb-408c-42f1-b74d-261cb45eab71\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://414f485a56b9fb2fbea2bbc9687a588aabda6239cad44f8116ec680597dbe667\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lgkg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:27Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-2lx8w\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2025-12-10T10:45:46Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:46 crc kubenswrapper[4780]: I1210 10:45:46.197419 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:46 crc kubenswrapper[4780]: I1210 10:45:46.197480 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:46 crc kubenswrapper[4780]: I1210 10:45:46.197497 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:46 crc kubenswrapper[4780]: I1210 10:45:46.197523 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:46 crc kubenswrapper[4780]: I1210 10:45:46.197538 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:46Z","lastTransitionTime":"2025-12-10T10:45:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:46 crc kubenswrapper[4780]: I1210 10:45:46.216261 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-fpl55" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cc22221d-0c02-4e8c-8314-c2e6d9290b5e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ef0cc518de07a141be124545f2e547c630329ff507b0e0e71e4dee9b4be5d89a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ce7c3983123be2f92944322c2cf554e67cf80a51981415d9872bfc2db9f1ed3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cafa00280d671b48ed3628917793412a20b9d3be93a80d9a7775003081aea1ab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dbc444116b8592021f7d6ba0387d473521cba7c09ca78384be81f688af54554f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://782bcb7eb8341b73ff1c32c5af00aaefa2a513e5d1672f9f7df13cd05467131c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c39c3d56584d0939bfa9be938be09db5c93ddf50e0a56cacaa6e30c7b79475a1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://547474a5e2f6912cd7f0fd6c25cdaf68ff49af2b
a46c782bf7dc81e8052dbcfc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://df663a4e7ac68c8967db1167ea07b83862e9d52aa13b3f10e51934bb18e7397e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccoun
t\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ada0c2d5e74e1805777a330c39a3c1c0fb677f876387207a83c79bd7ed06775a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ada0c2d5e74e1805777a330c39a3c1c0fb677f876387207a83c79bd7ed06775a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:27Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-fpl55\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:46Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:46 crc kubenswrapper[4780]: I1210 10:45:46.233718 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-6m7tj" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a78da08f-1ec1-4cc7-af55-d527da423778\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://920fb034d3d8ce6bf28b128e33093aa5daa724b15c51c7e208ed94ccc2f4840e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jvtxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a4509159a8aa8b24876c86f827f346063fd6cd2603c15242f6507b0a4afaff3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jvtxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:38Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-6m7tj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:46Z is after 2025-08-24T17:21:41Z" Dec 10 
10:45:46 crc kubenswrapper[4780]: I1210 10:45:46.245848 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-46s5p" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"24187953-1dc5-48d7-b00c-1e5876604b6b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:40Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:40Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jg69p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jg69p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:40Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-46s5p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:46Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:46 crc kubenswrapper[4780]: I1210 10:45:46.260498 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-8cwb7" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"deadb49b-61b8-435f-8168-d7bd3c01b5ad\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b46e1864cd02c0ab49ad21329dedf09667d52948a3f59eac7f32302cc0b48bda\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r67pg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:27Z\\\"}}\" for pod \"openshift-multus\"/\"multus-8cwb7\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:46Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:46 crc kubenswrapper[4780]: I1210 10:45:46.278026 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-rwfxn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e522fb8-b104-4f14-a3a2-628fbe0ef36c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5855e463a188d72cb8eac92bd97b9b2f91550a9a5437d7df83405bbe7a104f5d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5855e463a188d72cb8eac92bd97b9b2f91550a9a5437d7df83405bbe7a104f5d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/
host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4a16db7b744311d27339440bc46ebd09ba4264c5892ff205c6125af2a127e67e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4a16db7b744311d27339440bc46ebd09ba4264c5892ff205c6125af2a127e67e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fcbeee4d4e545150d725187049a89aa3d35ddd8f7767a6af0ea84a385454c316\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fcbeee4d4e545150d725187049a89aa3d35ddd8f7767a6af0ea84a385454c316\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3d60cbe3cc8e817975ed6843aefdbfe3233e8857a6458d77322a623ed858ca18\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"sta
rted\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3d60cbe3cc8e817975ed6843aefdbfe3233e8857a6458d77322a623ed858ca18\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0a0b3aad440bea346393ff35f0fa9492282c2a69b4a4c170f56ca7af63666077\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0a0b3aad440bea346393ff35f0fa9492282c2a69b4a4c170f56ca7af63666077\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:27Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-rwfxn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:46Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:46 crc kubenswrapper[4780]: I1210 10:45:46.292591 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" 
err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://738eca4a4800d0b83e165aaad1b57939c0fca3335dbf043de168bcc240b2c32c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:46Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:46 crc kubenswrapper[4780]: I1210 10:45:46.299539 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:46 crc kubenswrapper[4780]: I1210 10:45:46.299583 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:46 crc kubenswrapper[4780]: I1210 10:45:46.299595 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:46 crc kubenswrapper[4780]: I1210 10:45:46.299613 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:46 crc kubenswrapper[4780]: I1210 10:45:46.299624 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:46Z","lastTransitionTime":"2025-12-10T10:45:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:45:46 crc kubenswrapper[4780]: I1210 10:45:46.306336 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:46Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:46 crc kubenswrapper[4780]: I1210 10:45:46.318838 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-6m7tj" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a78da08f-1ec1-4cc7-af55-d527da423778\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://920fb034d3d8ce6bf28b128e33093aa5daa724b15c51c7e208ed94ccc2f4840e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jvtxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a4509159a8aa8b24876c86f827f346063fd6cd2603c15242f6507b0a4afaff3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jvtxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:38Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-6m7tj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:46Z is after 2025-08-24T17:21:41Z" Dec 10 
10:45:46 crc kubenswrapper[4780]: I1210 10:45:46.331340 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-46s5p" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"24187953-1dc5-48d7-b00c-1e5876604b6b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:40Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:40Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jg69p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jg69p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:40Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-46s5p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:46Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:46 crc kubenswrapper[4780]: I1210 10:45:46.346571 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:46Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:46 crc kubenswrapper[4780]: I1210 10:45:46.358728 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-2lx8w" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8a3251eb-408c-42f1-b74d-261cb45eab71\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://414f485a56b9fb2fbea2bbc9687a588aabda6239cad44f8116ec680597dbe667\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lgkg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:27Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-2lx8w\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:46Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:46 crc kubenswrapper[4780]: I1210 10:45:46.382656 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-fpl55" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cc22221d-0c02-4e8c-8314-c2e6d9290b5e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ef0cc518de07a141be124545f2e547c630329ff507b0e0e71e4dee9b4be5d89a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ce7c3983123be2f92944322c2cf554e67cf80a51981415d9872bfc2db9f1ed3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cafa00280d671b48ed3628917793412a20b9d3be93a80d9a7775003081aea1ab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name
\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dbc444116b8592021f7d6ba0387d473521cba7c09ca78384be81f688af54554f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://782bcb7eb8341b73ff1c32c5af00aaefa2a513e5d1672f9f7df13cd05467131c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c39c3d56584d0939bfa9be938be09db5c93ddf50e0a56cacaa6e30c7b79475a1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\
"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://547474a5e2f6912cd7f0fd6c25cdaf68ff49af2ba46c782bf7dc81e8052dbcfc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://df663a4e7ac68c8967db1167ea07b83862e9d52aa13b3f10e51934bb18e7397e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPat
h\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ada0c2d5e74e1805777a330c39a3c1c0fb677f876387207a83c79bd7ed06775a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ada0c2d5e74e1805777a330c39a3c1c0fb677f876387207a83c79bd7ed06775a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:27Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-fpl55\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:46Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:46 crc kubenswrapper[4780]: I1210 10:45:46.399349 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"da4d38a3-53c0-417d-a86f-3496714bd352\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:56Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:56Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7f0f1f88e79f81b145ad50cc2b0a6e66de12f33c6819aaa356c8e6f7808384f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:44:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d6c4b099607f36efe13488a53c66715ba756493eaa11612cef18b8174b7b48ef\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:44:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a352e1e0c2a6fcb27ed2d53b1952586d56239ac801471f41d319d4b9390f7334\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:44:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://960f2d7c617f5d0b3281d431308985bb7419af77581a404554f849d22ffa1687\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f71a2483b5040f737133b92baa6ea162f365c492a0d860a50ba8eabd7ca23519\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-10T10:45:21Z\\\",\\\"message\\\":\\\"file observer\\\\nW1210 10:45:20.821547 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1210 10:45:20.821865 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1210 10:45:20.824662 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-536389068/tls.crt::/tmp/serving-cert-536389068/tls.key\\\\\\\"\\\\nI1210 10:45:21.107127 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1210 10:45:21.109726 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1210 10:45:21.109750 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1210 10:45:21.109791 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1210 10:45:21.109797 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1210 10:45:21.115580 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1210 10:45:21.115608 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1210 10:45:21.115614 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1210 10:45:21.115621 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1210 10:45:21.115664 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1210 10:45:21.115672 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1210 10:45:21.115676 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1210 10:45:21.117097 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1210 10:45:21.119004 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:14Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://531ca3c69106ee33211f11eba4a6fcbcf6cf5b8b6ee16193b5a118999537d2e0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:00Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6a7d241fa7fd864088c8bfa29a4457b38b3e78a6846872d6f52aeebb0de99c58\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6a7d241fa7fd864088c8bfa29a4457b38b3e78a6846872d6f52aeebb0de99c58\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:44:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:44:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:44:56Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:46Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:46 crc kubenswrapper[4780]: I1210 10:45:46.402449 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:46 crc kubenswrapper[4780]: I1210 10:45:46.402516 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:46 crc kubenswrapper[4780]: I1210 10:45:46.402530 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:46 crc kubenswrapper[4780]: I1210 10:45:46.402554 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:46 crc kubenswrapper[4780]: I1210 10:45:46.402567 4780 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:46Z","lastTransitionTime":"2025-12-10T10:45:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:46 crc kubenswrapper[4780]: I1210 10:45:46.414778 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8ca741d2df061bc77e981eca8a555ba59082c6db0357093490908a1b27054340\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:46Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:46 crc kubenswrapper[4780]: I1210 10:45:46.428256 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6bf1dca1-b191-4796-b326-baac53e84045\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://439376b6f14a03790c4d49523ce4f21e3ee73ee0a81954f873409cf956048f53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sh92h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://57ec3f3a4da992b4288db7d6633da18e479fdc56b10c503a2a44368671882f0d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sh92h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:27Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-xhdr5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:46Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:46 crc kubenswrapper[4780]: I1210 10:45:46.444232 4780 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:46Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:46 crc kubenswrapper[4780]: I1210 10:45:46.460142 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://acd480054c5d19efdbcce679b2246ce2972593aa710b1074a2fa90006f351c66\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7c28d1629c1d97373d475f14e54585e81a67114d5a69e21be9d1e750040d6d5c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:46Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:46 crc kubenswrapper[4780]: I1210 10:45:46.473765 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-msm77" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c1046aa3-9bf1-4013-8e8d-5629f08ed5e2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6a59aa8cd7a70e094ff80b0ae626aaabb77fb7ffdcffa1320945909c32da0704\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2ttwk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:30Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-msm77\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:46Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:46 crc kubenswrapper[4780]: I1210 10:45:46.486409 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"056c06c7-eb0f-4fb7-9f86-2884fd0d1e60\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b9592145faf65060693a3f1e14db253e10cd09c642ea0aa1a7682f5b06872f14\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:44:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://eccc5070135705c193e1020872bffc1b8aeb70dbe82f1bb520f36f012ca00703\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:44:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://72f3bd530fbcc66ed779ee9f6ef1c442f2697ca027aad5bd2473f42101d55528\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:44:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3970fa6bd2aa3e93a4473d78218e9dd494aa5f7056efbcfb123160bb63162192\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3970fa6bd2aa3e93a4473d78218e9dd494aa5f7056efbcfb123160bb63162192\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:44:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:44:57Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:44:56Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:46Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:46 crc kubenswrapper[4780]: I1210 10:45:46.504987 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:46 crc kubenswrapper[4780]: I1210 10:45:46.505040 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:46 crc kubenswrapper[4780]: I1210 10:45:46.505052 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:46 crc kubenswrapper[4780]: I1210 10:45:46.505068 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:46 crc kubenswrapper[4780]: I1210 10:45:46.505078 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:46Z","lastTransitionTime":"2025-12-10T10:45:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:45:46 crc kubenswrapper[4780]: I1210 10:45:46.608246 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:46 crc kubenswrapper[4780]: I1210 10:45:46.608290 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:46 crc kubenswrapper[4780]: I1210 10:45:46.608300 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:46 crc kubenswrapper[4780]: I1210 10:45:46.608317 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:46 crc kubenswrapper[4780]: I1210 10:45:46.608330 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:46Z","lastTransitionTime":"2025-12-10T10:45:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:46 crc kubenswrapper[4780]: I1210 10:45:46.711048 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:46 crc kubenswrapper[4780]: I1210 10:45:46.711107 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:46 crc kubenswrapper[4780]: I1210 10:45:46.711124 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:46 crc kubenswrapper[4780]: I1210 10:45:46.711147 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:46 crc kubenswrapper[4780]: I1210 10:45:46.711166 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:46Z","lastTransitionTime":"2025-12-10T10:45:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:47 crc kubenswrapper[4780]: I1210 10:45:47.271773 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-46s5p" Dec 10 10:45:47 crc kubenswrapper[4780]: E1210 10:45:47.271958 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-46s5p" podUID="24187953-1dc5-48d7-b00c-1e5876604b6b" Dec 10 10:45:47 crc kubenswrapper[4780]: I1210 10:45:47.272345 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 10 10:45:47 crc kubenswrapper[4780]: E1210 10:45:47.272404 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 10 10:45:47 crc kubenswrapper[4780]: I1210 10:45:47.272446 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 10 10:45:47 crc kubenswrapper[4780]: E1210 10:45:47.272486 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 10 10:45:47 crc kubenswrapper[4780]: I1210 10:45:47.272511 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 10 10:45:47 crc kubenswrapper[4780]: E1210 10:45:47.272592 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 10 10:45:47 crc kubenswrapper[4780]: I1210 10:45:47.276053 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:47 crc kubenswrapper[4780]: I1210 10:45:47.276095 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:47 crc kubenswrapper[4780]: I1210 10:45:47.276105 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:47 crc kubenswrapper[4780]: I1210 10:45:47.276124 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:47 crc kubenswrapper[4780]: I1210 10:45:47.276138 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:47Z","lastTransitionTime":"2025-12-10T10:45:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:45:47 crc kubenswrapper[4780]: I1210 10:45:47.378732 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:47 crc kubenswrapper[4780]: I1210 10:45:47.378786 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:47 crc kubenswrapper[4780]: I1210 10:45:47.378799 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:47 crc kubenswrapper[4780]: I1210 10:45:47.378823 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:47 crc kubenswrapper[4780]: I1210 10:45:47.378839 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:47Z","lastTransitionTime":"2025-12-10T10:45:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:47 crc kubenswrapper[4780]: I1210 10:45:47.482094 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:47 crc kubenswrapper[4780]: I1210 10:45:47.482152 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:47 crc kubenswrapper[4780]: I1210 10:45:47.482166 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:47 crc kubenswrapper[4780]: I1210 10:45:47.482184 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:47 crc kubenswrapper[4780]: I1210 10:45:47.482211 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:47Z","lastTransitionTime":"2025-12-10T10:45:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:47 crc kubenswrapper[4780]: I1210 10:45:47.585578 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:47 crc kubenswrapper[4780]: I1210 10:45:47.585625 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:47 crc kubenswrapper[4780]: I1210 10:45:47.585635 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:47 crc kubenswrapper[4780]: I1210 10:45:47.585656 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:47 crc kubenswrapper[4780]: I1210 10:45:47.585667 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:47Z","lastTransitionTime":"2025-12-10T10:45:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:45:47 crc kubenswrapper[4780]: I1210 10:45:47.688633 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:47 crc kubenswrapper[4780]: I1210 10:45:47.688697 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:47 crc kubenswrapper[4780]: I1210 10:45:47.688715 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:47 crc kubenswrapper[4780]: I1210 10:45:47.688741 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:47 crc kubenswrapper[4780]: I1210 10:45:47.688758 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:47Z","lastTransitionTime":"2025-12-10T10:45:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:47 crc kubenswrapper[4780]: I1210 10:45:47.791626 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:47 crc kubenswrapper[4780]: I1210 10:45:47.791666 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:47 crc kubenswrapper[4780]: I1210 10:45:47.791676 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:47 crc kubenswrapper[4780]: I1210 10:45:47.791690 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:47 crc kubenswrapper[4780]: I1210 10:45:47.791699 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:47Z","lastTransitionTime":"2025-12-10T10:45:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:47 crc kubenswrapper[4780]: I1210 10:45:47.895181 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:47 crc kubenswrapper[4780]: I1210 10:45:47.895235 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:47 crc kubenswrapper[4780]: I1210 10:45:47.895248 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:47 crc kubenswrapper[4780]: I1210 10:45:47.895266 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:47 crc kubenswrapper[4780]: I1210 10:45:47.895278 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:47Z","lastTransitionTime":"2025-12-10T10:45:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:45:47 crc kubenswrapper[4780]: I1210 10:45:47.997605 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:47 crc kubenswrapper[4780]: I1210 10:45:47.997755 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:47 crc kubenswrapper[4780]: I1210 10:45:47.997778 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:47 crc kubenswrapper[4780]: I1210 10:45:47.997802 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:47 crc kubenswrapper[4780]: I1210 10:45:47.997823 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:47Z","lastTransitionTime":"2025-12-10T10:45:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:48 crc kubenswrapper[4780]: I1210 10:45:48.101512 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:48 crc kubenswrapper[4780]: I1210 10:45:48.101884 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:48 crc kubenswrapper[4780]: I1210 10:45:48.101907 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:48 crc kubenswrapper[4780]: I1210 10:45:48.101963 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:48 crc kubenswrapper[4780]: I1210 10:45:48.101983 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:48Z","lastTransitionTime":"2025-12-10T10:45:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:48 crc kubenswrapper[4780]: I1210 10:45:48.178118 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/24187953-1dc5-48d7-b00c-1e5876604b6b-metrics-certs\") pod \"network-metrics-daemon-46s5p\" (UID: \"24187953-1dc5-48d7-b00c-1e5876604b6b\") " pod="openshift-multus/network-metrics-daemon-46s5p" Dec 10 10:45:48 crc kubenswrapper[4780]: E1210 10:45:48.178321 4780 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Dec 10 10:45:48 crc kubenswrapper[4780]: E1210 10:45:48.178393 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/24187953-1dc5-48d7-b00c-1e5876604b6b-metrics-certs podName:24187953-1dc5-48d7-b00c-1e5876604b6b nodeName:}" failed. No retries permitted until 2025-12-10 10:45:56.178375481 +0000 UTC m=+61.031768924 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/24187953-1dc5-48d7-b00c-1e5876604b6b-metrics-certs") pod "network-metrics-daemon-46s5p" (UID: "24187953-1dc5-48d7-b00c-1e5876604b6b") : object "openshift-multus"/"metrics-daemon-secret" not registered Dec 10 10:45:48 crc kubenswrapper[4780]: I1210 10:45:48.205509 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:48 crc kubenswrapper[4780]: I1210 10:45:48.205795 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:48 crc kubenswrapper[4780]: I1210 10:45:48.205808 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:48 crc kubenswrapper[4780]: I1210 10:45:48.205827 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:48 crc kubenswrapper[4780]: I1210 10:45:48.205838 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:48Z","lastTransitionTime":"2025-12-10T10:45:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:48 crc kubenswrapper[4780]: I1210 10:45:48.280851 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-fpl55_cc22221d-0c02-4e8c-8314-c2e6d9290b5e/ovnkube-controller/0.log" Dec 10 10:45:48 crc kubenswrapper[4780]: I1210 10:45:48.283358 4780 generic.go:334] "Generic (PLEG): container finished" podID="cc22221d-0c02-4e8c-8314-c2e6d9290b5e" containerID="547474a5e2f6912cd7f0fd6c25cdaf68ff49af2ba46c782bf7dc81e8052dbcfc" exitCode=1 Dec 10 10:45:48 crc kubenswrapper[4780]: I1210 10:45:48.283450 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-fpl55" event={"ID":"cc22221d-0c02-4e8c-8314-c2e6d9290b5e","Type":"ContainerDied","Data":"547474a5e2f6912cd7f0fd6c25cdaf68ff49af2ba46c782bf7dc81e8052dbcfc"} Dec 10 10:45:48 crc kubenswrapper[4780]: I1210 10:45:48.284462 4780 scope.go:117] "RemoveContainer" containerID="547474a5e2f6912cd7f0fd6c25cdaf68ff49af2ba46c782bf7dc81e8052dbcfc" Dec 10 10:45:48 crc kubenswrapper[4780]: I1210 10:45:48.287651 4780 generic.go:334] "Generic (PLEG): container finished" podID="5e522fb8-b104-4f14-a3a2-628fbe0ef36c" containerID="3b72566e3e3a5f8eb12033cd2342bc3e9ad8f091f47c9d65309888d91a1ceaf4" exitCode=0 Dec 10 10:45:48 crc kubenswrapper[4780]: I1210 10:45:48.287677 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-rwfxn" event={"ID":"5e522fb8-b104-4f14-a3a2-628fbe0ef36c","Type":"ContainerDied","Data":"3b72566e3e3a5f8eb12033cd2342bc3e9ad8f091f47c9d65309888d91a1ceaf4"} Dec 10 10:45:48 crc kubenswrapper[4780]: I1210 10:45:48.297459 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6bf1dca1-b191-4796-b326-baac53e84045\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://439376b6f14a03790c4d49523ce4f21e3ee73ee0a81954f873409cf956048f53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sh92h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://57ec3f3a4da992b4288db7d6633da18e479fdc56b10c503a2a44368671882f0d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sh92h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:27Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-xhdr5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:48Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:48 crc kubenswrapper[4780]: I1210 10:45:48.309018 4780 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:48 crc kubenswrapper[4780]: I1210 10:45:48.309051 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:48 crc kubenswrapper[4780]: I1210 10:45:48.309061 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:48 crc kubenswrapper[4780]: I1210 10:45:48.309077 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:48 crc kubenswrapper[4780]: I1210 10:45:48.309089 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:48Z","lastTransitionTime":"2025-12-10T10:45:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:48 crc kubenswrapper[4780]: I1210 10:45:48.311703 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"da4d38a3-53c0-417d-a86f-3496714bd352\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:56Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:56Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7f0f1f88e79f81b145ad50cc2b0a6e66de12f33c6819aaa356c8e6f7808384f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:44:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d6c4b099607f36efe13488a53c66715ba756493eaa11612cef18b8174b7b48ef\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:44:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a352e1e0c2a6fcb27ed2d53b1952586d56239ac801471f41d319d4b9390f7334\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:44:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://960f2d7c617f5d0b3281d431308985bb7419af77581a404554f849d22ffa1687\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f71a2483b5040f737133b92baa6ea162f365c492a0d860a50ba8eabd7ca23519\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-10T10:45:21Z\\\",\\\"message\\\":\\\"file observer\\\\nW1210 10:45:20.821547 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1210 10:45:20.821865 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1210 10:45:20.824662 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-536389068/tls.crt::/tmp/serving-cert-536389068/tls.key\\\\\\\"\\\\nI1210 10:45:21.107127 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1210 10:45:21.109726 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1210 10:45:21.109750 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1210 10:45:21.109791 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1210 10:45:21.109797 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1210 10:45:21.115580 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1210 10:45:21.115608 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1210 10:45:21.115614 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1210 10:45:21.115621 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1210 10:45:21.115664 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1210 10:45:21.115672 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1210 10:45:21.115676 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1210 10:45:21.117097 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1210 10:45:21.119004 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:14Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://531ca3c69106ee33211f11eba4a6fcbcf6cf5b8b6ee16193b5a118999537d2e0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:00Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6a7d241fa7fd864088c8bfa29a4457b38b3e78a6846872d6f52aeebb0de99c58\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6a7d241fa7fd864088c8bfa29a4457b38b3e78a6846872d6f52aeebb0de99c58\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:44:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:44:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:44:56Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:48Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:48 crc kubenswrapper[4780]: I1210 10:45:48.325066 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8ca741d2df061bc77e981eca8a555ba59082c6db0357093490908a1b27054340\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:48Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:48 crc kubenswrapper[4780]: I1210 10:45:48.338582 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"056c06c7-eb0f-4fb7-9f86-2884fd0d1e60\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b9592145faf65060693a3f1e14db253e10cd09c642ea0aa1a7682f5b06872f14\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:44:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://eccc5070135705c193e1020872bffc1b8aeb70dbe82f1bb520f36f012ca00703\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:44:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://72f3bd530fbcc66ed779ee9f6ef1c442f2697ca027aad5bd2473f42101d55528\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:44:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3970fa6bd2aa3e93a4473d78218e9dd494aa5f7056efbcfb123160bb63162192\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3970fa6bd2aa3e93a4473d78218e9dd494aa5f7056efbcfb123160bb63162192\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:44:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:44:57Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:44:56Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:48Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:48 crc kubenswrapper[4780]: I1210 10:45:48.355510 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:48Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:48 crc kubenswrapper[4780]: I1210 10:45:48.373429 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://acd480054c5d19efdbcce679b2246ce2972593aa710b1074a2fa90006f351c66\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7c28d1629c1d97373d475f14e54585e81a67114d5a69e21be9d1e750040d6d5c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"m
ountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:48Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:48 crc kubenswrapper[4780]: I1210 10:45:48.385993 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-msm77" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c1046aa3-9bf1-4013-8e8d-5629f08ed5e2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6a59aa8cd7a70e094ff80b0ae626aaabb77fb7ffdcffa1320945909c32da0704\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2ttwk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:30Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-msm77\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:48Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:48 crc kubenswrapper[4780]: I1210 10:45:48.400335 4780 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:48Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:48 crc kubenswrapper[4780]: I1210 10:45:48.411999 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:48 crc kubenswrapper[4780]: I1210 10:45:48.412043 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:48 crc kubenswrapper[4780]: I1210 10:45:48.412054 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:48 crc kubenswrapper[4780]: I1210 10:45:48.412070 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:48 crc kubenswrapper[4780]: I1210 10:45:48.412080 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:48Z","lastTransitionTime":"2025-12-10T10:45:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in 
/etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:48 crc kubenswrapper[4780]: I1210 10:45:48.418079 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-8cwb7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"deadb49b-61b8-435f-8168-d7bd3c01b5ad\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b46e1864cd02c0ab49ad21329dedf09667d52948a3f59eac7f32302cc0b48bda\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r67pg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\
\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:27Z\\\"}}\" for pod \"openshift-multus\"/\"multus-8cwb7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:48Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:48 crc kubenswrapper[4780]: I1210 10:45:48.432785 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-rwfxn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e522fb8-b104-4f14-a3a2-628fbe0ef36c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5855e463a188d72cb8eac92bd97b9b2f91550a9a5437d7df83405bbe7a104f5d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5855e463a188d72cb8eac92bd97b9b2f91550a9a5437d7df83405bbe7a104f5d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4a16db7b744311d27339440bc46ebd09ba4264c5892ff205c6125af2a127e67e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4a16db7b744311d27339440bc46ebd09ba4264c5892ff205c6125af2a127e67e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fcbeee4d4e545150d725187049a89aa3d35ddd8f7767a6af0ea84a385454c316\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fcbeee4d4e545150d725187049a89aa3d35ddd8f7767a6af0ea84a385454c316\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3d60cbe3cc8e817975ed6843aefdbfe3233e8857a6458d77322a623ed858ca18\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3d60cbe3cc8e817975ed6843aefdbfe3233e8857a6458d77322a623ed858ca18\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0a0b3aad440bea346393ff35f0fa9492282c2a69b4a4c170f56ca7af63666077\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0a0b3aad440bea346393ff35f0fa9492282c2a69b4a4c170f56ca7af63666077\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",
\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:27Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-rwfxn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:48Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:48 crc kubenswrapper[4780]: I1210 10:45:48.446623 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://738eca4a4800d0b83e165aaad1b57939c0fca3335dbf043de168bcc240b2c32c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:48Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:48 crc kubenswrapper[4780]: I1210 10:45:48.466450 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-fpl55" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cc22221d-0c02-4e8c-8314-c2e6d9290b5e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ef0cc518de07a141be124545f2e547c630329ff507b0e0e71e4dee9b4be5d89a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ce7c3983123be2f92944322c2cf554e67cf80a51981415d9872bfc2db9f1ed3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cafa00280d671b48ed3628917793412a20b9d3be93a80d9a7775003081aea1ab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dbc444116b8592021f7d6ba0387d473521cba7c09ca78384be81f688af54554f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://782bcb7eb8341b73ff1c32c5af00aaefa2a513e5d1672f9f7df13cd05467131c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c39c3d56584d0939bfa9be938be09db5c93ddf50e0a56cacaa6e30c7b79475a1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://547474a5e2f6912cd7f0fd6c25cdaf68ff49af2b
a46c782bf7dc81e8052dbcfc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://547474a5e2f6912cd7f0fd6c25cdaf68ff49af2ba46c782bf7dc81e8052dbcfc\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-10T10:45:47Z\\\",\\\"message\\\":\\\"e (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1210 10:45:46.406982 5937 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1210 10:45:46.407076 5937 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1210 10:45:46.407098 5937 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1210 10:45:46.407136 5937 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI1210 10:45:46.407172 5937 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1210 10:45:46.407186 5937 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI1210 10:45:46.407183 5937 handler.go:208] Removed *v1.Node event handler 2\\\\nI1210 10:45:46.407206 5937 handler.go:208] Removed *v1.Node event handler 7\\\\nI1210 10:45:46.407210 5937 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI1210 10:45:46.407161 5937 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1210 10:45:46.407235 5937 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1210 10:45:46.407243 5937 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1210 10:45:46.407286 5937 factory.go:656] Stopping watch factory\\\\nI1210 10:45:46.407293 5937 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1210 10:45:46.407318 5937 ovnkube.go:599] Stopped ovnkube\\\\nI1210 
10:45:4\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://df663a4e7ac68c8967db1167ea07b83862e9d52aa13b3f10e51934bb18e7397e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ada0c2d5e74e1805777a330c39a3c1c0fb677f876387207a83c79bd7ed06775a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0
d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ada0c2d5e74e1805777a330c39a3c1c0fb677f876387207a83c79bd7ed06775a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:27Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-fpl55\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:48Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:48 crc kubenswrapper[4780]: I1210 10:45:48.482953 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-6m7tj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a78da08f-1ec1-4cc7-af55-d527da423778\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://920fb034d3d8ce6bf28b128e33093aa5daa724b15c51c7e208ed94ccc2f4840e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jvtxj\\\",\\\"readOnly\\\":true,\\\"recursiveRe
adOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a4509159a8aa8b24876c86f827f346063fd6cd2603c15242f6507b0a4afaff3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jvtxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:38Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-6m7tj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:48Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:48 crc kubenswrapper[4780]: I1210 10:45:48.495668 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-46s5p" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"24187953-1dc5-48d7-b00c-1e5876604b6b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:40Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:40Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jg69p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jg69p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:40Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-46s5p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:48Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:48 crc kubenswrapper[4780]: I1210 10:45:48.511521 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:48Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:48 crc kubenswrapper[4780]: I1210 10:45:48.514884 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:48 crc kubenswrapper[4780]: I1210 10:45:48.514968 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:48 crc kubenswrapper[4780]: I1210 10:45:48.514980 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:48 crc kubenswrapper[4780]: I1210 10:45:48.514998 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:48 crc kubenswrapper[4780]: I1210 10:45:48.515008 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:48Z","lastTransitionTime":"2025-12-10T10:45:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:45:48 crc kubenswrapper[4780]: I1210 10:45:48.523638 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-2lx8w" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8a3251eb-408c-42f1-b74d-261cb45eab71\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://414f485a56b9fb2fbea2bbc9687a588aabda6239cad44f8116ec680597dbe667\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lgkg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:27Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-2lx8w\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:48Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:48 crc kubenswrapper[4780]: I1210 10:45:48.537118 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:48Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:48 crc kubenswrapper[4780]: I1210 10:45:48.551237 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-8cwb7" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"deadb49b-61b8-435f-8168-d7bd3c01b5ad\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b46e1864cd02c0ab49ad21329dedf09667d52948a3f59eac7f32302cc0b48bda\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r67pg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:27Z\\\"}}\" for pod \"openshift-multus\"/\"multus-8cwb7\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:48Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:48 crc kubenswrapper[4780]: I1210 10:45:48.566908 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-rwfxn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e522fb8-b104-4f14-a3a2-628fbe0ef36c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5855e463a188d72cb8eac92bd97b9b2f91550a9a5437d7df83405bbe7a104f5d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5855e463a188d72cb8eac92bd97b9b2f91550a9a5437d7df83405bbe7a104f5d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\
"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4a16db7b744311d27339440bc46ebd09ba4264c5892ff205c6125af2a127e67e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4a16db7b744311d27339440bc46ebd09ba4264c5892ff205c6125af2a127e67e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fcbeee4d4e545150d725187049a89aa3d35ddd8f7767a6af0ea84a385454c316\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fcbeee4d4e545150d725187049a89aa3d35ddd8f7767a6af0ea84a385454c316\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3d60cbe3cc8e817975ed6843aefdbfe3233e8857a6458d77322a623ed858ca18\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3d60cbe3cc8e817975ed6843aefdbfe3233e8857a645
8d77322a623ed858ca18\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0a0b3aad440bea346393ff35f0fa9492282c2a69b4a4c170f56ca7af63666077\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0a0b3aad440bea346393ff35f0fa9492282c2a69b4a4c170f56ca7af63666077\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3b72566e3e3a5f8eb12033cd2342bc3e9ad8f091f47c9d65309888d91a1ceaf4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3b72566e3e3a5f8eb12033cd2342bc3e9ad8f091f47c9d65309888d91a1ceaf4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:27Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-rwfxn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: 
x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:48Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:48 crc kubenswrapper[4780]: I1210 10:45:48.581065 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://738eca4a4800d0b83e165aaad1b57939c0fca3335dbf043de168bcc240b2c32c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:48Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:48 crc kubenswrapper[4780]: I1210 10:45:48.600672 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-fpl55" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cc22221d-0c02-4e8c-8314-c2e6d9290b5e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ef0cc518de07a141be124545f2e547c630329ff507b0e0e71e4dee9b4be5d89a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ce7c3983123be2f92944322c2cf554e67cf80a51981415d9872bfc2db9f1ed3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cafa00280d671b48ed3628917793412a20b9d3be93a80d9a7775003081aea1ab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name
\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dbc444116b8592021f7d6ba0387d473521cba7c09ca78384be81f688af54554f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://782bcb7eb8341b73ff1c32c5af00aaefa2a513e5d1672f9f7df13cd05467131c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c39c3d56584d0939bfa9be938be09db5c93ddf50e0a56cacaa6e30c7b79475a1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\
"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://547474a5e2f6912cd7f0fd6c25cdaf68ff49af2ba46c782bf7dc81e8052dbcfc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://547474a5e2f6912cd7f0fd6c25cdaf68ff49af2ba46c782bf7dc81e8052dbcfc\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-10T10:45:47Z\\\",\\\"message\\\":\\\"e (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1210 10:45:46.406982 5937 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1210 10:45:46.407076 5937 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1210 10:45:46.407098 5937 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1210 10:45:46.407136 5937 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI1210 10:45:46.407172 5937 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1210 10:45:46.407186 5937 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI1210 10:45:46.407183 5937 handler.go:208] Removed *v1.Node event handler 2\\\\nI1210 10:45:46.407206 5937 handler.go:208] Removed *v1.Node event handler 7\\\\nI1210 10:45:46.407210 5937 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI1210 10:45:46.407161 5937 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1210 10:45:46.407235 5937 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1210 10:45:46.407243 5937 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1210 10:45:46.407286 5937 factory.go:656] Stopping watch factory\\\\nI1210 10:45:46.407293 5937 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1210 10:45:46.407318 5937 ovnkube.go:599] Stopped ovnkube\\\\nI1210 
10:45:4\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://df663a4e7ac68c8967db1167ea07b83862e9d52aa13b3f10e51934bb18e7397e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ada0c2d5e74e1805777a330c39a3c1c0fb677f876387207a83c79bd7ed06775a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0
d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ada0c2d5e74e1805777a330c39a3c1c0fb677f876387207a83c79bd7ed06775a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:27Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-fpl55\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:48Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:48 crc kubenswrapper[4780]: I1210 10:45:48.613309 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-6m7tj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a78da08f-1ec1-4cc7-af55-d527da423778\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://920fb034d3d8ce6bf28b128e33093aa5daa724b15c51c7e208ed94ccc2f4840e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jvtxj\\\",\\\"readOnly\\\":true,\\\"recursiveRe
adOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a4509159a8aa8b24876c86f827f346063fd6cd2603c15242f6507b0a4afaff3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jvtxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:38Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-6m7tj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:48Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:48 crc kubenswrapper[4780]: I1210 10:45:48.618403 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:48 crc kubenswrapper[4780]: I1210 10:45:48.618444 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:48 crc kubenswrapper[4780]: I1210 10:45:48.618459 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:48 crc kubenswrapper[4780]: I1210 10:45:48.618477 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:48 crc kubenswrapper[4780]: I1210 10:45:48.618491 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:48Z","lastTransitionTime":"2025-12-10T10:45:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:45:48 crc kubenswrapper[4780]: I1210 10:45:48.630198 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-46s5p" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"24187953-1dc5-48d7-b00c-1e5876604b6b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:40Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:40Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jg69p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jg69p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:40Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-46s5p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:48Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:48 crc kubenswrapper[4780]: I1210 10:45:48.644259 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:48Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:48 crc kubenswrapper[4780]: I1210 10:45:48.658210 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-2lx8w" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8a3251eb-408c-42f1-b74d-261cb45eab71\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://414f485a56b9fb2fbea2bbc9687a588aabda6239cad44f8116ec680597dbe667\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lgkg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:27Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-2lx8w\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:48Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:48 crc kubenswrapper[4780]: I1210 10:45:48.673287 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6bf1dca1-b191-4796-b326-baac53e84045\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://439376b6f14a03790c4d49523ce4f21e3ee73ee0a81954f873409cf956048f53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sh92h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://57ec3f3a4da992b4288db7d6633da18e479fdc56b10c503a2a44368671882f0d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sh92h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:27Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-xhdr5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:48Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:48 crc kubenswrapper[4780]: I1210 10:45:48.692523 4780 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"da4d38a3-53c0-417d-a86f-3496714bd352\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:56Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:56Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7f0f1f88e79f81b145ad50cc2b0a6e66de12f33c6819aaa356c8e6f7808384f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:44:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d6c4b099607f36efe13488a53c66715ba756493eaa11612cef18b8174b7b48ef\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:44:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a352e1e0c2a6fcb27ed2d53b1952586d56239ac801471f41d319d4b9390f7334\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:44:58Z\\\"}},\\\"volumeMounts\\\":[{\\\
"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://960f2d7c617f5d0b3281d431308985bb7419af77581a404554f849d22ffa1687\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f71a2483b5040f737133b92baa6ea162f365c492a0d860a50ba8eabd7ca23519\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-10T10:45:21Z\\\",\\\"message\\\":\\\"file observer\\\\nW1210 10:45:20.821547 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1210 10:45:20.821865 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1210 10:45:20.824662 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-536389068/tls.crt::/tmp/serving-cert-536389068/tls.key\\\\\\\"\\\\nI1210 10:45:21.107127 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1210 10:45:21.109726 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1210 10:45:21.109750 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1210 10:45:21.109791 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1210 10:45:21.109797 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1210 10:45:21.115580 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1210 10:45:21.115608 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1210 10:45:21.115614 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1210 10:45:21.115621 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1210 10:45:21.115664 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1210 10:45:21.115672 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1210 10:45:21.115676 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1210 10:45:21.117097 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1210 10:45:21.119004 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:14Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://531ca3c69106ee33211f11eba4a6fcbcf6cf5b8b6ee16193b5a118999537d2e0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:00Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6a7d241fa7fd864088c8bfa29a4457b38b3e78a6846872d6f52aeebb0de99c58\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6a7d241fa7fd864088c8bfa29a4457b38b3e78a6846872d6f52aeebb0de99c58\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:44:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:44:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:44:56Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:48Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:48 crc kubenswrapper[4780]: I1210 10:45:48.704994 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8ca741d2df061bc77e981eca8a555ba59082c6db0357093490908a1b27054340\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:48Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:48 crc kubenswrapper[4780]: I1210 10:45:48.718730 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"056c06c7-eb0f-4fb7-9f86-2884fd0d1e60\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b9592145faf65060693a3f1e14db253e10cd09c642ea0aa1a7682f5b06872f14\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:44:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://eccc5070135705c193e1020872bffc1b8aeb70dbe82f1bb520f36f012ca00703\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:44:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://72f3bd530fbcc66ed779ee9f6ef1c442f2697ca027aad5bd2473f42101d55528\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:44:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3970fa6bd2aa3e93a4473d78218e9dd494aa5f7056efbcfb123160bb63162192\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3970fa6bd2aa3e93a4473d78218e9dd494aa5f7056efbcfb123160bb63162192\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:44:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:44:57Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:44:56Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:48Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:48 crc kubenswrapper[4780]: I1210 10:45:48.721333 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:48 crc kubenswrapper[4780]: I1210 10:45:48.721376 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:48 crc kubenswrapper[4780]: I1210 10:45:48.721389 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:48 crc kubenswrapper[4780]: I1210 10:45:48.721406 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:48 crc kubenswrapper[4780]: I1210 10:45:48.721419 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:48Z","lastTransitionTime":"2025-12-10T10:45:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:45:48 crc kubenswrapper[4780]: I1210 10:45:48.733024 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:48Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:48 crc kubenswrapper[4780]: I1210 10:45:48.747258 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://acd480054c5d19efdbcce679b2246ce2972593aa710b1074a2fa90006f351c66\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7c28d1629c1d97373d475f14e54585e81a67114d5a69e21be9d1e750040d6d5c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:48Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:48 crc kubenswrapper[4780]: I1210 10:45:48.758535 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-msm77" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c1046aa3-9bf1-4013-8e8d-5629f08ed5e2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6a59aa8cd7a70e094ff80b0ae626aaabb77fb7ffdcffa1320945909c32da0704\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2ttwk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:30Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-msm77\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:48Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:48 crc kubenswrapper[4780]: I1210 10:45:48.824493 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:48 crc kubenswrapper[4780]: I1210 10:45:48.824542 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:48 crc kubenswrapper[4780]: I1210 10:45:48.824554 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:48 crc kubenswrapper[4780]: I1210 10:45:48.824571 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:48 crc kubenswrapper[4780]: I1210 10:45:48.824582 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:48Z","lastTransitionTime":"2025-12-10T10:45:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:48 crc kubenswrapper[4780]: I1210 10:45:48.927322 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:48 crc kubenswrapper[4780]: I1210 10:45:48.927379 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:48 crc kubenswrapper[4780]: I1210 10:45:48.927393 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:48 crc kubenswrapper[4780]: I1210 10:45:48.927413 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:48 crc kubenswrapper[4780]: I1210 10:45:48.927426 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:48Z","lastTransitionTime":"2025-12-10T10:45:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:48 crc kubenswrapper[4780]: I1210 10:45:48.958425 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 10 10:45:48 crc kubenswrapper[4780]: I1210 10:45:48.958469 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 10 10:45:48 crc kubenswrapper[4780]: I1210 10:45:48.958481 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-46s5p" Dec 10 10:45:48 crc kubenswrapper[4780]: I1210 10:45:48.958438 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 10 10:45:48 crc kubenswrapper[4780]: E1210 10:45:48.958616 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 10 10:45:48 crc kubenswrapper[4780]: E1210 10:45:48.958729 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 10 10:45:48 crc kubenswrapper[4780]: E1210 10:45:48.958890 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 10 10:45:48 crc kubenswrapper[4780]: E1210 10:45:48.959033 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-46s5p" podUID="24187953-1dc5-48d7-b00c-1e5876604b6b" Dec 10 10:45:49 crc kubenswrapper[4780]: I1210 10:45:49.032873 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:49 crc kubenswrapper[4780]: I1210 10:45:49.032948 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:49 crc kubenswrapper[4780]: I1210 10:45:49.032962 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:49 crc kubenswrapper[4780]: I1210 10:45:49.033025 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:49 crc kubenswrapper[4780]: I1210 10:45:49.033040 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:49Z","lastTransitionTime":"2025-12-10T10:45:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:49 crc kubenswrapper[4780]: I1210 10:45:49.135398 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:49 crc kubenswrapper[4780]: I1210 10:45:49.135460 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:49 crc kubenswrapper[4780]: I1210 10:45:49.135471 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:49 crc kubenswrapper[4780]: I1210 10:45:49.135489 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:49 crc kubenswrapper[4780]: I1210 10:45:49.135499 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:49Z","lastTransitionTime":"2025-12-10T10:45:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:45:49 crc kubenswrapper[4780]: I1210 10:45:49.197667 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:49 crc kubenswrapper[4780]: I1210 10:45:49.197704 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:49 crc kubenswrapper[4780]: I1210 10:45:49.197714 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:49 crc kubenswrapper[4780]: I1210 10:45:49.197730 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:49 crc kubenswrapper[4780]: I1210 10:45:49.197739 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:49Z","lastTransitionTime":"2025-12-10T10:45:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:49 crc kubenswrapper[4780]: E1210 10:45:49.211014 4780 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:45:49Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:49Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:45:49Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:49Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:45:49Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:49Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:45:49Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:49Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"90085e8a-5ea9-4564-85e4-5635b00d094d\\\",\\\"systemUUID\\\":\\\"0182e509-70c5-4f26-9ad3-610230bb601e\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:49Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:49 crc kubenswrapper[4780]: I1210 10:45:49.215647 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:49 crc kubenswrapper[4780]: I1210 10:45:49.215700 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Dec 10 10:45:49 crc kubenswrapper[4780]: I1210 10:45:49.215714 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:49 crc kubenswrapper[4780]: I1210 10:45:49.215736 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:49 crc kubenswrapper[4780]: I1210 10:45:49.215747 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:49Z","lastTransitionTime":"2025-12-10T10:45:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:49 crc kubenswrapper[4780]: E1210 10:45:49.238850 4780 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:45:49Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:49Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:45:49Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:49Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:45:49Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:49Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:45:49Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:49Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"90085e8a-5ea9-4564-85e4-5635b00d094d\\\",\\\"systemUUID\\\":\\\"0182e509-70c5-4f26-9ad3-610230bb601e\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:49Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:49 crc kubenswrapper[4780]: I1210 10:45:49.246608 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:49 crc kubenswrapper[4780]: I1210 10:45:49.246666 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Dec 10 10:45:49 crc kubenswrapper[4780]: I1210 10:45:49.246697 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:49 crc kubenswrapper[4780]: I1210 10:45:49.246712 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:49 crc kubenswrapper[4780]: I1210 10:45:49.246722 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:49Z","lastTransitionTime":"2025-12-10T10:45:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:49 crc kubenswrapper[4780]: E1210 10:45:49.260330 4780 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:45:49Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:49Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:45:49Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:49Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:45:49Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:49Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:45:49Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:49Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"90085e8a-5ea9-4564-85e4-5635b00d094d\\\",\\\"systemUUID\\\":\\\"0182e509-70c5-4f26-9ad3-610230bb601e\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:49Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:49 crc kubenswrapper[4780]: I1210 10:45:49.264400 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:49 crc kubenswrapper[4780]: I1210 10:45:49.264452 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Dec 10 10:45:49 crc kubenswrapper[4780]: I1210 10:45:49.264461 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:49 crc kubenswrapper[4780]: I1210 10:45:49.264479 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:49 crc kubenswrapper[4780]: I1210 10:45:49.264491 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:49Z","lastTransitionTime":"2025-12-10T10:45:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:49 crc kubenswrapper[4780]: E1210 10:45:49.280972 4780 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:45:49Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:49Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:45:49Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:49Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:45:49Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:49Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:45:49Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:49Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"90085e8a-5ea9-4564-85e4-5635b00d094d\\\",\\\"systemUUID\\\":\\\"0182e509-70c5-4f26-9ad3-610230bb601e\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:49Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:49 crc kubenswrapper[4780]: I1210 10:45:49.285359 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:49 crc kubenswrapper[4780]: I1210 10:45:49.285400 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Dec 10 10:45:49 crc kubenswrapper[4780]: I1210 10:45:49.285410 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:49 crc kubenswrapper[4780]: I1210 10:45:49.285427 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:49 crc kubenswrapper[4780]: I1210 10:45:49.285436 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:49Z","lastTransitionTime":"2025-12-10T10:45:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:49 crc kubenswrapper[4780]: I1210 10:45:49.294642 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-fpl55_cc22221d-0c02-4e8c-8314-c2e6d9290b5e/ovnkube-controller/0.log" Dec 10 10:45:49 crc kubenswrapper[4780]: I1210 10:45:49.299190 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-fpl55" event={"ID":"cc22221d-0c02-4e8c-8314-c2e6d9290b5e","Type":"ContainerStarted","Data":"d077dbe18449f6a39b341d00aa17f12c5f31287117bd944c6e65135f02fd3b13"} Dec 10 10:45:49 crc kubenswrapper[4780]: I1210 10:45:49.299707 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-fpl55" Dec 10 10:45:49 crc kubenswrapper[4780]: E1210 10:45:49.302783 4780 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:45:49Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:49Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:45:49Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:49Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:45:49Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:49Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:45:49Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:49Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"90085e8a-5ea9-4564-85e4-5635b00d094d\\\",\\\"systemUUID\\\":\\\"0182e509-70c5-4f26-9ad3-610230bb601e\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:49Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:49 crc kubenswrapper[4780]: E1210 10:45:49.303023 4780 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Dec 10 10:45:49 crc kubenswrapper[4780]: I1210 10:45:49.304830 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Dec 10 10:45:49 crc kubenswrapper[4780]: I1210 10:45:49.304875 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:49 crc kubenswrapper[4780]: I1210 10:45:49.304893 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:49 crc kubenswrapper[4780]: I1210 10:45:49.304989 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:49 crc kubenswrapper[4780]: I1210 10:45:49.305014 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:49Z","lastTransitionTime":"2025-12-10T10:45:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:49 crc kubenswrapper[4780]: I1210 10:45:49.308088 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-rwfxn" event={"ID":"5e522fb8-b104-4f14-a3a2-628fbe0ef36c","Type":"ContainerStarted","Data":"f3318d09af03d52017f648e9727180287dbcfce150242eaeb5f95be2fa6a1374"} Dec 10 10:45:49 crc kubenswrapper[4780]: I1210 10:45:49.320375 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-rwfxn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e522fb8-b104-4f14-a3a2-628fbe0ef36c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5855e463a188d72cb8eac92bd97b9b2f91550a9a5437d7df83405bbe7a104f5d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5855e463a188d72cb8eac92bd97b9b2f91550a9a5437d7df83405bbe7a104f5d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4a16db7b744311d27339440bc46ebd09ba4264c5892ff205c6125af2a127e67e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4a16db7b744311d27339440bc46ebd09ba4264c5892ff205c6125af2a127e67e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fcbeee4d4e545150d725187049a89aa3d35ddd8f7767a6af0ea84a385454c316\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fcbeee4d4e545150d725187049a89aa3d35ddd8f7767a6af0ea84a385454c316\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3d60cbe3cc8e817975ed6843aefdbfe3233e8857a6458d77322a623ed858ca18\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3d60cbe3cc8e817975ed6843aefdbfe3233e8857a6458d77322a623ed858ca18\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0a0b3aad440bea346393ff35f0fa9492282c2a69b4a4c170f56ca7af63666077\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0a0b3aad440bea346393ff35f0fa9492282c2a69b4a4c170f56ca7af63666077\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",
\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3b72566e3e3a5f8eb12033cd2342bc3e9ad8f091f47c9d65309888d91a1ceaf4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3b72566e3e3a5f8eb12033cd2342bc3e9ad8f091f47c9d65309888d91a1ceaf4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:27Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-rwfxn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:49Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:49 crc kubenswrapper[4780]: I1210 10:45:49.338962 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://738eca4a4800d0b83e165aaad1b57939c0fca3335dbf043de168bcc240b2c32c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:49Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:49 crc kubenswrapper[4780]: I1210 10:45:49.354692 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located 
when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:49Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:49 crc kubenswrapper[4780]: I1210 10:45:49.375095 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-8cwb7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"deadb49b-61b8-435f-8168-d7bd3c01b5ad\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b46e1864cd02c0ab49ad21329dedf09667d52948a3f59eac7f32302cc0b48bda\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cn
i/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r67pg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:27Z\\\"}}\" for pod \"openshift-multus\"/\"multus-8cwb7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:49Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:49 crc kubenswrapper[4780]: I1210 10:45:49.393063 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-46s5p" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"24187953-1dc5-48d7-b00c-1e5876604b6b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:40Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:40Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jg69p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jg69p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:40Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-46s5p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:49Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:49 crc kubenswrapper[4780]: I1210 10:45:49.408198 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:49Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:49 crc kubenswrapper[4780]: I1210 10:45:49.408439 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:49 crc kubenswrapper[4780]: I1210 10:45:49.408468 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:49 crc kubenswrapper[4780]: I1210 10:45:49.408477 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:49 crc kubenswrapper[4780]: I1210 10:45:49.408493 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:49 crc kubenswrapper[4780]: I1210 10:45:49.408504 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:49Z","lastTransitionTime":"2025-12-10T10:45:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:45:49 crc kubenswrapper[4780]: I1210 10:45:49.419832 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-2lx8w" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8a3251eb-408c-42f1-b74d-261cb45eab71\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://414f485a56b9fb2fbea2bbc9687a588aabda6239cad44f8116ec680597dbe667\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lgkg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:27Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-2lx8w\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:49Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:49 crc kubenswrapper[4780]: I1210 10:45:49.447289 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-fpl55" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"cc22221d-0c02-4e8c-8314-c2e6d9290b5e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ef0cc518de07a141be124545f2e547c630329ff507b0e0e71e4dee9b4be5d89a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ce7c3983123be2f92944322c2cf554e67cf80a51981415d9872bfc2db9f1ed3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cafa00280d671b48ed3628917793412a20b9d3be93a80d9a7775003081aea1ab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dbc444116b8592021f7d6ba0387d473521cba7c09ca78384be81f688af54554f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://782bcb7eb8341b73ff1c32c5af00aaefa2a513e5d1672f9f7df13cd05467131c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c39c3d56584d0939bfa9be938be09db5c93ddf50e0a56cacaa6e30c7b79475a1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d077dbe18449f6a39b341d00aa17f12c5f31287117bd944c6e65135f02fd3b13\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://547474a5e2f6912cd7f0fd6c25cdaf68ff49af2ba46c782bf7dc81e8052dbcfc\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-10T10:45:47Z\\\",\\\"message\\\":\\\"e (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1210 10:45:46.406982 5937 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1210 10:45:46.407076 5937 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1210 10:45:46.407098 5937 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1210 10:45:46.407136 5937 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI1210 10:45:46.407172 5937 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1210 10:45:46.407186 5937 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI1210 10:45:46.407183 5937 handler.go:208] Removed *v1.Node event handler 2\\\\nI1210 10:45:46.407206 5937 handler.go:208] Removed *v1.Node event handler 7\\\\nI1210 10:45:46.407210 5937 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI1210 10:45:46.407161 5937 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1210 10:45:46.407235 5937 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1210 10:45:46.407243 5937 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1210 10:45:46.407286 5937 factory.go:656] Stopping watch factory\\\\nI1210 10:45:46.407293 5937 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1210 10:45:46.407318 5937 ovnkube.go:599] Stopped ovnkube\\\\nI1210 
10:45:4\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:36Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://df663a4e7ac68c8967db1167ea07b83862e9d52aa13b3f10e51934bb18e7397e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\
\"containerID\\\":\\\"cri-o://ada0c2d5e74e1805777a330c39a3c1c0fb677f876387207a83c79bd7ed06775a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ada0c2d5e74e1805777a330c39a3c1c0fb677f876387207a83c79bd7ed06775a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:27Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-fpl55\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:49Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:49 crc kubenswrapper[4780]: I1210 10:45:49.466695 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-6m7tj" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a78da08f-1ec1-4cc7-af55-d527da423778\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://920fb034d3d8ce6bf28b128e33093aa5daa724b15c51c7e208ed94ccc2f4840e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jvtxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a4509159a8aa8b24876c86f827f346063fd6cd2603c15242f6507b0a4afaff3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jvtxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:38Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-6m7tj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:49Z is after 2025-08-24T17:21:41Z" Dec 10 
10:45:49 crc kubenswrapper[4780]: I1210 10:45:49.488688 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"da4d38a3-53c0-417d-a86f-3496714bd352\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:56Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:56Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7f0f1f88e79f81b145ad50cc2b0a6e66de12f33c6819aaa356c8e6f7808384f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:44:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d6c4b099607f36efe13488a53c66715ba756493eaa11612cef18b8174b7b48ef\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:44:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a352e1e0c2a6fcb27ed2d53b1952586d56239ac801471f41d319d4b9390f7334\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\
":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:44:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://960f2d7c617f5d0b3281d431308985bb7419af77581a404554f849d22ffa1687\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f71a2483b5040f737133b92baa6ea162f365c492a0d860a50ba8eabd7ca23519\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-10T10:45:21Z\\\",\\\"message\\\":\\\"file observer\\\\nW1210 10:45:20.821547 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1210 10:45:20.821865 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1210 10:45:20.824662 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-536389068/tls.crt::/tmp/serving-cert-536389068/tls.key\\\\\\\"\\\\nI1210 10:45:21.107127 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1210 10:45:21.109726 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1210 10:45:21.109750 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1210 10:45:21.109791 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1210 10:45:21.109797 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1210 10:45:21.115580 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1210 10:45:21.115608 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1210 10:45:21.115614 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1210 10:45:21.115621 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1210 10:45:21.115664 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1210 10:45:21.115672 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1210 10:45:21.115676 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1210 10:45:21.117097 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1210 10:45:21.119004 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:14Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://531ca3c69106ee33211f11eba4a6fcbcf6cf5b8b6ee16193b5a118999537d2e0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:00Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6a7d241fa7fd864088c8bfa29a4457b38b3e78a6846872d6f52aeebb0de99c58\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6a7d241fa7fd864088c8bfa29a4457b38b3e78a6846872d6f52aeebb0de99c58\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:44:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:44:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:44:56Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:49Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:49 crc kubenswrapper[4780]: I1210 10:45:49.502383 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8ca741d2df061bc77e981eca8a555ba59082c6db0357093490908a1b27054340\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:49Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:49 crc kubenswrapper[4780]: I1210 10:45:49.511567 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:49 crc kubenswrapper[4780]: I1210 10:45:49.511633 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:49 crc kubenswrapper[4780]: I1210 10:45:49.511647 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:49 crc kubenswrapper[4780]: I1210 10:45:49.511668 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:49 crc kubenswrapper[4780]: I1210 10:45:49.511679 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:49Z","lastTransitionTime":"2025-12-10T10:45:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:45:49 crc kubenswrapper[4780]: I1210 10:45:49.514322 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6bf1dca1-b191-4796-b326-baac53e84045\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://439376b6f14a03790c4d49523ce4f21e3ee73ee0a81954f873409cf956048f53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sh92h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://57ec3f3a4da992b4288db7d6633da18e479fdc56b10c503a2a44368671882f0d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sh92h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:27Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-xhdr5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:49Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:49 crc kubenswrapper[4780]: I1210 10:45:49.528479 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://acd480054c5d19efdbcce679b2246ce2972593aa710b1074a2fa90006f351c66\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7c28d1629c1d97373d475f14e54585e81a67114d5a69e21be9d1e750040d6d5c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:49Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:49 crc kubenswrapper[4780]: I1210 10:45:49.547491 
4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-msm77" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c1046aa3-9bf1-4013-8e8d-5629f08ed5e2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6a59aa8cd7a70e094ff80b0ae626aaabb77fb7ffdcffa1320945909c32da0704\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2ttwk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:30Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-msm77\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:49Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:49 crc kubenswrapper[4780]: I1210 10:45:49.562786 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"056c06c7-eb0f-4fb7-9f86-2884fd0d1e60\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b9592145faf65060693a3f1e14db253e10cd09c642ea0aa1a7682f5b06872f14\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:44:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://eccc5070135705c193e1020872bffc1b8aeb70dbe82f1bb520f36f012ca00703\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:44:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://72f3bd530fbcc66ed779ee9f6ef1c442f2697ca027aad5bd2473f42101d55528\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:44:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3970fa6bd2aa3e93a4473d78218e9dd494aa5f7056efbcfb123160bb63162192\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3970fa6bd2aa3e93a4473d78218e9dd494aa5f7056efbcfb123160bb63162192\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:44:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:44:57Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:44:56Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:49Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:49 crc kubenswrapper[4780]: I1210 10:45:49.578163 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:49Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:49 crc kubenswrapper[4780]: I1210 10:45:49.614992 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:49 crc kubenswrapper[4780]: I1210 10:45:49.615038 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:49 crc kubenswrapper[4780]: I1210 10:45:49.615053 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:49 crc kubenswrapper[4780]: I1210 10:45:49.615071 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:49 crc kubenswrapper[4780]: I1210 10:45:49.615084 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:49Z","lastTransitionTime":"2025-12-10T10:45:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:45:49 crc kubenswrapper[4780]: I1210 10:45:49.617373 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-8cwb7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"deadb49b-61b8-435f-8168-d7bd3c01b5ad\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b46e1864cd02c0ab49ad21329dedf09667d52948a3f59eac7f32302cc0b48bda\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r67pg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126
.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:27Z\\\"}}\" for pod \"openshift-multus\"/\"multus-8cwb7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:49Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:49 crc kubenswrapper[4780]: I1210 10:45:49.684143 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-rwfxn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e522fb8-b104-4f14-a3a2-628fbe0ef36c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f3318d09af03d52017f648e9727180287dbcfce150242eaeb5f95be2fa6a1374\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5855e463a188d72cb8eac92bd97b9b2f91550a9a5437d7df83405bbe7a104f5d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5855e463a188d72cb8eac92bd97b9b2f91550a9a5437d7df83405bbe7a104f5d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cn
ibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4a16db7b744311d27339440bc46ebd09ba4264c5892ff205c6125af2a127e67e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4a16db7b744311d27339440bc46ebd09ba4264c5892ff205c6125af2a127e67e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fcbeee4d4e545150d725187049a89aa3d35ddd8f7767a6af0ea84a385454c316\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fcbeee4d4e545150d725187049a89aa3d35ddd8f7767a6af0ea84a385454c316\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3d60cbe3cc8e817975ed6843aefdbfe3233e8857a6458d77322a623ed858ca18\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":t
rue,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3d60cbe3cc8e817975ed6843aefdbfe3233e8857a6458d77322a623ed858ca18\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0a0b3aad440bea346393ff35f0fa9492282c2a69b4a4c170f56ca7af63666077\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0a0b3aad440bea346393ff35f0fa9492282c2a69b4a4c170f56ca7af63666077\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3b72566e3e3a5f8eb12033cd2342bc3e9ad8f091f47c9d65309888d91a1ceaf4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3b72566e3e3a5f8eb12033cd2342bc3e9ad8f091f47c9d65309888d91a1ceaf4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:27Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-rwfxn\": Internal error 
occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:49Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:49 crc kubenswrapper[4780]: I1210 10:45:49.718087 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:49 crc kubenswrapper[4780]: I1210 10:45:49.718141 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:49 crc kubenswrapper[4780]: I1210 10:45:49.718152 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:49 crc kubenswrapper[4780]: I1210 10:45:49.718171 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:49 crc kubenswrapper[4780]: I1210 10:45:49.718183 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:49Z","lastTransitionTime":"2025-12-10T10:45:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:49 crc kubenswrapper[4780]: I1210 10:45:49.719551 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://738eca4a4800d0b83e165aaad1b57939c0fca3335dbf043de168bcc240b2c32c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:49Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:49 crc kubenswrapper[4780]: I1210 10:45:49.737518 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:49Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:49 crc kubenswrapper[4780]: I1210 10:45:49.751423 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-6m7tj" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a78da08f-1ec1-4cc7-af55-d527da423778\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://920fb034d3d8ce6bf28b128e33093aa5daa724b15c51c7e208ed94ccc2f4840e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jvtxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a4509159a8aa8b24876c86f827f346063fd6cd2603c15242f6507b0a4afaff3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jvtxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:38Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-6m7tj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:49Z is after 2025-08-24T17:21:41Z" Dec 10 
10:45:49 crc kubenswrapper[4780]: I1210 10:45:49.764004 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-46s5p" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"24187953-1dc5-48d7-b00c-1e5876604b6b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:40Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:40Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jg69p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jg69p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:40Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-46s5p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:49Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:49 crc kubenswrapper[4780]: I1210 10:45:49.779324 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:49Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:49 crc kubenswrapper[4780]: I1210 10:45:49.791418 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-2lx8w" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8a3251eb-408c-42f1-b74d-261cb45eab71\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://414f485a56b9fb2fbea2bbc9687a588aabda6239cad44f8116ec680597dbe667\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lgkg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:27Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-2lx8w\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:49Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:49 crc kubenswrapper[4780]: I1210 10:45:49.816396 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-fpl55" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cc22221d-0c02-4e8c-8314-c2e6d9290b5e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ef0cc518de07a141be124545f2e547c630329ff507b0e0e71e4dee9b4be5d89a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ce7c3983123be2f92944322c2cf554e67cf80a51981415d9872bfc2db9f1ed3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cafa00280d671b48ed3628917793412a20b9d3be93a80d9a7775003081aea1ab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name
\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dbc444116b8592021f7d6ba0387d473521cba7c09ca78384be81f688af54554f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://782bcb7eb8341b73ff1c32c5af00aaefa2a513e5d1672f9f7df13cd05467131c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c39c3d56584d0939bfa9be938be09db5c93ddf50e0a56cacaa6e30c7b79475a1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\
"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d077dbe18449f6a39b341d00aa17f12c5f31287117bd944c6e65135f02fd3b13\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://547474a5e2f6912cd7f0fd6c25cdaf68ff49af2ba46c782bf7dc81e8052dbcfc\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-10T10:45:47Z\\\",\\\"message\\\":\\\"e (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1210 10:45:46.406982 5937 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1210 10:45:46.407076 5937 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1210 10:45:46.407098 5937 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1210 10:45:46.407136 5937 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI1210 10:45:46.407172 5937 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1210 10:45:46.407186 5937 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI1210 10:45:46.407183 5937 handler.go:208] Removed *v1.Node event handler 2\\\\nI1210 10:45:46.407206 5937 handler.go:208] Removed *v1.Node event handler 7\\\\nI1210 10:45:46.407210 5937 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI1210 10:45:46.407161 5937 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1210 10:45:46.407235 5937 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1210 10:45:46.407243 5937 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1210 10:45:46.407286 5937 factory.go:656] Stopping watch factory\\\\nI1210 10:45:46.407293 5937 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1210 10:45:46.407318 5937 ovnkube.go:599] Stopped ovnkube\\\\nI1210 
10:45:4\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:36Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://df663a4e7ac68c8967db1167ea07b83862e9d52aa13b3f10e51934bb18e7397e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\
\"containerID\\\":\\\"cri-o://ada0c2d5e74e1805777a330c39a3c1c0fb677f876387207a83c79bd7ed06775a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ada0c2d5e74e1805777a330c39a3c1c0fb677f876387207a83c79bd7ed06775a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:27Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-fpl55\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:49Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:49 crc kubenswrapper[4780]: I1210 10:45:49.820669 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:49 crc kubenswrapper[4780]: I1210 10:45:49.820828 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:49 crc kubenswrapper[4780]: I1210 10:45:49.820897 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:49 crc kubenswrapper[4780]: I1210 10:45:49.820985 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:49 crc kubenswrapper[4780]: I1210 10:45:49.821074 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:49Z","lastTransitionTime":"2025-12-10T10:45:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:45:49 crc kubenswrapper[4780]: I1210 10:45:49.832024 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"da4d38a3-53c0-417d-a86f-3496714bd352\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:56Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:56Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7f0f1f88e79f81b145ad50cc2b0a6e66de12f33c6819aaa356c8e6f7808384f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:44:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d6c4b099607f36efe13488a53c66715ba756493eaa11612cef18b8174b7b48ef\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:44:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a352e1e0c2a6fcb27ed2d53b1952586d56239ac801471f41d319d4b9390f7334\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartC
ount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:44:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://960f2d7c617f5d0b3281d431308985bb7419af77581a404554f849d22ffa1687\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f71a2483b5040f737133b92baa6ea162f365c492a0d860a50ba8eabd7ca23519\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-10T10:45:21Z\\\",\\\"message\\\":\\\"file observer\\\\nW1210 10:45:20.821547 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1210 10:45:20.821865 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1210 10:45:20.824662 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-536389068/tls.crt::/tmp/serving-cert-536389068/tls.key\\\\\\\"\\\\nI1210 10:45:21.107127 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1210 10:45:21.109726 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1210 10:45:21.109750 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1210 10:45:21.109791 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1210 10:45:21.109797 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1210 10:45:21.115580 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1210 10:45:21.115608 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1210 10:45:21.115614 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1210 10:45:21.115621 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1210 10:45:21.115664 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1210 10:45:21.115672 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1210 10:45:21.115676 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1210 10:45:21.117097 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1210 10:45:21.119004 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:14Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://531ca3c69106ee33211f11eba4a6fcbcf6cf5b8b6ee16193b5a118999537d2e0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:00Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6a7d241fa7fd864088c8bfa29a4457b38b3e78a6846872d6f52aeebb0de99c58\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6a7d241fa7fd864088c8bfa29a4457b38b3e78a6846872d6f52aeebb0de99c58\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:44:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:44:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:44:56Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:49Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:49 crc kubenswrapper[4780]: I1210 10:45:49.851255 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8ca741d2df061bc77e981eca8a555ba59082c6db0357093490908a1b27054340\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:49Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:49 crc kubenswrapper[4780]: I1210 10:45:49.867221 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6bf1dca1-b191-4796-b326-baac53e84045\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://439376b6f14a03790c4d49523ce4f21e3ee73ee0a81954f873409cf956048f53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sh92h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://57ec3f3a4da992b4288db7d6633da18e479fdc56b10c503a2a44368671882f0d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sh92h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:27Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-xhdr5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:49Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:49 crc kubenswrapper[4780]: I1210 10:45:49.880324 4780 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:49Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:49 crc kubenswrapper[4780]: I1210 10:45:49.899716 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://acd480054c5d19efdbcce679b2246ce2972593aa710b1074a2fa90006f351c66\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7c28d1629c1d97373d475f14e54585e81a67114d5a69e21be9d1e750040d6d5c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:49Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:49 crc kubenswrapper[4780]: I1210 10:45:49.914281 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-msm77" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c1046aa3-9bf1-4013-8e8d-5629f08ed5e2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6a59aa8cd7a70e094ff80b0ae626aaabb77fb7ffdcffa1320945909c32da0704\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2ttwk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:30Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-msm77\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:49Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:49 crc kubenswrapper[4780]: I1210 10:45:49.924013 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:49 crc kubenswrapper[4780]: I1210 10:45:49.924062 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:49 crc kubenswrapper[4780]: I1210 10:45:49.924074 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:49 crc kubenswrapper[4780]: I1210 10:45:49.924091 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:49 crc kubenswrapper[4780]: I1210 10:45:49.924100 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:49Z","lastTransitionTime":"2025-12-10T10:45:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:49 crc kubenswrapper[4780]: I1210 10:45:49.926446 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"056c06c7-eb0f-4fb7-9f86-2884fd0d1e60\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b9592145faf65060693a3f1e14db253e10cd09c642ea0aa1a7682f5b06872f14\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:44:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://eccc5070135705c193e1020872bffc1b8aeb70dbe82f1bb520f36f012ca00703\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:44:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://72f3bd530fbcc66ed779ee9f6ef1c442f2697ca027aad5bd2473f42101d55528\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:44:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kuber
netes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3970fa6bd2aa3e93a4473d78218e9dd494aa5f7056efbcfb123160bb63162192\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3970fa6bd2aa3e93a4473d78218e9dd494aa5f7056efbcfb123160bb63162192\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:44:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:44:57Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:44:56Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:49Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:50 crc kubenswrapper[4780]: I1210 10:45:50.028445 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:50 crc kubenswrapper[4780]: I1210 10:45:50.028500 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:50 crc kubenswrapper[4780]: I1210 10:45:50.028510 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:50 crc kubenswrapper[4780]: I1210 10:45:50.028529 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:50 crc kubenswrapper[4780]: I1210 10:45:50.028538 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:50Z","lastTransitionTime":"2025-12-10T10:45:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:45:50 crc kubenswrapper[4780]: I1210 10:45:50.131483 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:50 crc kubenswrapper[4780]: I1210 10:45:50.131553 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:50 crc kubenswrapper[4780]: I1210 10:45:50.131578 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:50 crc kubenswrapper[4780]: I1210 10:45:50.131626 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:50 crc kubenswrapper[4780]: I1210 10:45:50.131652 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:50Z","lastTransitionTime":"2025-12-10T10:45:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:50 crc kubenswrapper[4780]: I1210 10:45:50.235142 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:50 crc kubenswrapper[4780]: I1210 10:45:50.235182 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:50 crc kubenswrapper[4780]: I1210 10:45:50.235193 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:50 crc kubenswrapper[4780]: I1210 10:45:50.235208 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:50 crc kubenswrapper[4780]: I1210 10:45:50.235218 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:50Z","lastTransitionTime":"2025-12-10T10:45:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:50 crc kubenswrapper[4780]: I1210 10:45:50.338311 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:50 crc kubenswrapper[4780]: I1210 10:45:50.338541 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:50 crc kubenswrapper[4780]: I1210 10:45:50.338577 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:50 crc kubenswrapper[4780]: I1210 10:45:50.338610 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:50 crc kubenswrapper[4780]: I1210 10:45:50.338630 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:50Z","lastTransitionTime":"2025-12-10T10:45:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:45:50 crc kubenswrapper[4780]: I1210 10:45:50.441347 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:50 crc kubenswrapper[4780]: I1210 10:45:50.441417 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:50 crc kubenswrapper[4780]: I1210 10:45:50.441434 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:50 crc kubenswrapper[4780]: I1210 10:45:50.441462 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:50 crc kubenswrapper[4780]: I1210 10:45:50.441480 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:50Z","lastTransitionTime":"2025-12-10T10:45:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:50 crc kubenswrapper[4780]: I1210 10:45:50.544791 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:50 crc kubenswrapper[4780]: I1210 10:45:50.545209 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:50 crc kubenswrapper[4780]: I1210 10:45:50.545478 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:50 crc kubenswrapper[4780]: I1210 10:45:50.545620 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:50 crc kubenswrapper[4780]: I1210 10:45:50.545727 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:50Z","lastTransitionTime":"2025-12-10T10:45:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:50 crc kubenswrapper[4780]: I1210 10:45:50.648475 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:50 crc kubenswrapper[4780]: I1210 10:45:50.648519 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:50 crc kubenswrapper[4780]: I1210 10:45:50.648535 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:50 crc kubenswrapper[4780]: I1210 10:45:50.648565 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:50 crc kubenswrapper[4780]: I1210 10:45:50.648591 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:50Z","lastTransitionTime":"2025-12-10T10:45:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:45:50 crc kubenswrapper[4780]: I1210 10:45:50.756332 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:50 crc kubenswrapper[4780]: I1210 10:45:50.756514 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:50 crc kubenswrapper[4780]: I1210 10:45:50.756569 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:50 crc kubenswrapper[4780]: I1210 10:45:50.756682 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:50 crc kubenswrapper[4780]: I1210 10:45:50.756705 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:50Z","lastTransitionTime":"2025-12-10T10:45:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:50 crc kubenswrapper[4780]: I1210 10:45:50.860580 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:50 crc kubenswrapper[4780]: I1210 10:45:50.860623 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:50 crc kubenswrapper[4780]: I1210 10:45:50.860635 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:50 crc kubenswrapper[4780]: I1210 10:45:50.860667 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:50 crc kubenswrapper[4780]: I1210 10:45:50.860681 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:50Z","lastTransitionTime":"2025-12-10T10:45:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:50 crc kubenswrapper[4780]: I1210 10:45:50.958950 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-46s5p" Dec 10 10:45:50 crc kubenswrapper[4780]: I1210 10:45:50.959296 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 10 10:45:50 crc kubenswrapper[4780]: E1210 10:45:50.959378 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-46s5p" podUID="24187953-1dc5-48d7-b00c-1e5876604b6b" Dec 10 10:45:50 crc kubenswrapper[4780]: I1210 10:45:50.958955 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 10 10:45:50 crc kubenswrapper[4780]: I1210 10:45:50.958951 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 10 10:45:50 crc kubenswrapper[4780]: E1210 10:45:50.959700 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 10 10:45:50 crc kubenswrapper[4780]: E1210 10:45:50.959792 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 10 10:45:50 crc kubenswrapper[4780]: E1210 10:45:50.959980 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 10 10:45:50 crc kubenswrapper[4780]: I1210 10:45:50.963542 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:50 crc kubenswrapper[4780]: I1210 10:45:50.963607 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:50 crc kubenswrapper[4780]: I1210 10:45:50.963618 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:50 crc kubenswrapper[4780]: I1210 10:45:50.963630 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:50 crc kubenswrapper[4780]: I1210 10:45:50.963644 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:50Z","lastTransitionTime":"2025-12-10T10:45:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:45:51 crc kubenswrapper[4780]: I1210 10:45:51.066874 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:51 crc kubenswrapper[4780]: I1210 10:45:51.067305 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:51 crc kubenswrapper[4780]: I1210 10:45:51.067403 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:51 crc kubenswrapper[4780]: I1210 10:45:51.067503 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:51 crc kubenswrapper[4780]: I1210 10:45:51.067605 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:51Z","lastTransitionTime":"2025-12-10T10:45:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:51 crc kubenswrapper[4780]: I1210 10:45:51.171216 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:51 crc kubenswrapper[4780]: I1210 10:45:51.171267 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:51 crc kubenswrapper[4780]: I1210 10:45:51.171278 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:51 crc kubenswrapper[4780]: I1210 10:45:51.171295 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:51 crc kubenswrapper[4780]: I1210 10:45:51.171306 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:51Z","lastTransitionTime":"2025-12-10T10:45:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:51 crc kubenswrapper[4780]: I1210 10:45:51.274378 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:51 crc kubenswrapper[4780]: I1210 10:45:51.274437 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:51 crc kubenswrapper[4780]: I1210 10:45:51.274456 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:51 crc kubenswrapper[4780]: I1210 10:45:51.274475 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:51 crc kubenswrapper[4780]: I1210 10:45:51.274492 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:51Z","lastTransitionTime":"2025-12-10T10:45:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:45:51 crc kubenswrapper[4780]: I1210 10:45:51.319897 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-fpl55_cc22221d-0c02-4e8c-8314-c2e6d9290b5e/ovnkube-controller/1.log" Dec 10 10:45:51 crc kubenswrapper[4780]: I1210 10:45:51.320742 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-fpl55_cc22221d-0c02-4e8c-8314-c2e6d9290b5e/ovnkube-controller/0.log" Dec 10 10:45:51 crc kubenswrapper[4780]: I1210 10:45:51.323250 4780 generic.go:334] "Generic (PLEG): container finished" podID="cc22221d-0c02-4e8c-8314-c2e6d9290b5e" containerID="d077dbe18449f6a39b341d00aa17f12c5f31287117bd944c6e65135f02fd3b13" exitCode=1 Dec 10 10:45:51 crc kubenswrapper[4780]: I1210 10:45:51.323305 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-fpl55" event={"ID":"cc22221d-0c02-4e8c-8314-c2e6d9290b5e","Type":"ContainerDied","Data":"d077dbe18449f6a39b341d00aa17f12c5f31287117bd944c6e65135f02fd3b13"} Dec 10 10:45:51 crc kubenswrapper[4780]: I1210 10:45:51.323390 4780 scope.go:117] "RemoveContainer" containerID="547474a5e2f6912cd7f0fd6c25cdaf68ff49af2ba46c782bf7dc81e8052dbcfc" Dec 10 10:45:51 crc kubenswrapper[4780]: I1210 10:45:51.324537 4780 scope.go:117] "RemoveContainer" containerID="d077dbe18449f6a39b341d00aa17f12c5f31287117bd944c6e65135f02fd3b13" Dec 10 10:45:51 crc kubenswrapper[4780]: E1210 10:45:51.324796 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 10s restarting failed container=ovnkube-controller pod=ovnkube-node-fpl55_openshift-ovn-kubernetes(cc22221d-0c02-4e8c-8314-c2e6d9290b5e)\"" pod="openshift-ovn-kubernetes/ovnkube-node-fpl55" podUID="cc22221d-0c02-4e8c-8314-c2e6d9290b5e" Dec 10 10:45:51 crc kubenswrapper[4780]: I1210 10:45:51.345190 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://738eca4a4800d0b83e165aaad1b57939c0fca3335dbf043de168bcc240b2c32c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:51Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:51 crc kubenswrapper[4780]: I1210 10:45:51.360369 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located 
when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:51Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:51 crc kubenswrapper[4780]: I1210 10:45:51.374975 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-8cwb7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"deadb49b-61b8-435f-8168-d7bd3c01b5ad\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b46e1864cd02c0ab49ad21329dedf09667d52948a3f59eac7f32302cc0b48bda\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cn
i/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r67pg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:27Z\\\"}}\" for pod \"openshift-multus\"/\"multus-8cwb7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:51Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:51 crc kubenswrapper[4780]: I1210 10:45:51.378888 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:51 crc kubenswrapper[4780]: I1210 10:45:51.378970 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:51 crc kubenswrapper[4780]: I1210 10:45:51.378989 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:51 crc kubenswrapper[4780]: I1210 10:45:51.379013 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:51 crc kubenswrapper[4780]: I1210 10:45:51.379029 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:51Z","lastTransitionTime":"2025-12-10T10:45:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:45:51 crc kubenswrapper[4780]: I1210 10:45:51.396690 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-rwfxn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e522fb8-b104-4f14-a3a2-628fbe0ef36c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f3318d09af03d52017f648e9727180287dbcfce150242eaeb5f95be2fa6a1374\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5855e463a188d72cb8eac92bd97b9b2f91550a9a5437d7df83405bbe7a104f5d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5855e463a188d72cb8eac92bd97b9b2f91550a9a5437d7df83405bbe7a104f5d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4a16db7b744311d27339440bc46ebd09ba4264c5892ff205c6125af2a127e67e\
\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4a16db7b744311d27339440bc46ebd09ba4264c5892ff205c6125af2a127e67e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fcbeee4d4e545150d725187049a89aa3d35ddd8f7767a6af0ea84a385454c316\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fcbeee4d4e545150d725187049a89aa3d35ddd8f7767a6af0ea84a385454c316\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3d60cbe3cc8e817975ed6843aefdbfe3233e8857a6458d77322a623ed858ca18\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3d60cbe3cc8e817975ed6843aefdbfe3233e8857a6458d77322a623ed858ca18\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"
mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0a0b3aad440bea346393ff35f0fa9492282c2a69b4a4c170f56ca7af63666077\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0a0b3aad440bea346393ff35f0fa9492282c2a69b4a4c170f56ca7af63666077\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3b72566e3e3a5f8eb12033cd2342bc3e9ad8f091f47c9d65309888d91a1ceaf4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3b72566e3e3a5f8eb12033cd2342bc3e9ad8f091f47c9d65309888d91a1ceaf4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:27Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-rwfxn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:51Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:51 crc kubenswrapper[4780]: I1210 10:45:51.409846 4780 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:51Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:51 crc kubenswrapper[4780]: I1210 10:45:51.424597 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-2lx8w" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8a3251eb-408c-42f1-b74d-261cb45eab71\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://414f485a56b9fb2fbea2bbc9687a588aabda6239cad44f8116ec680597dbe667\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lgkg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:27Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-2lx8w\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:51Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:51 crc kubenswrapper[4780]: I1210 10:45:51.444422 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-fpl55" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cc22221d-0c02-4e8c-8314-c2e6d9290b5e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ef0cc518de07a141be124545f2e547c630329ff507b0e0e71e4dee9b4be5d89a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ce7c3983123be2f92944322c2cf554e67cf80a51981415d9872bfc2db9f1ed3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cafa00280d671b48ed3628917793412a20b9d3be93a80d9a7775003081aea1ab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name
\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dbc444116b8592021f7d6ba0387d473521cba7c09ca78384be81f688af54554f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://782bcb7eb8341b73ff1c32c5af00aaefa2a513e5d1672f9f7df13cd05467131c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c39c3d56584d0939bfa9be938be09db5c93ddf50e0a56cacaa6e30c7b79475a1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\
"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d077dbe18449f6a39b341d00aa17f12c5f31287117bd944c6e65135f02fd3b13\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://547474a5e2f6912cd7f0fd6c25cdaf68ff49af2ba46c782bf7dc81e8052dbcfc\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-10T10:45:47Z\\\",\\\"message\\\":\\\"e (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1210 10:45:46.406982 5937 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1210 10:45:46.407076 5937 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1210 10:45:46.407098 5937 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1210 10:45:46.407136 5937 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI1210 10:45:46.407172 5937 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1210 10:45:46.407186 5937 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI1210 10:45:46.407183 5937 handler.go:208] Removed *v1.Node event handler 2\\\\nI1210 10:45:46.407206 5937 handler.go:208] Removed *v1.Node event handler 7\\\\nI1210 10:45:46.407210 5937 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI1210 10:45:46.407161 5937 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1210 10:45:46.407235 5937 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1210 10:45:46.407243 5937 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1210 10:45:46.407286 5937 factory.go:656] Stopping watch factory\\\\nI1210 10:45:46.407293 5937 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1210 10:45:46.407318 5937 ovnkube.go:599] Stopped ovnkube\\\\nI1210 10:45:4\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:36Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d077dbe18449f6a39b341d00aa17f12c5f31287117bd944c6e65135f02fd3b13\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-10T10:45:50Z\\\",\\\"message\\\":\\\"rator_TCP_cluster options:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.5.21:8443:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {58a148b3-0a7b-4412-b447-f87788c4883f}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1210 10:45:49.480966 6272 obj_retry.go:420] Function iterateRetryResources for *v1.Pod ended (in 215.696µs)\\\\nF1210 10:45:49.480983 6272 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared 
informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:49Z is after 2025-08-24T17:21:41\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://df663a4e7ac68c8967db1167ea07b83862e9d52aa13b3f10e51934bb18e7397e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\
"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ada0c2d5e74e1805777a330c39a3c1c0fb677f876387207a83c79bd7ed06775a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ada0c2d5e74e1805777a330c39a3c1c0fb677f876387207a83c79bd7ed06775a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:27Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-fpl55\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:51Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:51 crc kubenswrapper[4780]: I1210 10:45:51.457679 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-6m7tj" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a78da08f-1ec1-4cc7-af55-d527da423778\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://920fb034d3d8ce6bf28b128e33093aa5daa724b15c51c7e208ed94ccc2f4840e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jvtxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a4509159a8aa8b24876c86f827f346063fd6cd2603c15242f6507b0a4afaff3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jvtxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:38Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-6m7tj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:51Z is after 2025-08-24T17:21:41Z" Dec 10 
10:45:51 crc kubenswrapper[4780]: I1210 10:45:51.470684 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-46s5p" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"24187953-1dc5-48d7-b00c-1e5876604b6b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:40Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:40Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jg69p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jg69p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:40Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-46s5p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:51Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:51 crc kubenswrapper[4780]: I1210 10:45:51.482803 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:51 crc kubenswrapper[4780]: I1210 10:45:51.482837 4780 kubelet_node_status.go:724] "Recording event message 
for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:51 crc kubenswrapper[4780]: I1210 10:45:51.482847 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:51 crc kubenswrapper[4780]: I1210 10:45:51.482862 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:51 crc kubenswrapper[4780]: I1210 10:45:51.482871 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:51Z","lastTransitionTime":"2025-12-10T10:45:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:51 crc kubenswrapper[4780]: I1210 10:45:51.485678 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"da4d38a3-53c0-417d-a86f-3496714bd352\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:56Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:56Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7f0f1f88e79f81b145ad50cc2b0a6e66de12f33c6819aaa356c8e6f7808384f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:44:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d6c4b099607f36efe13488a53c66715ba756493eaa11612cef18b8174b7b48ef\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:44:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a352e1e0c2a6fcb27ed2d53b1952586d56239ac801471f41d319d4b9390f7334\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:44:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://960f2d7c617f5d0b3281d431308985bb7419af77581a404554f849d22ffa1687\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f71a2483b5040f737133b92baa6ea162f365c492a0d860a50ba8eabd7ca23519\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-10T10:45:21Z\\\",\\\"message\\\":\\\"file observer\\\\nW1210 10:45:20.821547 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1210 10:45:20.821865 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1210 10:45:20.824662 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-536389068/tls.crt::/tmp/serving-cert-536389068/tls.key\\\\\\\"\\\\nI1210 10:45:21.107127 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1210 10:45:21.109726 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1210 10:45:21.109750 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1210 10:45:21.109791 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1210 10:45:21.109797 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1210 10:45:21.115580 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1210 10:45:21.115608 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1210 10:45:21.115614 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1210 10:45:21.115621 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1210 10:45:21.115664 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1210 10:45:21.115672 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1210 10:45:21.115676 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1210 10:45:21.117097 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1210 10:45:21.119004 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:14Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://531ca3c69106ee33211f11eba4a6fcbcf6cf5b8b6ee16193b5a118999537d2e0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:00Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6a7d241fa7fd864088c8bfa29a4457b38b3e78a6846872d6f52aeebb0de99c58\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6a7d241fa7fd864088c8bfa29a4457b38b3e78a6846872d6f52aeebb0de99c58\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:44:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:44:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:44:56Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:51Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:51 crc kubenswrapper[4780]: I1210 10:45:51.500280 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8ca741d2df061bc77e981eca8a555ba59082c6db0357093490908a1b27054340\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:51Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:51 crc kubenswrapper[4780]: I1210 10:45:51.514320 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6bf1dca1-b191-4796-b326-baac53e84045\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://439376b6f14a03790c4d49523ce4f21e3ee73ee0a81954f873409cf956048f53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sh92h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://57ec3f3a4da992b4288db7d6633da18e479fdc56b10c503a2a44368671882f0d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sh92h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:27Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-xhdr5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:51Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:51 crc kubenswrapper[4780]: I1210 10:45:51.527477 4780 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"056c06c7-eb0f-4fb7-9f86-2884fd0d1e60\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b9592145faf65060693a3f1e14db253e10cd09c642ea0aa1a7682f5b06872f14\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:44:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://eccc5070135705c193e1020872bffc1b8aeb70dbe82f1bb520f36f012ca00703\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:44:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://72f3bd530fbcc66ed779ee9f6ef1c442f2697ca027aad5bd2473f42101d55528\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:44:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":
[{\\\"containerID\\\":\\\"cri-o://3970fa6bd2aa3e93a4473d78218e9dd494aa5f7056efbcfb123160bb63162192\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3970fa6bd2aa3e93a4473d78218e9dd494aa5f7056efbcfb123160bb63162192\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:44:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:44:57Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:44:56Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:51Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:51 crc kubenswrapper[4780]: I1210 10:45:51.544454 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:51Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:51 crc kubenswrapper[4780]: I1210 10:45:51.560956 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://acd480054c5d19efdbcce679b2246ce2972593aa710b1074a2fa90006f351c66\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7c28d1629c1d97373d475f14e54585e81a67114d5a69e21be9d1e750040d6d5c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"m
ountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:51Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:51 crc kubenswrapper[4780]: I1210 10:45:51.572286 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-msm77" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c1046aa3-9bf1-4013-8e8d-5629f08ed5e2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6a59aa8cd7a70e094ff80b0ae626aaabb77fb7ffdcffa1320945909c32da0704\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2ttwk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:30Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-msm77\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:51Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:51 crc kubenswrapper[4780]: I1210 10:45:51.585873 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Dec 10 10:45:51 crc kubenswrapper[4780]: I1210 10:45:51.585951 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:51 crc kubenswrapper[4780]: I1210 10:45:51.585966 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:51 crc kubenswrapper[4780]: I1210 10:45:51.585984 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:51 crc kubenswrapper[4780]: I1210 10:45:51.585997 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:51Z","lastTransitionTime":"2025-12-10T10:45:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:51 crc kubenswrapper[4780]: I1210 10:45:51.689691 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:51 crc kubenswrapper[4780]: I1210 10:45:51.689753 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:51 crc kubenswrapper[4780]: I1210 10:45:51.689765 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:51 crc kubenswrapper[4780]: I1210 10:45:51.689784 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:51 crc kubenswrapper[4780]: I1210 10:45:51.689797 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:51Z","lastTransitionTime":"2025-12-10T10:45:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:51 crc kubenswrapper[4780]: I1210 10:45:51.792540 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:51 crc kubenswrapper[4780]: I1210 10:45:51.792612 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:51 crc kubenswrapper[4780]: I1210 10:45:51.792630 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:51 crc kubenswrapper[4780]: I1210 10:45:51.792691 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:51 crc kubenswrapper[4780]: I1210 10:45:51.792717 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:51Z","lastTransitionTime":"2025-12-10T10:45:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:45:51 crc kubenswrapper[4780]: I1210 10:45:51.895441 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:51 crc kubenswrapper[4780]: I1210 10:45:51.895499 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:51 crc kubenswrapper[4780]: I1210 10:45:51.895511 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:51 crc kubenswrapper[4780]: I1210 10:45:51.895527 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:51 crc kubenswrapper[4780]: I1210 10:45:51.895538 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:51Z","lastTransitionTime":"2025-12-10T10:45:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:51 crc kubenswrapper[4780]: I1210 10:45:51.999090 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:51 crc kubenswrapper[4780]: I1210 10:45:51.999131 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:51 crc kubenswrapper[4780]: I1210 10:45:51.999140 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:51 crc kubenswrapper[4780]: I1210 10:45:51.999156 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:51 crc kubenswrapper[4780]: I1210 10:45:51.999167 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:51Z","lastTransitionTime":"2025-12-10T10:45:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:52 crc kubenswrapper[4780]: I1210 10:45:52.103486 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:52 crc kubenswrapper[4780]: I1210 10:45:52.104058 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:52 crc kubenswrapper[4780]: I1210 10:45:52.104126 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:52 crc kubenswrapper[4780]: I1210 10:45:52.104161 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:52 crc kubenswrapper[4780]: I1210 10:45:52.104186 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:52Z","lastTransitionTime":"2025-12-10T10:45:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:45:52 crc kubenswrapper[4780]: I1210 10:45:52.206894 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:52 crc kubenswrapper[4780]: I1210 10:45:52.206976 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:52 crc kubenswrapper[4780]: I1210 10:45:52.206998 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:52 crc kubenswrapper[4780]: I1210 10:45:52.207022 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:52 crc kubenswrapper[4780]: I1210 10:45:52.207039 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:52Z","lastTransitionTime":"2025-12-10T10:45:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:52 crc kubenswrapper[4780]: I1210 10:45:52.309347 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:52 crc kubenswrapper[4780]: I1210 10:45:52.309396 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:52 crc kubenswrapper[4780]: I1210 10:45:52.309410 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:52 crc kubenswrapper[4780]: I1210 10:45:52.309428 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:52 crc kubenswrapper[4780]: I1210 10:45:52.309440 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:52Z","lastTransitionTime":"2025-12-10T10:45:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:45:52 crc kubenswrapper[4780]: I1210 10:45:52.327737 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-fpl55_cc22221d-0c02-4e8c-8314-c2e6d9290b5e/ovnkube-controller/1.log" Dec 10 10:45:52 crc kubenswrapper[4780]: I1210 10:45:52.413504 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:52 crc kubenswrapper[4780]: I1210 10:45:52.413551 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:52 crc kubenswrapper[4780]: I1210 10:45:52.413561 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:52 crc kubenswrapper[4780]: I1210 10:45:52.413587 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:52 crc kubenswrapper[4780]: I1210 10:45:52.413599 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:52Z","lastTransitionTime":"2025-12-10T10:45:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:52 crc kubenswrapper[4780]: I1210 10:45:52.520504 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:52 crc kubenswrapper[4780]: I1210 10:45:52.520579 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:52 crc kubenswrapper[4780]: I1210 10:45:52.520610 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:52 crc kubenswrapper[4780]: I1210 10:45:52.520637 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:52 crc kubenswrapper[4780]: I1210 10:45:52.520653 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:52Z","lastTransitionTime":"2025-12-10T10:45:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:45:52 crc kubenswrapper[4780]: I1210 10:45:52.623277 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:52 crc kubenswrapper[4780]: I1210 10:45:52.623313 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:52 crc kubenswrapper[4780]: I1210 10:45:52.623324 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:52 crc kubenswrapper[4780]: I1210 10:45:52.623340 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:52 crc kubenswrapper[4780]: I1210 10:45:52.623350 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:52Z","lastTransitionTime":"2025-12-10T10:45:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:52 crc kubenswrapper[4780]: I1210 10:45:52.725950 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:52 crc kubenswrapper[4780]: I1210 10:45:52.726037 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:52 crc kubenswrapper[4780]: I1210 10:45:52.726049 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:52 crc kubenswrapper[4780]: I1210 10:45:52.726066 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:52 crc kubenswrapper[4780]: I1210 10:45:52.726078 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:52Z","lastTransitionTime":"2025-12-10T10:45:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:52 crc kubenswrapper[4780]: I1210 10:45:52.828684 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:52 crc kubenswrapper[4780]: I1210 10:45:52.828731 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:52 crc kubenswrapper[4780]: I1210 10:45:52.828744 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:52 crc kubenswrapper[4780]: I1210 10:45:52.828764 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:52 crc kubenswrapper[4780]: I1210 10:45:52.828777 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:52Z","lastTransitionTime":"2025-12-10T10:45:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:45:52 crc kubenswrapper[4780]: I1210 10:45:52.930820 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:52 crc kubenswrapper[4780]: I1210 10:45:52.930867 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:52 crc kubenswrapper[4780]: I1210 10:45:52.930878 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:52 crc kubenswrapper[4780]: I1210 10:45:52.930894 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:52 crc kubenswrapper[4780]: I1210 10:45:52.930905 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:52Z","lastTransitionTime":"2025-12-10T10:45:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:52 crc kubenswrapper[4780]: I1210 10:45:52.957821 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-46s5p" Dec 10 10:45:52 crc kubenswrapper[4780]: E1210 10:45:52.958035 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-46s5p" podUID="24187953-1dc5-48d7-b00c-1e5876604b6b" Dec 10 10:45:52 crc kubenswrapper[4780]: I1210 10:45:52.958443 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 10 10:45:52 crc kubenswrapper[4780]: E1210 10:45:52.958504 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 10 10:45:52 crc kubenswrapper[4780]: I1210 10:45:52.958550 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 10 10:45:52 crc kubenswrapper[4780]: I1210 10:45:52.958565 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 10 10:45:52 crc kubenswrapper[4780]: E1210 10:45:52.958812 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 10 10:45:52 crc kubenswrapper[4780]: E1210 10:45:52.958696 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 10 10:45:53 crc kubenswrapper[4780]: I1210 10:45:53.033781 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:53 crc kubenswrapper[4780]: I1210 10:45:53.033832 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:53 crc kubenswrapper[4780]: I1210 10:45:53.033851 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:53 crc kubenswrapper[4780]: I1210 10:45:53.033872 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:53 crc kubenswrapper[4780]: I1210 10:45:53.033882 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:53Z","lastTransitionTime":"2025-12-10T10:45:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:53 crc kubenswrapper[4780]: I1210 10:45:53.137317 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:53 crc kubenswrapper[4780]: I1210 10:45:53.137639 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:53 crc kubenswrapper[4780]: I1210 10:45:53.137866 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:53 crc kubenswrapper[4780]: I1210 10:45:53.138018 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:53 crc kubenswrapper[4780]: I1210 10:45:53.138135 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:53Z","lastTransitionTime":"2025-12-10T10:45:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:45:53 crc kubenswrapper[4780]: I1210 10:45:53.241281 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:53 crc kubenswrapper[4780]: I1210 10:45:53.241346 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:53 crc kubenswrapper[4780]: I1210 10:45:53.241360 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:53 crc kubenswrapper[4780]: I1210 10:45:53.241381 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:53 crc kubenswrapper[4780]: I1210 10:45:53.241393 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:53Z","lastTransitionTime":"2025-12-10T10:45:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:53 crc kubenswrapper[4780]: I1210 10:45:53.344904 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:53 crc kubenswrapper[4780]: I1210 10:45:53.344989 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:53 crc kubenswrapper[4780]: I1210 10:45:53.345005 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:53 crc kubenswrapper[4780]: I1210 10:45:53.345028 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:53 crc kubenswrapper[4780]: I1210 10:45:53.345040 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:53Z","lastTransitionTime":"2025-12-10T10:45:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:53 crc kubenswrapper[4780]: I1210 10:45:53.449683 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:53 crc kubenswrapper[4780]: I1210 10:45:53.450254 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:53 crc kubenswrapper[4780]: I1210 10:45:53.450269 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:53 crc kubenswrapper[4780]: I1210 10:45:53.450286 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:53 crc kubenswrapper[4780]: I1210 10:45:53.450296 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:53Z","lastTransitionTime":"2025-12-10T10:45:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:45:53 crc kubenswrapper[4780]: I1210 10:45:53.553983 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:53 crc kubenswrapper[4780]: I1210 10:45:53.554062 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:53 crc kubenswrapper[4780]: I1210 10:45:53.554081 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:53 crc kubenswrapper[4780]: I1210 10:45:53.554107 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:53 crc kubenswrapper[4780]: I1210 10:45:53.554123 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:53Z","lastTransitionTime":"2025-12-10T10:45:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:53 crc kubenswrapper[4780]: I1210 10:45:53.657809 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:53 crc kubenswrapper[4780]: I1210 10:45:53.657956 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:53 crc kubenswrapper[4780]: I1210 10:45:53.657972 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:53 crc kubenswrapper[4780]: I1210 10:45:53.657994 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:53 crc kubenswrapper[4780]: I1210 10:45:53.658005 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:53Z","lastTransitionTime":"2025-12-10T10:45:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:53 crc kubenswrapper[4780]: I1210 10:45:53.761080 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:53 crc kubenswrapper[4780]: I1210 10:45:53.761341 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:53 crc kubenswrapper[4780]: I1210 10:45:53.761454 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:53 crc kubenswrapper[4780]: I1210 10:45:53.761586 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:53 crc kubenswrapper[4780]: I1210 10:45:53.761658 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:53Z","lastTransitionTime":"2025-12-10T10:45:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:45:53 crc kubenswrapper[4780]: I1210 10:45:53.864775 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:53 crc kubenswrapper[4780]: I1210 10:45:53.864815 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:53 crc kubenswrapper[4780]: I1210 10:45:53.864825 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:53 crc kubenswrapper[4780]: I1210 10:45:53.864841 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:53 crc kubenswrapper[4780]: I1210 10:45:53.864853 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:53Z","lastTransitionTime":"2025-12-10T10:45:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:53 crc kubenswrapper[4780]: I1210 10:45:53.967268 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:53 crc kubenswrapper[4780]: I1210 10:45:53.967329 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:53 crc kubenswrapper[4780]: I1210 10:45:53.967346 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:53 crc kubenswrapper[4780]: I1210 10:45:53.967367 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:53 crc kubenswrapper[4780]: I1210 10:45:53.967379 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:53Z","lastTransitionTime":"2025-12-10T10:45:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:54 crc kubenswrapper[4780]: I1210 10:45:54.069944 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:54 crc kubenswrapper[4780]: I1210 10:45:54.069988 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:54 crc kubenswrapper[4780]: I1210 10:45:54.069999 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:54 crc kubenswrapper[4780]: I1210 10:45:54.070018 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:54 crc kubenswrapper[4780]: I1210 10:45:54.070036 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:54Z","lastTransitionTime":"2025-12-10T10:45:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:45:54 crc kubenswrapper[4780]: I1210 10:45:54.172975 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:54 crc kubenswrapper[4780]: I1210 10:45:54.173003 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:54 crc kubenswrapper[4780]: I1210 10:45:54.173011 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:54 crc kubenswrapper[4780]: I1210 10:45:54.173025 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:54 crc kubenswrapper[4780]: I1210 10:45:54.173034 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:54Z","lastTransitionTime":"2025-12-10T10:45:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:54 crc kubenswrapper[4780]: I1210 10:45:54.275525 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:54 crc kubenswrapper[4780]: I1210 10:45:54.276362 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:54 crc kubenswrapper[4780]: I1210 10:45:54.276412 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:54 crc kubenswrapper[4780]: I1210 10:45:54.276441 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:54 crc kubenswrapper[4780]: I1210 10:45:54.276456 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:54Z","lastTransitionTime":"2025-12-10T10:45:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:54 crc kubenswrapper[4780]: I1210 10:45:54.379740 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:54 crc kubenswrapper[4780]: I1210 10:45:54.379791 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:54 crc kubenswrapper[4780]: I1210 10:45:54.379865 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:54 crc kubenswrapper[4780]: I1210 10:45:54.379890 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:54 crc kubenswrapper[4780]: I1210 10:45:54.379902 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:54Z","lastTransitionTime":"2025-12-10T10:45:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:45:54 crc kubenswrapper[4780]: I1210 10:45:54.482900 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:54 crc kubenswrapper[4780]: I1210 10:45:54.483517 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:54 crc kubenswrapper[4780]: I1210 10:45:54.483654 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:54 crc kubenswrapper[4780]: I1210 10:45:54.483741 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:54 crc kubenswrapper[4780]: I1210 10:45:54.483821 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:54Z","lastTransitionTime":"2025-12-10T10:45:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:54 crc kubenswrapper[4780]: I1210 10:45:54.586952 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:54 crc kubenswrapper[4780]: I1210 10:45:54.586998 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:54 crc kubenswrapper[4780]: I1210 10:45:54.587007 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:54 crc kubenswrapper[4780]: I1210 10:45:54.587027 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:54 crc kubenswrapper[4780]: I1210 10:45:54.587037 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:54Z","lastTransitionTime":"2025-12-10T10:45:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:54 crc kubenswrapper[4780]: I1210 10:45:54.689891 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:54 crc kubenswrapper[4780]: I1210 10:45:54.689965 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:54 crc kubenswrapper[4780]: I1210 10:45:54.689981 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:54 crc kubenswrapper[4780]: I1210 10:45:54.689998 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:54 crc kubenswrapper[4780]: I1210 10:45:54.690009 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:54Z","lastTransitionTime":"2025-12-10T10:45:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:45:54 crc kubenswrapper[4780]: I1210 10:45:54.793978 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:54 crc kubenswrapper[4780]: I1210 10:45:54.794021 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:54 crc kubenswrapper[4780]: I1210 10:45:54.794033 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:54 crc kubenswrapper[4780]: I1210 10:45:54.794049 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:54 crc kubenswrapper[4780]: I1210 10:45:54.794058 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:54Z","lastTransitionTime":"2025-12-10T10:45:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:54 crc kubenswrapper[4780]: I1210 10:45:54.896799 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:54 crc kubenswrapper[4780]: I1210 10:45:54.896858 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:54 crc kubenswrapper[4780]: I1210 10:45:54.896907 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:54 crc kubenswrapper[4780]: I1210 10:45:54.896943 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:54 crc kubenswrapper[4780]: I1210 10:45:54.896955 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:54Z","lastTransitionTime":"2025-12-10T10:45:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:54 crc kubenswrapper[4780]: I1210 10:45:54.958101 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 10 10:45:54 crc kubenswrapper[4780]: I1210 10:45:54.958115 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 10 10:45:54 crc kubenswrapper[4780]: I1210 10:45:54.958168 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-46s5p" Dec 10 10:45:54 crc kubenswrapper[4780]: I1210 10:45:54.958204 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 10 10:45:54 crc kubenswrapper[4780]: E1210 10:45:54.958775 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 10 10:45:54 crc kubenswrapper[4780]: E1210 10:45:54.958854 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-46s5p" podUID="24187953-1dc5-48d7-b00c-1e5876604b6b" Dec 10 10:45:54 crc kubenswrapper[4780]: E1210 10:45:54.958884 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 10 10:45:54 crc kubenswrapper[4780]: E1210 10:45:54.959126 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 10 10:45:54 crc kubenswrapper[4780]: I1210 10:45:54.999572 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:54 crc kubenswrapper[4780]: I1210 10:45:54.999619 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:54 crc kubenswrapper[4780]: I1210 10:45:54.999629 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:54 crc kubenswrapper[4780]: I1210 10:45:54.999645 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:54 crc kubenswrapper[4780]: I1210 10:45:54.999656 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:54Z","lastTransitionTime":"2025-12-10T10:45:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:45:55 crc kubenswrapper[4780]: I1210 10:45:55.102188 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:55 crc kubenswrapper[4780]: I1210 10:45:55.102232 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:55 crc kubenswrapper[4780]: I1210 10:45:55.102251 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:55 crc kubenswrapper[4780]: I1210 10:45:55.102274 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:55 crc kubenswrapper[4780]: I1210 10:45:55.102284 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:55Z","lastTransitionTime":"2025-12-10T10:45:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:55 crc kubenswrapper[4780]: I1210 10:45:55.205044 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:55 crc kubenswrapper[4780]: I1210 10:45:55.205110 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:55 crc kubenswrapper[4780]: I1210 10:45:55.205121 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:55 crc kubenswrapper[4780]: I1210 10:45:55.205139 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:55 crc kubenswrapper[4780]: I1210 10:45:55.205151 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:55Z","lastTransitionTime":"2025-12-10T10:45:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:55 crc kubenswrapper[4780]: I1210 10:45:55.308840 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:55 crc kubenswrapper[4780]: I1210 10:45:55.308898 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:55 crc kubenswrapper[4780]: I1210 10:45:55.308910 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:55 crc kubenswrapper[4780]: I1210 10:45:55.308952 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:55 crc kubenswrapper[4780]: I1210 10:45:55.308965 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:55Z","lastTransitionTime":"2025-12-10T10:45:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:45:55 crc kubenswrapper[4780]: I1210 10:45:55.412198 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:55 crc kubenswrapper[4780]: I1210 10:45:55.412273 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:55 crc kubenswrapper[4780]: I1210 10:45:55.412286 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:55 crc kubenswrapper[4780]: I1210 10:45:55.412306 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:55 crc kubenswrapper[4780]: I1210 10:45:55.412318 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:55Z","lastTransitionTime":"2025-12-10T10:45:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:55 crc kubenswrapper[4780]: I1210 10:45:55.515969 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:55 crc kubenswrapper[4780]: I1210 10:45:55.516027 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:55 crc kubenswrapper[4780]: I1210 10:45:55.516045 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:55 crc kubenswrapper[4780]: I1210 10:45:55.516065 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:55 crc kubenswrapper[4780]: I1210 10:45:55.516075 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:55Z","lastTransitionTime":"2025-12-10T10:45:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:55 crc kubenswrapper[4780]: I1210 10:45:55.618627 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:55 crc kubenswrapper[4780]: I1210 10:45:55.618672 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:55 crc kubenswrapper[4780]: I1210 10:45:55.618682 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:55 crc kubenswrapper[4780]: I1210 10:45:55.618695 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:55 crc kubenswrapper[4780]: I1210 10:45:55.618704 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:55Z","lastTransitionTime":"2025-12-10T10:45:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:45:55 crc kubenswrapper[4780]: I1210 10:45:55.722327 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:55 crc kubenswrapper[4780]: I1210 10:45:55.722689 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:55 crc kubenswrapper[4780]: I1210 10:45:55.722824 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:55 crc kubenswrapper[4780]: I1210 10:45:55.722981 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:55 crc kubenswrapper[4780]: I1210 10:45:55.723077 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:55Z","lastTransitionTime":"2025-12-10T10:45:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:55 crc kubenswrapper[4780]: I1210 10:45:55.826223 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:55 crc kubenswrapper[4780]: I1210 10:45:55.826259 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:55 crc kubenswrapper[4780]: I1210 10:45:55.826268 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:55 crc kubenswrapper[4780]: I1210 10:45:55.826283 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:55 crc kubenswrapper[4780]: I1210 10:45:55.826293 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:55Z","lastTransitionTime":"2025-12-10T10:45:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:55 crc kubenswrapper[4780]: I1210 10:45:55.928502 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:55 crc kubenswrapper[4780]: I1210 10:45:55.928547 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:55 crc kubenswrapper[4780]: I1210 10:45:55.928560 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:55 crc kubenswrapper[4780]: I1210 10:45:55.928578 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:55 crc kubenswrapper[4780]: I1210 10:45:55.928589 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:55Z","lastTransitionTime":"2025-12-10T10:45:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:45:55 crc kubenswrapper[4780]: I1210 10:45:55.975114 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"056c06c7-eb0f-4fb7-9f86-2884fd0d1e60\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b9592145faf65060693a3f1e14db253e10cd09c642ea0aa1a7682f5b06872f14\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:44:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://eccc5070135705c193e1020872bffc1b8aeb70dbe82f1bb520f36f012ca00703\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:44:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://72f3bd530fbcc66ed779ee9f6ef1c442f2697ca027aad5bd2473f42101d55528\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:44:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"
cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3970fa6bd2aa3e93a4473d78218e9dd494aa5f7056efbcfb123160bb63162192\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3970fa6bd2aa3e93a4473d78218e9dd494aa5f7056efbcfb123160bb63162192\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:44:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:44:57Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:44:56Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:55Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:55 crc kubenswrapper[4780]: I1210 10:45:55.992285 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:55Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:56 crc kubenswrapper[4780]: I1210 10:45:56.006874 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://acd480054c5d19efdbcce679b2246ce2972593aa710b1074a2fa90006f351c66\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7c28d1629c1d97373d475f14e54585e81a67114d5a69e21be9d1e750040d6d5c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"m
ountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:56Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:56 crc kubenswrapper[4780]: I1210 10:45:56.021240 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-msm77" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c1046aa3-9bf1-4013-8e8d-5629f08ed5e2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6a59aa8cd7a70e094ff80b0ae626aaabb77fb7ffdcffa1320945909c32da0704\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2ttwk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:30Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-msm77\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:56Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:56 crc kubenswrapper[4780]: I1210 10:45:56.031776 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Dec 10 10:45:56 crc kubenswrapper[4780]: I1210 10:45:56.031818 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:56 crc kubenswrapper[4780]: I1210 10:45:56.031831 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:56 crc kubenswrapper[4780]: I1210 10:45:56.031851 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:56 crc kubenswrapper[4780]: I1210 10:45:56.031865 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:56Z","lastTransitionTime":"2025-12-10T10:45:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:56 crc kubenswrapper[4780]: I1210 10:45:56.039754 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://738eca4a4800d0b83e165aaad1b57939c0fca3335dbf043de168bcc240b2c32c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:56Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:56 crc kubenswrapper[4780]: I1210 10:45:56.056042 4780 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:56Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:56 crc kubenswrapper[4780]: I1210 10:45:56.074251 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-8cwb7" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"deadb49b-61b8-435f-8168-d7bd3c01b5ad\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b46e1864cd02c0ab49ad21329dedf09667d52948a3f59eac7f32302cc0b48bda\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r67pg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:27Z\\\"}}\" for pod \"openshift-multus\"/\"multus-8cwb7\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:56Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:56 crc kubenswrapper[4780]: I1210 10:45:56.101050 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-rwfxn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e522fb8-b104-4f14-a3a2-628fbe0ef36c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f3318d09af03d52017f648e9727180287dbcfce150242eaeb5f95be2fa6a1374\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5855e463a188d72cb8eac92bd97b9b2f91550a9a5437d7df83405bbe7a104f5d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5855e463a188d72cb8eac92bd97b9b2f91550a9a5437d7df83405bbe7a104f5d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.
io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4a16db7b744311d27339440bc46ebd09ba4264c5892ff205c6125af2a127e67e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4a16db7b744311d27339440bc46ebd09ba4264c5892ff205c6125af2a127e67e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fcbeee4d4e545150d725187049a89aa3d35ddd8f7767a6af0ea84a385454c316\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fcbeee4d4e545150d725187049a89aa3d35ddd8f7767a6af0ea84a385454c316\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3d60cbe3cc8e817975ed6843aefdbfe3233e8857a6458d77322a623ed858ca18\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3d60cbe3cc8e817975ed6843aefdbfe3233e8857a6458d77322a623ed858ca18\\\",\\\"exitCode\\\":0,\\\
"finishedAt\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0a0b3aad440bea346393ff35f0fa9492282c2a69b4a4c170f56ca7af63666077\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0a0b3aad440bea346393ff35f0fa9492282c2a69b4a4c170f56ca7af63666077\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3b72566e3e3a5f8eb12033cd2342bc3e9ad8f091f47c9d65309888d91a1ceaf4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3b72566e3e3a5f8eb12033cd2342bc3e9ad8f091f47c9d65309888d91a1ceaf4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:27Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-rwfxn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has 
expired or is not yet valid: current time 2025-12-10T10:45:56Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:56 crc kubenswrapper[4780]: I1210 10:45:56.116561 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-2lx8w" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8a3251eb-408c-42f1-b74d-261cb45eab71\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://414f485a56b9fb2fbea2bbc9687a588aabda6239cad44f8116ec680597dbe667\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lgkg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:27Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-2lx8w\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:56Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:56 crc kubenswrapper[4780]: I1210 10:45:56.136018 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:56 crc kubenswrapper[4780]: I1210 10:45:56.136073 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:56 crc kubenswrapper[4780]: I1210 10:45:56.136083 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:56 crc kubenswrapper[4780]: I1210 10:45:56.136101 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:56 crc kubenswrapper[4780]: I1210 10:45:56.136110 4780 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:56Z","lastTransitionTime":"2025-12-10T10:45:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:56 crc kubenswrapper[4780]: I1210 10:45:56.140821 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-fpl55" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cc22221d-0c02-4e8c-8314-c2e6d9290b5e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ef0cc518de07a141be124545f2e547c630329ff507b0e0e71e4dee9b4be5d89a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ce7c3983123be2f92944322c2cf554e67cf80a51981415d9872bfc2db9f1ed3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\
":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cafa00280d671b48ed3628917793412a20b9d3be93a80d9a7775003081aea1ab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dbc444116b8592021f7d6ba0387d473521cba7c09ca78384be81f688af54554f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://782bcb7eb8341b73ff1c32c5af00aaefa2a513e5d1672f9f7df13cd05467131c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access
-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c39c3d56584d0939bfa9be938be09db5c93ddf50e0a56cacaa6e30c7b79475a1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d077dbe18449f6a39b341d00aa17f12c5f31287117bd944c6e65135f02fd3b13\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://547474a5e2f6912cd7f0fd6c25cdaf68ff49af2ba46c782bf7dc81e8052dbcfc\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-10T10:45:47Z\\\",\\\"message\\\":\\\"e (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1210 10:45:46.406982 5937 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1210 10:45:46.407076 5937 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1210 10:45:46.407098 5937 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1210 10:45:46.407136 5937 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI1210 10:45:46.407172 5937 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1210 10:45:46.407186 5937 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI1210 10:45:46.407183 5937 handler.go:208] Removed *v1.Node event handler 2\\\\nI1210 10:45:46.407206 5937 handler.go:208] Removed *v1.Node event handler 7\\\\nI1210 10:45:46.407210 5937 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI1210 10:45:46.407161 5937 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1210 10:45:46.407235 5937 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1210 10:45:46.407243 5937 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1210 10:45:46.407286 5937 factory.go:656] Stopping watch factory\\\\nI1210 10:45:46.407293 5937 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1210 10:45:46.407318 5937 ovnkube.go:599] Stopped ovnkube\\\\nI1210 
10:45:4\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:36Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d077dbe18449f6a39b341d00aa17f12c5f31287117bd944c6e65135f02fd3b13\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-10T10:45:50Z\\\",\\\"message\\\":\\\"rator_TCP_cluster options:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.5.21:8443:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {58a148b3-0a7b-4412-b447-f87788c4883f}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1210 10:45:49.480966 6272 obj_retry.go:420] Function iterateRetryResources for *v1.Pod ended (in 215.696µs)\\\\nF1210 10:45:49.480983 6272 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:49Z is after 
2025-08-24T17:21:41\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://df663a4e7ac68c8967db1167ea07b83862e9d52aa13b3f10e51934bb18e7397e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ada0c2d5e74e1805777a330c39a3c1c0fb677f876387207a83c79bd7ed06775a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:17
4f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ada0c2d5e74e1805777a330c39a3c1c0fb677f876387207a83c79bd7ed06775a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:27Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-fpl55\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:56Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:56 crc kubenswrapper[4780]: I1210 10:45:56.158180 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-6m7tj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a78da08f-1ec1-4cc7-af55-d527da423778\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://920fb034d3d8ce6bf28b128e33093aa5daa724b15c51c7e208ed94ccc2f4840e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jvtxj\\\",\\\"readOnly\\\":true,\\\
"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a4509159a8aa8b24876c86f827f346063fd6cd2603c15242f6507b0a4afaff3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jvtxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:38Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-6m7tj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:56Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:56 crc kubenswrapper[4780]: I1210 10:45:56.172057 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-46s5p" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"24187953-1dc5-48d7-b00c-1e5876604b6b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:40Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:40Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jg69p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jg69p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:40Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-46s5p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:56Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:56 crc kubenswrapper[4780]: I1210 10:45:56.192794 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:56Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:56 crc kubenswrapper[4780]: I1210 10:45:56.204233 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8ca741d2df061bc77e981eca8a555ba59082c6db0357093490908a1b27054340\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:56Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:56 crc kubenswrapper[4780]: I1210 10:45:56.217019 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6bf1dca1-b191-4796-b326-baac53e84045\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://439376b6f14a03790c4d49523ce4f21e3ee73ee0a81954f873409cf956048f53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sh92h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://57ec3f3a4da992b4288db7d6633da18e479fdc56b10c503a2a44368671882f0d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sh92h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:27Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-xhdr5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:56Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:56 crc kubenswrapper[4780]: I1210 10:45:56.230943 4780 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"da4d38a3-53c0-417d-a86f-3496714bd352\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:56Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:56Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7f0f1f88e79f81b145ad50cc2b0a6e66de12f33c6819aaa356c8e6f7808384f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:44:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d6c4b099607f36efe13488a53c66715ba756493eaa11612cef18b8174b7b48ef\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:44:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a352e1e0c2a6fcb27ed2d53b1952586d56239ac801471f41d319d4b9390f7334\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:44:58Z\\\"}},\\\"volumeMounts\\\":[{\\\
"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://960f2d7c617f5d0b3281d431308985bb7419af77581a404554f849d22ffa1687\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f71a2483b5040f737133b92baa6ea162f365c492a0d860a50ba8eabd7ca23519\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-10T10:45:21Z\\\",\\\"message\\\":\\\"file observer\\\\nW1210 10:45:20.821547 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1210 10:45:20.821865 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1210 10:45:20.824662 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-536389068/tls.crt::/tmp/serving-cert-536389068/tls.key\\\\\\\"\\\\nI1210 10:45:21.107127 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1210 10:45:21.109726 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1210 10:45:21.109750 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1210 10:45:21.109791 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1210 10:45:21.109797 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1210 10:45:21.115580 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1210 10:45:21.115608 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1210 10:45:21.115614 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1210 10:45:21.115621 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1210 10:45:21.115664 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1210 10:45:21.115672 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1210 10:45:21.115676 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1210 10:45:21.117097 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1210 10:45:21.119004 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:14Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://531ca3c69106ee33211f11eba4a6fcbcf6cf5b8b6ee16193b5a118999537d2e0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:00Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6a7d241fa7fd864088c8bfa29a4457b38b3e78a6846872d6f52aeebb0de99c58\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6a7d241fa7fd864088c8bfa29a4457b38b3e78a6846872d6f52aeebb0de99c58\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:44:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:44:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:44:56Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:56Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:56 crc kubenswrapper[4780]: I1210 10:45:56.240608 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:56 crc kubenswrapper[4780]: I1210 10:45:56.240688 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:56 crc kubenswrapper[4780]: I1210 10:45:56.240705 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:56 crc kubenswrapper[4780]: I1210 10:45:56.240739 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:56 crc kubenswrapper[4780]: I1210 10:45:56.240759 4780 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:56Z","lastTransitionTime":"2025-12-10T10:45:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:56 crc kubenswrapper[4780]: I1210 10:45:56.249098 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/24187953-1dc5-48d7-b00c-1e5876604b6b-metrics-certs\") pod \"network-metrics-daemon-46s5p\" (UID: \"24187953-1dc5-48d7-b00c-1e5876604b6b\") " pod="openshift-multus/network-metrics-daemon-46s5p" Dec 10 10:45:56 crc kubenswrapper[4780]: E1210 10:45:56.249387 4780 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Dec 10 10:45:56 crc kubenswrapper[4780]: E1210 10:45:56.249507 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/24187953-1dc5-48d7-b00c-1e5876604b6b-metrics-certs podName:24187953-1dc5-48d7-b00c-1e5876604b6b nodeName:}" failed. No retries permitted until 2025-12-10 10:46:12.249468235 +0000 UTC m=+77.102861678 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/24187953-1dc5-48d7-b00c-1e5876604b6b-metrics-certs") pod "network-metrics-daemon-46s5p" (UID: "24187953-1dc5-48d7-b00c-1e5876604b6b") : object "openshift-multus"/"metrics-daemon-secret" not registered Dec 10 10:45:56 crc kubenswrapper[4780]: I1210 10:45:56.343745 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:56 crc kubenswrapper[4780]: I1210 10:45:56.343802 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:56 crc kubenswrapper[4780]: I1210 10:45:56.343816 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:56 crc kubenswrapper[4780]: I1210 10:45:56.343835 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:56 crc kubenswrapper[4780]: I1210 10:45:56.343848 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:56Z","lastTransitionTime":"2025-12-10T10:45:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:45:56 crc kubenswrapper[4780]: I1210 10:45:56.447180 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:56 crc kubenswrapper[4780]: I1210 10:45:56.447238 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:56 crc kubenswrapper[4780]: I1210 10:45:56.447255 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:56 crc kubenswrapper[4780]: I1210 10:45:56.447275 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:56 crc kubenswrapper[4780]: I1210 10:45:56.447288 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:56Z","lastTransitionTime":"2025-12-10T10:45:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:56 crc kubenswrapper[4780]: I1210 10:45:56.550633 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:56 crc kubenswrapper[4780]: I1210 10:45:56.550671 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:56 crc kubenswrapper[4780]: I1210 10:45:56.550685 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:56 crc kubenswrapper[4780]: I1210 10:45:56.550701 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:56 crc kubenswrapper[4780]: I1210 10:45:56.550713 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:56Z","lastTransitionTime":"2025-12-10T10:45:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:56 crc kubenswrapper[4780]: I1210 10:45:56.654264 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:56 crc kubenswrapper[4780]: I1210 10:45:56.654332 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:56 crc kubenswrapper[4780]: I1210 10:45:56.654349 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:56 crc kubenswrapper[4780]: I1210 10:45:56.654372 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:56 crc kubenswrapper[4780]: I1210 10:45:56.654390 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:56Z","lastTransitionTime":"2025-12-10T10:45:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:45:56 crc kubenswrapper[4780]: I1210 10:45:56.757431 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:56 crc kubenswrapper[4780]: I1210 10:45:56.757499 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:56 crc kubenswrapper[4780]: I1210 10:45:56.757517 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:56 crc kubenswrapper[4780]: I1210 10:45:56.757541 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:56 crc kubenswrapper[4780]: I1210 10:45:56.757559 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:56Z","lastTransitionTime":"2025-12-10T10:45:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:56 crc kubenswrapper[4780]: I1210 10:45:56.860807 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:56 crc kubenswrapper[4780]: I1210 10:45:56.860873 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:56 crc kubenswrapper[4780]: I1210 10:45:56.860895 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:56 crc kubenswrapper[4780]: I1210 10:45:56.861002 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:56 crc kubenswrapper[4780]: I1210 10:45:56.861044 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:56Z","lastTransitionTime":"2025-12-10T10:45:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:56 crc kubenswrapper[4780]: I1210 10:45:56.958038 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-46s5p" Dec 10 10:45:56 crc kubenswrapper[4780]: I1210 10:45:56.958253 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 10 10:45:56 crc kubenswrapper[4780]: E1210 10:45:56.958330 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-46s5p" podUID="24187953-1dc5-48d7-b00c-1e5876604b6b" Dec 10 10:45:56 crc kubenswrapper[4780]: I1210 10:45:56.958379 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 10 10:45:56 crc kubenswrapper[4780]: E1210 10:45:56.958566 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 10 10:45:56 crc kubenswrapper[4780]: I1210 10:45:56.958646 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 10 10:45:56 crc kubenswrapper[4780]: E1210 10:45:56.958705 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 10 10:45:56 crc kubenswrapper[4780]: E1210 10:45:56.958776 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 10 10:45:56 crc kubenswrapper[4780]: I1210 10:45:56.963281 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:56 crc kubenswrapper[4780]: I1210 10:45:56.963334 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:56 crc kubenswrapper[4780]: I1210 10:45:56.963356 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:56 crc kubenswrapper[4780]: I1210 10:45:56.963383 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:56 crc kubenswrapper[4780]: I1210 10:45:56.963403 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:56Z","lastTransitionTime":"2025-12-10T10:45:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:45:57 crc kubenswrapper[4780]: I1210 10:45:57.066405 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:57 crc kubenswrapper[4780]: I1210 10:45:57.066496 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:57 crc kubenswrapper[4780]: I1210 10:45:57.066528 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:57 crc kubenswrapper[4780]: I1210 10:45:57.066568 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:57 crc kubenswrapper[4780]: I1210 10:45:57.066595 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:57Z","lastTransitionTime":"2025-12-10T10:45:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:57 crc kubenswrapper[4780]: I1210 10:45:57.170053 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:57 crc kubenswrapper[4780]: I1210 10:45:57.170089 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:57 crc kubenswrapper[4780]: I1210 10:45:57.170100 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:57 crc kubenswrapper[4780]: I1210 10:45:57.170116 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:57 crc kubenswrapper[4780]: I1210 10:45:57.170129 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:57Z","lastTransitionTime":"2025-12-10T10:45:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:57 crc kubenswrapper[4780]: I1210 10:45:57.273564 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:57 crc kubenswrapper[4780]: I1210 10:45:57.273618 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:57 crc kubenswrapper[4780]: I1210 10:45:57.273631 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:57 crc kubenswrapper[4780]: I1210 10:45:57.273649 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:57 crc kubenswrapper[4780]: I1210 10:45:57.273678 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:57Z","lastTransitionTime":"2025-12-10T10:45:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:45:57 crc kubenswrapper[4780]: I1210 10:45:57.298963 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 10 10:45:57 crc kubenswrapper[4780]: I1210 10:45:57.318976 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://738eca4a4800d0b83e165aaad1b57939c0fca3335dbf043de168bcc240b2c32c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:57Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:57 crc kubenswrapper[4780]: I1210 10:45:57.338509 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:57Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:57 crc kubenswrapper[4780]: I1210 10:45:57.361851 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-8cwb7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"deadb49b-61b8-435f-8168-d7bd3c01b5ad\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b46e1864cd02c0ab49ad21329dedf09667d52948a3f59eac7f32302cc0b48bda\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"syste
m-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r67pg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:27Z\\\"}}\" for pod \"openshift-multus\"/\"multus-8cwb7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:57Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:57 crc kubenswrapper[4780]: I1210 10:45:57.377482 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:57 crc kubenswrapper[4780]: I1210 10:45:57.377567 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:57 crc kubenswrapper[4780]: I1210 10:45:57.377579 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:57 crc kubenswrapper[4780]: I1210 10:45:57.377598 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:57 crc kubenswrapper[4780]: I1210 10:45:57.377611 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:57Z","lastTransitionTime":"2025-12-10T10:45:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:45:57 crc kubenswrapper[4780]: I1210 10:45:57.379678 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-rwfxn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e522fb8-b104-4f14-a3a2-628fbe0ef36c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f3318d09af03d52017f648e9727180287dbcfce150242eaeb5f95be2fa6a1374\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5855e463a188d72cb8eac92bd97b9b2f91550a9a5437d7df83405bbe7a104f5d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5855e463a188d72cb8eac92bd97b9b2f91550a9a5437d7df83405bbe7a104f5d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4a16db7b744311d27339440bc46ebd09ba4264c5892ff205c6125af2a127e67e\
\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4a16db7b744311d27339440bc46ebd09ba4264c5892ff205c6125af2a127e67e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fcbeee4d4e545150d725187049a89aa3d35ddd8f7767a6af0ea84a385454c316\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fcbeee4d4e545150d725187049a89aa3d35ddd8f7767a6af0ea84a385454c316\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3d60cbe3cc8e817975ed6843aefdbfe3233e8857a6458d77322a623ed858ca18\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3d60cbe3cc8e817975ed6843aefdbfe3233e8857a6458d77322a623ed858ca18\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"
mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0a0b3aad440bea346393ff35f0fa9492282c2a69b4a4c170f56ca7af63666077\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0a0b3aad440bea346393ff35f0fa9492282c2a69b4a4c170f56ca7af63666077\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3b72566e3e3a5f8eb12033cd2342bc3e9ad8f091f47c9d65309888d91a1ceaf4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3b72566e3e3a5f8eb12033cd2342bc3e9ad8f091f47c9d65309888d91a1ceaf4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:27Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-rwfxn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:57Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:57 crc kubenswrapper[4780]: I1210 10:45:57.394676 4780 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:57Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:57 crc kubenswrapper[4780]: I1210 10:45:57.409136 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-2lx8w" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8a3251eb-408c-42f1-b74d-261cb45eab71\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://414f485a56b9fb2fbea2bbc9687a588aabda6239cad44f8116ec680597dbe667\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lgkg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:27Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-2lx8w\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:57Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:57 crc kubenswrapper[4780]: I1210 10:45:57.429767 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-fpl55" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cc22221d-0c02-4e8c-8314-c2e6d9290b5e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ef0cc518de07a141be124545f2e547c630329ff507b0e0e71e4dee9b4be5d89a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ce7c3983123be2f92944322c2cf554e67cf80a51981415d9872bfc2db9f1ed3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cafa00280d671b48ed3628917793412a20b9d3be93a80d9a7775003081aea1ab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name
\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dbc444116b8592021f7d6ba0387d473521cba7c09ca78384be81f688af54554f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://782bcb7eb8341b73ff1c32c5af00aaefa2a513e5d1672f9f7df13cd05467131c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c39c3d56584d0939bfa9be938be09db5c93ddf50e0a56cacaa6e30c7b79475a1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\
"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d077dbe18449f6a39b341d00aa17f12c5f31287117bd944c6e65135f02fd3b13\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://547474a5e2f6912cd7f0fd6c25cdaf68ff49af2ba46c782bf7dc81e8052dbcfc\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-10T10:45:47Z\\\",\\\"message\\\":\\\"e (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1210 10:45:46.406982 5937 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1210 10:45:46.407076 5937 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1210 10:45:46.407098 5937 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1210 10:45:46.407136 5937 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI1210 10:45:46.407172 5937 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1210 10:45:46.407186 5937 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI1210 10:45:46.407183 5937 handler.go:208] Removed *v1.Node event handler 2\\\\nI1210 10:45:46.407206 5937 handler.go:208] Removed *v1.Node event handler 7\\\\nI1210 10:45:46.407210 5937 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI1210 10:45:46.407161 5937 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1210 10:45:46.407235 5937 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1210 10:45:46.407243 5937 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1210 10:45:46.407286 5937 factory.go:656] Stopping watch factory\\\\nI1210 10:45:46.407293 5937 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1210 10:45:46.407318 5937 ovnkube.go:599] Stopped ovnkube\\\\nI1210 10:45:4\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:36Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d077dbe18449f6a39b341d00aa17f12c5f31287117bd944c6e65135f02fd3b13\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-10T10:45:50Z\\\",\\\"message\\\":\\\"rator_TCP_cluster options:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.5.21:8443:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {58a148b3-0a7b-4412-b447-f87788c4883f}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1210 10:45:49.480966 6272 obj_retry.go:420] Function iterateRetryResources for *v1.Pod ended (in 215.696µs)\\\\nF1210 10:45:49.480983 6272 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared 
informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:49Z is after 2025-08-24T17:21:41\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://df663a4e7ac68c8967db1167ea07b83862e9d52aa13b3f10e51934bb18e7397e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\
"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ada0c2d5e74e1805777a330c39a3c1c0fb677f876387207a83c79bd7ed06775a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ada0c2d5e74e1805777a330c39a3c1c0fb677f876387207a83c79bd7ed06775a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:27Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-fpl55\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:57Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:57 crc kubenswrapper[4780]: I1210 10:45:57.444290 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-6m7tj" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a78da08f-1ec1-4cc7-af55-d527da423778\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://920fb034d3d8ce6bf28b128e33093aa5daa724b15c51c7e208ed94ccc2f4840e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jvtxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a4509159a8aa8b24876c86f827f346063fd6cd2603c15242f6507b0a4afaff3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jvtxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:38Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-6m7tj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:57Z is after 2025-08-24T17:21:41Z" Dec 10 
10:45:57 crc kubenswrapper[4780]: I1210 10:45:57.459411 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-46s5p" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"24187953-1dc5-48d7-b00c-1e5876604b6b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:40Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:40Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jg69p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jg69p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:40Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-46s5p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:57Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:57 crc kubenswrapper[4780]: I1210 10:45:57.477529 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"da4d38a3-53c0-417d-a86f-3496714bd352\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7f0f1f88e79f81b145ad50cc2b0a6e66de12f33c6819aaa356c8e6f7808384f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:44:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d6c4b099607f36efe13488a53c66715ba756493eaa11612cef18b8174b7b48ef\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:44:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a352e1e0c2a6fcb27ed2d53b1952586d56239ac801471f41d319d4b9390f7334\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:44:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://960f2d7c617f5d0b3281d431308985bb7419af77581a404554f849d22ffa1687\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f71a2483b5040f737133b92baa6ea162f365c492a0d860a50ba8eabd7ca23519\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-10T10:45:21Z\\\",\\\"message\\\":\\\"file observer\\\\nW1210 10:45:20.821547 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1210 10:45:20.821865 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1210 10:45:20.824662 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-536389068/tls.crt::/tmp/serving-cert-536389068/tls.key\\\\\\\"\\\\nI1210 10:45:21.107127 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1210 10:45:21.109726 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1210 10:45:21.109750 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1210 10:45:21.109791 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1210 10:45:21.109797 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1210 10:45:21.115580 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1210 10:45:21.115608 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1210 10:45:21.115614 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1210 10:45:21.115621 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1210 10:45:21.115664 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1210 10:45:21.115672 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1210 10:45:21.115676 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1210 10:45:21.117097 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1210 10:45:21.119004 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:14Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://531ca3c69106ee33211f11eba4a6fcbcf6cf5b8b6ee16193b5a118999537d2e0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:00Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6a7d241fa7fd864088c8bfa29a4457b38b3e78a6846872d6f52aeebb0de99c58\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6a7d241fa7fd864088c8bfa29a4457b38b3e78a6846872d6f52aeebb0de99c58\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:44:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:44:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:44:56Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:57Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:57 crc kubenswrapper[4780]: I1210 10:45:57.480360 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:57 crc kubenswrapper[4780]: I1210 10:45:57.480440 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:57 crc kubenswrapper[4780]: I1210 10:45:57.480451 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:57 crc kubenswrapper[4780]: I1210 10:45:57.480471 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:57 crc kubenswrapper[4780]: I1210 10:45:57.480488 4780 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:57Z","lastTransitionTime":"2025-12-10T10:45:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:57 crc kubenswrapper[4780]: I1210 10:45:57.492252 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8ca741d2df061bc77e981eca8a555ba59082c6db0357093490908a1b27054340\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:57Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:57 crc kubenswrapper[4780]: I1210 10:45:57.508319 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6bf1dca1-b191-4796-b326-baac53e84045\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://439376b6f14a03790c4d49523ce4f21e3ee73ee0a81954f873409cf956048f53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sh92h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://57ec3f3a4da992b4288db7d6633da18e479fdc56b10c503a2a44368671882f0d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sh92h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:27Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-xhdr5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:57Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:57 crc kubenswrapper[4780]: I1210 10:45:57.522686 4780 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-image-registry/node-ca-msm77" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c1046aa3-9bf1-4013-8e8d-5629f08ed5e2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6a59aa8cd7a70e094ff80b0ae626aaabb77fb7ffdcffa1320945909c32da0704\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2ttwk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:30Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-msm77\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:57Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:57 crc kubenswrapper[4780]: I1210 10:45:57.540585 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"056c06c7-eb0f-4fb7-9f86-2884fd0d1e60\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b9592145faf65060693a3f1e14db253e10cd09c642ea0aa1a7682f5b06872f14\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:44:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://eccc5070135705c193e1020872bffc1b8aeb70dbe82f1bb520f36f012ca00703\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:44:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://72f3bd530fbcc66ed779ee9f6ef1c442f2697ca027aad5bd2473f42101d55528\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:44:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3970fa6bd2aa3e93a4473d78218e9dd494aa5f7056efbcfb123160bb63162192\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3970fa6bd2aa3e93a4473d78218e9dd494aa5f7056efbcfb123160bb63162192\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:44:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:44:57Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:44:56Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:57Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:57 crc kubenswrapper[4780]: I1210 10:45:57.555977 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:57Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:57 crc kubenswrapper[4780]: I1210 10:45:57.569783 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://acd480054c5d19efdbcce679b2246ce2972593aa710b1074a2fa90006f351c66\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7c28d1629c1d97373d475f14e54585e81a67114d5a69e21be9d1e750040d6d5c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"m
ountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:57Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:57 crc kubenswrapper[4780]: I1210 10:45:57.583351 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:57 crc kubenswrapper[4780]: I1210 10:45:57.583424 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:57 crc kubenswrapper[4780]: I1210 10:45:57.583439 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:57 crc kubenswrapper[4780]: I1210 10:45:57.583457 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:57 crc kubenswrapper[4780]: I1210 10:45:57.583472 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:57Z","lastTransitionTime":"2025-12-10T10:45:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:57 crc kubenswrapper[4780]: I1210 10:45:57.686382 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:57 crc kubenswrapper[4780]: I1210 10:45:57.686423 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:57 crc kubenswrapper[4780]: I1210 10:45:57.686433 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:57 crc kubenswrapper[4780]: I1210 10:45:57.686458 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:57 crc kubenswrapper[4780]: I1210 10:45:57.686469 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:57Z","lastTransitionTime":"2025-12-10T10:45:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:45:57 crc kubenswrapper[4780]: I1210 10:45:57.789271 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:57 crc kubenswrapper[4780]: I1210 10:45:57.789305 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:57 crc kubenswrapper[4780]: I1210 10:45:57.789316 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:57 crc kubenswrapper[4780]: I1210 10:45:57.789330 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:57 crc kubenswrapper[4780]: I1210 10:45:57.789339 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:57Z","lastTransitionTime":"2025-12-10T10:45:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:57 crc kubenswrapper[4780]: I1210 10:45:57.892423 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:57 crc kubenswrapper[4780]: I1210 10:45:57.892516 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:57 crc kubenswrapper[4780]: I1210 10:45:57.892535 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:57 crc kubenswrapper[4780]: I1210 10:45:57.892558 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:57 crc kubenswrapper[4780]: I1210 10:45:57.892575 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:57Z","lastTransitionTime":"2025-12-10T10:45:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:57 crc kubenswrapper[4780]: I1210 10:45:57.995199 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:57 crc kubenswrapper[4780]: I1210 10:45:57.995578 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:57 crc kubenswrapper[4780]: I1210 10:45:57.995667 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:57 crc kubenswrapper[4780]: I1210 10:45:57.995747 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:57 crc kubenswrapper[4780]: I1210 10:45:57.996001 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:57Z","lastTransitionTime":"2025-12-10T10:45:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:45:58 crc kubenswrapper[4780]: I1210 10:45:58.099189 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:58 crc kubenswrapper[4780]: I1210 10:45:58.099243 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:58 crc kubenswrapper[4780]: I1210 10:45:58.099256 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:58 crc kubenswrapper[4780]: I1210 10:45:58.099276 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:58 crc kubenswrapper[4780]: I1210 10:45:58.099292 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:58Z","lastTransitionTime":"2025-12-10T10:45:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:58 crc kubenswrapper[4780]: I1210 10:45:58.202603 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:58 crc kubenswrapper[4780]: I1210 10:45:58.202648 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:58 crc kubenswrapper[4780]: I1210 10:45:58.202659 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:58 crc kubenswrapper[4780]: I1210 10:45:58.202675 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:58 crc kubenswrapper[4780]: I1210 10:45:58.202687 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:58Z","lastTransitionTime":"2025-12-10T10:45:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:58 crc kubenswrapper[4780]: I1210 10:45:58.306526 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:58 crc kubenswrapper[4780]: I1210 10:45:58.306607 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:58 crc kubenswrapper[4780]: I1210 10:45:58.306617 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:58 crc kubenswrapper[4780]: I1210 10:45:58.306638 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:58 crc kubenswrapper[4780]: I1210 10:45:58.306691 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:58Z","lastTransitionTime":"2025-12-10T10:45:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:45:58 crc kubenswrapper[4780]: I1210 10:45:58.409857 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:58 crc kubenswrapper[4780]: I1210 10:45:58.410196 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:58 crc kubenswrapper[4780]: I1210 10:45:58.410338 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:58 crc kubenswrapper[4780]: I1210 10:45:58.410446 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:58 crc kubenswrapper[4780]: I1210 10:45:58.410554 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:58Z","lastTransitionTime":"2025-12-10T10:45:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:58 crc kubenswrapper[4780]: I1210 10:45:58.515298 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:58 crc kubenswrapper[4780]: I1210 10:45:58.515714 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:58 crc kubenswrapper[4780]: I1210 10:45:58.515827 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:58 crc kubenswrapper[4780]: I1210 10:45:58.515971 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:58 crc kubenswrapper[4780]: I1210 10:45:58.516086 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:58Z","lastTransitionTime":"2025-12-10T10:45:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:58 crc kubenswrapper[4780]: I1210 10:45:58.618608 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:58 crc kubenswrapper[4780]: I1210 10:45:58.618638 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:58 crc kubenswrapper[4780]: I1210 10:45:58.618646 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:58 crc kubenswrapper[4780]: I1210 10:45:58.618660 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:58 crc kubenswrapper[4780]: I1210 10:45:58.618669 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:58Z","lastTransitionTime":"2025-12-10T10:45:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:45:58 crc kubenswrapper[4780]: I1210 10:45:58.721525 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:58 crc kubenswrapper[4780]: I1210 10:45:58.721587 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:58 crc kubenswrapper[4780]: I1210 10:45:58.721600 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:58 crc kubenswrapper[4780]: I1210 10:45:58.721620 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:58 crc kubenswrapper[4780]: I1210 10:45:58.721633 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:58Z","lastTransitionTime":"2025-12-10T10:45:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:58 crc kubenswrapper[4780]: I1210 10:45:58.825093 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:58 crc kubenswrapper[4780]: I1210 10:45:58.825198 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:58 crc kubenswrapper[4780]: I1210 10:45:58.825214 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:58 crc kubenswrapper[4780]: I1210 10:45:58.825237 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:58 crc kubenswrapper[4780]: I1210 10:45:58.825279 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:58Z","lastTransitionTime":"2025-12-10T10:45:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:58 crc kubenswrapper[4780]: I1210 10:45:58.928443 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:58 crc kubenswrapper[4780]: I1210 10:45:58.928525 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:58 crc kubenswrapper[4780]: I1210 10:45:58.928536 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:58 crc kubenswrapper[4780]: I1210 10:45:58.928554 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:58 crc kubenswrapper[4780]: I1210 10:45:58.928565 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:58Z","lastTransitionTime":"2025-12-10T10:45:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:45:58 crc kubenswrapper[4780]: I1210 10:45:58.958058 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 10 10:45:58 crc kubenswrapper[4780]: E1210 10:45:58.958231 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 10 10:45:58 crc kubenswrapper[4780]: I1210 10:45:58.958840 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-46s5p" Dec 10 10:45:58 crc kubenswrapper[4780]: E1210 10:45:58.958954 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-46s5p" podUID="24187953-1dc5-48d7-b00c-1e5876604b6b" Dec 10 10:45:58 crc kubenswrapper[4780]: I1210 10:45:58.959023 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 10 10:45:58 crc kubenswrapper[4780]: E1210 10:45:58.959104 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 10 10:45:58 crc kubenswrapper[4780]: I1210 10:45:58.959158 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 10 10:45:58 crc kubenswrapper[4780]: E1210 10:45:58.959217 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 10 10:45:59 crc kubenswrapper[4780]: I1210 10:45:59.031296 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:59 crc kubenswrapper[4780]: I1210 10:45:59.031332 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:59 crc kubenswrapper[4780]: I1210 10:45:59.031343 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:59 crc kubenswrapper[4780]: I1210 10:45:59.031359 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:59 crc kubenswrapper[4780]: I1210 10:45:59.031370 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:59Z","lastTransitionTime":"2025-12-10T10:45:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:59 crc kubenswrapper[4780]: I1210 10:45:59.134159 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:59 crc kubenswrapper[4780]: I1210 10:45:59.134500 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:59 crc kubenswrapper[4780]: I1210 10:45:59.134595 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:59 crc kubenswrapper[4780]: I1210 10:45:59.134741 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:59 crc kubenswrapper[4780]: I1210 10:45:59.134835 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:59Z","lastTransitionTime":"2025-12-10T10:45:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:45:59 crc kubenswrapper[4780]: I1210 10:45:59.238140 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:59 crc kubenswrapper[4780]: I1210 10:45:59.238189 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:59 crc kubenswrapper[4780]: I1210 10:45:59.238202 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:59 crc kubenswrapper[4780]: I1210 10:45:59.238216 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:59 crc kubenswrapper[4780]: I1210 10:45:59.238227 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:59Z","lastTransitionTime":"2025-12-10T10:45:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:59 crc kubenswrapper[4780]: I1210 10:45:59.341379 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:59 crc kubenswrapper[4780]: I1210 10:45:59.341442 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:59 crc kubenswrapper[4780]: I1210 10:45:59.341454 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:59 crc kubenswrapper[4780]: I1210 10:45:59.341480 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:59 crc kubenswrapper[4780]: I1210 10:45:59.341498 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:59Z","lastTransitionTime":"2025-12-10T10:45:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:59 crc kubenswrapper[4780]: I1210 10:45:59.408392 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:59 crc kubenswrapper[4780]: I1210 10:45:59.408446 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:59 crc kubenswrapper[4780]: I1210 10:45:59.408460 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:59 crc kubenswrapper[4780]: I1210 10:45:59.408478 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:59 crc kubenswrapper[4780]: I1210 10:45:59.408489 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:59Z","lastTransitionTime":"2025-12-10T10:45:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:45:59 crc kubenswrapper[4780]: E1210 10:45:59.422962 4780 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:45:59Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:59Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:45:59Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:59Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:45:59Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:59Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:45:59Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:59Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"90085e8a-5ea9-4564-85e4-5635b00d094d\\\",\\\"systemUUID\\\":\\\"0182e509-70c5-4f26-9ad3-610230bb601e\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:59Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:59 crc kubenswrapper[4780]: I1210 10:45:59.428934 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:59 crc kubenswrapper[4780]: I1210 10:45:59.428984 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Dec 10 10:45:59 crc kubenswrapper[4780]: I1210 10:45:59.428994 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:59 crc kubenswrapper[4780]: I1210 10:45:59.429013 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:59 crc kubenswrapper[4780]: I1210 10:45:59.429027 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:59Z","lastTransitionTime":"2025-12-10T10:45:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:59 crc kubenswrapper[4780]: E1210 10:45:59.441266 4780 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:45:59Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:59Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:45:59Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:59Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:45:59Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:59Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:45:59Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:59Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"90085e8a-5ea9-4564-85e4-5635b00d094d\\\",\\\"systemUUID\\\":\\\"0182e509-70c5-4f26-9ad3-610230bb601e\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:59Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:59 crc kubenswrapper[4780]: I1210 10:45:59.445447 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:59 crc kubenswrapper[4780]: I1210 10:45:59.445499 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Dec 10 10:45:59 crc kubenswrapper[4780]: I1210 10:45:59.445515 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:59 crc kubenswrapper[4780]: I1210 10:45:59.445535 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:59 crc kubenswrapper[4780]: I1210 10:45:59.445548 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:59Z","lastTransitionTime":"2025-12-10T10:45:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:59 crc kubenswrapper[4780]: E1210 10:45:59.460298 4780 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:45:59Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:59Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:45:59Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:59Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:45:59Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:59Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:45:59Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:59Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"90085e8a-5ea9-4564-85e4-5635b00d094d\\\",\\\"systemUUID\\\":\\\"0182e509-70c5-4f26-9ad3-610230bb601e\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:59Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:59 crc kubenswrapper[4780]: I1210 10:45:59.464504 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:59 crc kubenswrapper[4780]: I1210 10:45:59.464541 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Dec 10 10:45:59 crc kubenswrapper[4780]: I1210 10:45:59.464555 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:59 crc kubenswrapper[4780]: I1210 10:45:59.464573 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:59 crc kubenswrapper[4780]: I1210 10:45:59.464608 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:59Z","lastTransitionTime":"2025-12-10T10:45:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:59 crc kubenswrapper[4780]: E1210 10:45:59.478760 4780 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:45:59Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:59Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:45:59Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:59Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:45:59Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:59Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:45:59Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:59Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"90085e8a-5ea9-4564-85e4-5635b00d094d\\\",\\\"systemUUID\\\":\\\"0182e509-70c5-4f26-9ad3-610230bb601e\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:59Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:59 crc kubenswrapper[4780]: I1210 10:45:59.482663 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:59 crc kubenswrapper[4780]: I1210 10:45:59.482703 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Dec 10 10:45:59 crc kubenswrapper[4780]: I1210 10:45:59.482716 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:59 crc kubenswrapper[4780]: I1210 10:45:59.482734 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:59 crc kubenswrapper[4780]: I1210 10:45:59.482748 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:59Z","lastTransitionTime":"2025-12-10T10:45:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:59 crc kubenswrapper[4780]: I1210 10:45:59.484161 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 10 10:45:59 crc kubenswrapper[4780]: I1210 10:45:59.484273 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 10 10:45:59 crc kubenswrapper[4780]: I1210 10:45:59.484326 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 10 10:45:59 crc kubenswrapper[4780]: I1210 10:45:59.484366 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 10 10:45:59 crc kubenswrapper[4780]: E1210 10:45:59.484399 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-10 10:46:31.484374879 +0000 UTC m=+96.337768322 (durationBeforeRetry 32s). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:45:59 crc kubenswrapper[4780]: E1210 10:45:59.484433 4780 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Dec 10 10:45:59 crc kubenswrapper[4780]: I1210 10:45:59.484462 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 10 10:45:59 crc kubenswrapper[4780]: E1210 10:45:59.484473 4780 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Dec 10 10:45:59 crc kubenswrapper[4780]: E1210 10:45:59.484500 4780 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Dec 10 10:45:59 crc kubenswrapper[4780]: E1210 10:45:59.484514 4780 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Dec 10 10:45:59 crc kubenswrapper[4780]: E1210 10:45:59.484487 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-12-10 10:46:31.484476882 +0000 UTC m=+96.337870325 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Dec 10 10:45:59 crc kubenswrapper[4780]: E1210 10:45:59.484560 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-12-10 10:46:31.484548234 +0000 UTC m=+96.337941677 (durationBeforeRetry 32s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Dec 10 10:45:59 crc kubenswrapper[4780]: E1210 10:45:59.484562 4780 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Dec 10 10:45:59 crc kubenswrapper[4780]: E1210 10:45:59.484625 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-12-10 10:46:31.484613875 +0000 UTC m=+96.338007318 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Dec 10 10:45:59 crc kubenswrapper[4780]: E1210 10:45:59.484898 4780 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Dec 10 10:45:59 crc kubenswrapper[4780]: E1210 10:45:59.485024 4780 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Dec 10 10:45:59 crc kubenswrapper[4780]: E1210 10:45:59.485056 4780 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Dec 10 10:45:59 crc kubenswrapper[4780]: E1210 10:45:59.485203 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-12-10 10:46:31.485168419 +0000 UTC m=+96.338561862 (durationBeforeRetry 32s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Dec 10 10:45:59 crc kubenswrapper[4780]: E1210 10:45:59.498765 4780 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:45:59Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:59Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:45:59Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:59Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:45:59Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:59Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:45:59Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:59Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"90085e8a-5ea9-4564-85e4-5635b00d094d\\\",\\\"systemUUID\\\":\\\"0182e509-70c5-4f26-9ad3-610230bb601e\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:59Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:59 crc kubenswrapper[4780]: E1210 10:45:59.499021 4780 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Dec 10 10:45:59 crc kubenswrapper[4780]: I1210 10:45:59.501567 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Dec 10 10:45:59 crc kubenswrapper[4780]: I1210 10:45:59.501703 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:59 crc kubenswrapper[4780]: I1210 10:45:59.501813 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:59 crc kubenswrapper[4780]: I1210 10:45:59.501954 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:59 crc kubenswrapper[4780]: I1210 10:45:59.502100 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:59Z","lastTransitionTime":"2025-12-10T10:45:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:59 crc kubenswrapper[4780]: I1210 10:45:59.604969 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:59 crc kubenswrapper[4780]: I1210 10:45:59.605086 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:59 crc kubenswrapper[4780]: I1210 10:45:59.605368 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:59 crc kubenswrapper[4780]: I1210 10:45:59.605402 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:59 crc kubenswrapper[4780]: I1210 10:45:59.605417 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:59Z","lastTransitionTime":"2025-12-10T10:45:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:59 crc kubenswrapper[4780]: I1210 10:45:59.710776 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:59 crc kubenswrapper[4780]: I1210 10:45:59.711562 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:59 crc kubenswrapper[4780]: I1210 10:45:59.711659 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:59 crc kubenswrapper[4780]: I1210 10:45:59.711757 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:59 crc kubenswrapper[4780]: I1210 10:45:59.711851 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:59Z","lastTransitionTime":"2025-12-10T10:45:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:45:59 crc kubenswrapper[4780]: I1210 10:45:59.815258 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:59 crc kubenswrapper[4780]: I1210 10:45:59.815335 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:59 crc kubenswrapper[4780]: I1210 10:45:59.815351 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:59 crc kubenswrapper[4780]: I1210 10:45:59.815374 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:59 crc kubenswrapper[4780]: I1210 10:45:59.815387 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:59Z","lastTransitionTime":"2025-12-10T10:45:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:59 crc kubenswrapper[4780]: I1210 10:45:59.925049 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:59 crc kubenswrapper[4780]: I1210 10:45:59.925216 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:59 crc kubenswrapper[4780]: I1210 10:45:59.925245 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:59 crc kubenswrapper[4780]: I1210 10:45:59.925378 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:59 crc kubenswrapper[4780]: I1210 10:45:59.925449 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:59Z","lastTransitionTime":"2025-12-10T10:45:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:00 crc kubenswrapper[4780]: I1210 10:46:00.029363 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:00 crc kubenswrapper[4780]: I1210 10:46:00.029412 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:00 crc kubenswrapper[4780]: I1210 10:46:00.029422 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:00 crc kubenswrapper[4780]: I1210 10:46:00.029440 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:00 crc kubenswrapper[4780]: I1210 10:46:00.029450 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:00Z","lastTransitionTime":"2025-12-10T10:46:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:00 crc kubenswrapper[4780]: I1210 10:46:00.133738 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:00 crc kubenswrapper[4780]: I1210 10:46:00.133797 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:00 crc kubenswrapper[4780]: I1210 10:46:00.133809 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:00 crc kubenswrapper[4780]: I1210 10:46:00.133834 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:00 crc kubenswrapper[4780]: I1210 10:46:00.133847 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:00Z","lastTransitionTime":"2025-12-10T10:46:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:00 crc kubenswrapper[4780]: I1210 10:46:00.236933 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:00 crc kubenswrapper[4780]: I1210 10:46:00.236990 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:00 crc kubenswrapper[4780]: I1210 10:46:00.237002 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:00 crc kubenswrapper[4780]: I1210 10:46:00.237021 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:00 crc kubenswrapper[4780]: I1210 10:46:00.237034 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:00Z","lastTransitionTime":"2025-12-10T10:46:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:00 crc kubenswrapper[4780]: I1210 10:46:00.340714 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:00 crc kubenswrapper[4780]: I1210 10:46:00.340770 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:00 crc kubenswrapper[4780]: I1210 10:46:00.340781 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:00 crc kubenswrapper[4780]: I1210 10:46:00.340803 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:00 crc kubenswrapper[4780]: I1210 10:46:00.340815 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:00Z","lastTransitionTime":"2025-12-10T10:46:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:00 crc kubenswrapper[4780]: I1210 10:46:00.443809 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:00 crc kubenswrapper[4780]: I1210 10:46:00.443952 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:00 crc kubenswrapper[4780]: I1210 10:46:00.443972 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:00 crc kubenswrapper[4780]: I1210 10:46:00.443995 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:00 crc kubenswrapper[4780]: I1210 10:46:00.444009 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:00Z","lastTransitionTime":"2025-12-10T10:46:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:00 crc kubenswrapper[4780]: I1210 10:46:00.547216 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:00 crc kubenswrapper[4780]: I1210 10:46:00.547275 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:00 crc kubenswrapper[4780]: I1210 10:46:00.547289 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:00 crc kubenswrapper[4780]: I1210 10:46:00.547311 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:00 crc kubenswrapper[4780]: I1210 10:46:00.547324 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:00Z","lastTransitionTime":"2025-12-10T10:46:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:00 crc kubenswrapper[4780]: I1210 10:46:00.651505 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:00 crc kubenswrapper[4780]: I1210 10:46:00.651556 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:00 crc kubenswrapper[4780]: I1210 10:46:00.651570 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:00 crc kubenswrapper[4780]: I1210 10:46:00.651591 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:00 crc kubenswrapper[4780]: I1210 10:46:00.651603 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:00Z","lastTransitionTime":"2025-12-10T10:46:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:00 crc kubenswrapper[4780]: I1210 10:46:00.754704 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:00 crc kubenswrapper[4780]: I1210 10:46:00.754753 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:00 crc kubenswrapper[4780]: I1210 10:46:00.754783 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:00 crc kubenswrapper[4780]: I1210 10:46:00.754805 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:00 crc kubenswrapper[4780]: I1210 10:46:00.754820 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:00Z","lastTransitionTime":"2025-12-10T10:46:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:00 crc kubenswrapper[4780]: I1210 10:46:00.858072 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:00 crc kubenswrapper[4780]: I1210 10:46:00.858155 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:00 crc kubenswrapper[4780]: I1210 10:46:00.858172 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:00 crc kubenswrapper[4780]: I1210 10:46:00.859278 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:00 crc kubenswrapper[4780]: I1210 10:46:00.859877 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:00Z","lastTransitionTime":"2025-12-10T10:46:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:00 crc kubenswrapper[4780]: I1210 10:46:00.958131 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 10 10:46:00 crc kubenswrapper[4780]: I1210 10:46:00.958195 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 10 10:46:00 crc kubenswrapper[4780]: I1210 10:46:00.958131 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-46s5p" Dec 10 10:46:00 crc kubenswrapper[4780]: I1210 10:46:00.958402 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 10 10:46:00 crc kubenswrapper[4780]: E1210 10:46:00.958513 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 10 10:46:00 crc kubenswrapper[4780]: E1210 10:46:00.958690 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-46s5p" podUID="24187953-1dc5-48d7-b00c-1e5876604b6b" Dec 10 10:46:00 crc kubenswrapper[4780]: E1210 10:46:00.958812 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 10 10:46:00 crc kubenswrapper[4780]: E1210 10:46:00.959112 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 10 10:46:00 crc kubenswrapper[4780]: I1210 10:46:00.963123 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:00 crc kubenswrapper[4780]: I1210 10:46:00.963167 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:00 crc kubenswrapper[4780]: I1210 10:46:00.963181 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:00 crc kubenswrapper[4780]: I1210 10:46:00.963202 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:00 crc kubenswrapper[4780]: I1210 10:46:00.963214 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:00Z","lastTransitionTime":"2025-12-10T10:46:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:01 crc kubenswrapper[4780]: I1210 10:46:01.066117 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:01 crc kubenswrapper[4780]: I1210 10:46:01.066170 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:01 crc kubenswrapper[4780]: I1210 10:46:01.066185 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:01 crc kubenswrapper[4780]: I1210 10:46:01.066204 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:01 crc kubenswrapper[4780]: I1210 10:46:01.066217 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:01Z","lastTransitionTime":"2025-12-10T10:46:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:01 crc kubenswrapper[4780]: I1210 10:46:01.168744 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:01 crc kubenswrapper[4780]: I1210 10:46:01.168793 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:01 crc kubenswrapper[4780]: I1210 10:46:01.168804 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:01 crc kubenswrapper[4780]: I1210 10:46:01.168821 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:01 crc kubenswrapper[4780]: I1210 10:46:01.168835 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:01Z","lastTransitionTime":"2025-12-10T10:46:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:01 crc kubenswrapper[4780]: I1210 10:46:01.271011 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:01 crc kubenswrapper[4780]: I1210 10:46:01.271071 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:01 crc kubenswrapper[4780]: I1210 10:46:01.271131 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:01 crc kubenswrapper[4780]: I1210 10:46:01.271158 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:01 crc kubenswrapper[4780]: I1210 10:46:01.271176 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:01Z","lastTransitionTime":"2025-12-10T10:46:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:01 crc kubenswrapper[4780]: I1210 10:46:01.373624 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:01 crc kubenswrapper[4780]: I1210 10:46:01.373686 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:01 crc kubenswrapper[4780]: I1210 10:46:01.373713 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:01 crc kubenswrapper[4780]: I1210 10:46:01.373732 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:01 crc kubenswrapper[4780]: I1210 10:46:01.373744 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:01Z","lastTransitionTime":"2025-12-10T10:46:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:01 crc kubenswrapper[4780]: I1210 10:46:01.477247 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:01 crc kubenswrapper[4780]: I1210 10:46:01.477288 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:01 crc kubenswrapper[4780]: I1210 10:46:01.477305 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:01 crc kubenswrapper[4780]: I1210 10:46:01.477323 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:01 crc kubenswrapper[4780]: I1210 10:46:01.477334 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:01Z","lastTransitionTime":"2025-12-10T10:46:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:01 crc kubenswrapper[4780]: I1210 10:46:01.580587 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:01 crc kubenswrapper[4780]: I1210 10:46:01.580643 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:01 crc kubenswrapper[4780]: I1210 10:46:01.580655 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:01 crc kubenswrapper[4780]: I1210 10:46:01.580677 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:01 crc kubenswrapper[4780]: I1210 10:46:01.580695 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:01Z","lastTransitionTime":"2025-12-10T10:46:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:01 crc kubenswrapper[4780]: I1210 10:46:01.683263 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:01 crc kubenswrapper[4780]: I1210 10:46:01.683306 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:01 crc kubenswrapper[4780]: I1210 10:46:01.683319 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:01 crc kubenswrapper[4780]: I1210 10:46:01.683340 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:01 crc kubenswrapper[4780]: I1210 10:46:01.683352 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:01Z","lastTransitionTime":"2025-12-10T10:46:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:01 crc kubenswrapper[4780]: I1210 10:46:01.786960 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:01 crc kubenswrapper[4780]: I1210 10:46:01.787024 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:01 crc kubenswrapper[4780]: I1210 10:46:01.787040 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:01 crc kubenswrapper[4780]: I1210 10:46:01.787061 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:01 crc kubenswrapper[4780]: I1210 10:46:01.787074 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:01Z","lastTransitionTime":"2025-12-10T10:46:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:01 crc kubenswrapper[4780]: I1210 10:46:01.890611 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:01 crc kubenswrapper[4780]: I1210 10:46:01.890693 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:01 crc kubenswrapper[4780]: I1210 10:46:01.890714 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:01 crc kubenswrapper[4780]: I1210 10:46:01.890740 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:01 crc kubenswrapper[4780]: I1210 10:46:01.890754 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:01Z","lastTransitionTime":"2025-12-10T10:46:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:01 crc kubenswrapper[4780]: I1210 10:46:01.994198 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:01 crc kubenswrapper[4780]: I1210 10:46:01.994245 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:01 crc kubenswrapper[4780]: I1210 10:46:01.994259 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:01 crc kubenswrapper[4780]: I1210 10:46:01.994285 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:01 crc kubenswrapper[4780]: I1210 10:46:01.994297 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:01Z","lastTransitionTime":"2025-12-10T10:46:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:02 crc kubenswrapper[4780]: I1210 10:46:02.097942 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:02 crc kubenswrapper[4780]: I1210 10:46:02.097995 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:02 crc kubenswrapper[4780]: I1210 10:46:02.098005 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:02 crc kubenswrapper[4780]: I1210 10:46:02.098026 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:02 crc kubenswrapper[4780]: I1210 10:46:02.098037 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:02Z","lastTransitionTime":"2025-12-10T10:46:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:02 crc kubenswrapper[4780]: I1210 10:46:02.201365 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:02 crc kubenswrapper[4780]: I1210 10:46:02.201425 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:02 crc kubenswrapper[4780]: I1210 10:46:02.201435 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:02 crc kubenswrapper[4780]: I1210 10:46:02.201454 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:02 crc kubenswrapper[4780]: I1210 10:46:02.201463 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:02Z","lastTransitionTime":"2025-12-10T10:46:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:02 crc kubenswrapper[4780]: I1210 10:46:02.304347 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:02 crc kubenswrapper[4780]: I1210 10:46:02.304404 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:02 crc kubenswrapper[4780]: I1210 10:46:02.304417 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:02 crc kubenswrapper[4780]: I1210 10:46:02.304432 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:02 crc kubenswrapper[4780]: I1210 10:46:02.304442 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:02Z","lastTransitionTime":"2025-12-10T10:46:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:02 crc kubenswrapper[4780]: I1210 10:46:02.406906 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:02 crc kubenswrapper[4780]: I1210 10:46:02.406973 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:02 crc kubenswrapper[4780]: I1210 10:46:02.406987 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:02 crc kubenswrapper[4780]: I1210 10:46:02.407010 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:02 crc kubenswrapper[4780]: I1210 10:46:02.407024 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:02Z","lastTransitionTime":"2025-12-10T10:46:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:02 crc kubenswrapper[4780]: I1210 10:46:02.510121 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:02 crc kubenswrapper[4780]: I1210 10:46:02.510179 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:02 crc kubenswrapper[4780]: I1210 10:46:02.510196 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:02 crc kubenswrapper[4780]: I1210 10:46:02.510217 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:02 crc kubenswrapper[4780]: I1210 10:46:02.510230 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:02Z","lastTransitionTime":"2025-12-10T10:46:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:02 crc kubenswrapper[4780]: I1210 10:46:02.613623 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:02 crc kubenswrapper[4780]: I1210 10:46:02.613685 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:02 crc kubenswrapper[4780]: I1210 10:46:02.613699 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:02 crc kubenswrapper[4780]: I1210 10:46:02.613718 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:02 crc kubenswrapper[4780]: I1210 10:46:02.613734 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:02Z","lastTransitionTime":"2025-12-10T10:46:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:02 crc kubenswrapper[4780]: I1210 10:46:02.717049 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:02 crc kubenswrapper[4780]: I1210 10:46:02.717122 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:02 crc kubenswrapper[4780]: I1210 10:46:02.717136 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:02 crc kubenswrapper[4780]: I1210 10:46:02.717158 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:02 crc kubenswrapper[4780]: I1210 10:46:02.717169 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:02Z","lastTransitionTime":"2025-12-10T10:46:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:03 crc kubenswrapper[4780]: I1210 10:46:03.093909 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 10 10:46:03 crc kubenswrapper[4780]: I1210 10:46:03.094021 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-46s5p" Dec 10 10:46:03 crc kubenswrapper[4780]: E1210 10:46:03.094091 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 10 10:46:03 crc kubenswrapper[4780]: E1210 10:46:03.094211 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-46s5p" podUID="24187953-1dc5-48d7-b00c-1e5876604b6b" Dec 10 10:46:03 crc kubenswrapper[4780]: I1210 10:46:03.094276 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 10 10:46:03 crc kubenswrapper[4780]: I1210 10:46:03.094382 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:03 crc kubenswrapper[4780]: I1210 10:46:03.094424 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:03 crc kubenswrapper[4780]: I1210 10:46:03.094436 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:03 crc kubenswrapper[4780]: I1210 10:46:03.094456 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:03 crc kubenswrapper[4780]: E1210 10:46:03.094436 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 10 10:46:03 crc kubenswrapper[4780]: I1210 10:46:03.094292 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 10 10:46:03 crc kubenswrapper[4780]: I1210 10:46:03.094466 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:03Z","lastTransitionTime":"2025-12-10T10:46:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:03 crc kubenswrapper[4780]: E1210 10:46:03.094614 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 10 10:46:03 crc kubenswrapper[4780]: I1210 10:46:03.197264 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:03 crc kubenswrapper[4780]: I1210 10:46:03.197327 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:03 crc kubenswrapper[4780]: I1210 10:46:03.197343 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:03 crc kubenswrapper[4780]: I1210 10:46:03.197368 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:03 crc kubenswrapper[4780]: I1210 10:46:03.197381 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:03Z","lastTransitionTime":"2025-12-10T10:46:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:03 crc kubenswrapper[4780]: I1210 10:46:03.300694 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:03 crc kubenswrapper[4780]: I1210 10:46:03.300743 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:03 crc kubenswrapper[4780]: I1210 10:46:03.300756 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:03 crc kubenswrapper[4780]: I1210 10:46:03.300780 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:03 crc kubenswrapper[4780]: I1210 10:46:03.300792 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:03Z","lastTransitionTime":"2025-12-10T10:46:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:03 crc kubenswrapper[4780]: I1210 10:46:03.403068 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:03 crc kubenswrapper[4780]: I1210 10:46:03.403126 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:03 crc kubenswrapper[4780]: I1210 10:46:03.403139 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:03 crc kubenswrapper[4780]: I1210 10:46:03.403158 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:03 crc kubenswrapper[4780]: I1210 10:46:03.403172 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:03Z","lastTransitionTime":"2025-12-10T10:46:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:03 crc kubenswrapper[4780]: I1210 10:46:03.506230 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:03 crc kubenswrapper[4780]: I1210 10:46:03.506285 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:03 crc kubenswrapper[4780]: I1210 10:46:03.506300 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:03 crc kubenswrapper[4780]: I1210 10:46:03.506340 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:03 crc kubenswrapper[4780]: I1210 10:46:03.506359 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:03Z","lastTransitionTime":"2025-12-10T10:46:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:03 crc kubenswrapper[4780]: I1210 10:46:03.609044 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:03 crc kubenswrapper[4780]: I1210 10:46:03.609091 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:03 crc kubenswrapper[4780]: I1210 10:46:03.609103 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:03 crc kubenswrapper[4780]: I1210 10:46:03.609135 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:03 crc kubenswrapper[4780]: I1210 10:46:03.609147 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:03Z","lastTransitionTime":"2025-12-10T10:46:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:03 crc kubenswrapper[4780]: I1210 10:46:03.713191 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:03 crc kubenswrapper[4780]: I1210 10:46:03.713253 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:03 crc kubenswrapper[4780]: I1210 10:46:03.713271 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:03 crc kubenswrapper[4780]: I1210 10:46:03.713300 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:03 crc kubenswrapper[4780]: I1210 10:46:03.713318 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:03Z","lastTransitionTime":"2025-12-10T10:46:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:03 crc kubenswrapper[4780]: I1210 10:46:03.823547 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:03 crc kubenswrapper[4780]: I1210 10:46:03.823631 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:03 crc kubenswrapper[4780]: I1210 10:46:03.823657 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:03 crc kubenswrapper[4780]: I1210 10:46:03.823686 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:03 crc kubenswrapper[4780]: I1210 10:46:03.823699 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:03Z","lastTransitionTime":"2025-12-10T10:46:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:03 crc kubenswrapper[4780]: I1210 10:46:03.927534 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:03 crc kubenswrapper[4780]: I1210 10:46:03.927585 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:03 crc kubenswrapper[4780]: I1210 10:46:03.927596 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:03 crc kubenswrapper[4780]: I1210 10:46:03.927613 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:03 crc kubenswrapper[4780]: I1210 10:46:03.927623 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:03Z","lastTransitionTime":"2025-12-10T10:46:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:03 crc kubenswrapper[4780]: I1210 10:46:03.959389 4780 scope.go:117] "RemoveContainer" containerID="d077dbe18449f6a39b341d00aa17f12c5f31287117bd944c6e65135f02fd3b13" Dec 10 10:46:03 crc kubenswrapper[4780]: I1210 10:46:03.976434 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://738eca4a4800d0b83e165aaad1b57939c0fca3335dbf043de168bcc240b2c32c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:03Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:03 crc kubenswrapper[4780]: I1210 10:46:03.991701 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:03Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:04 crc kubenswrapper[4780]: I1210 10:46:04.006970 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-8cwb7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"deadb49b-61b8-435f-8168-d7bd3c01b5ad\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b46e1864cd02c0ab49ad21329dedf09667d52948a3f59eac7f32302cc0b48bda\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"syste
m-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r67pg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:27Z\\\"}}\" for pod \"openshift-multus\"/\"multus-8cwb7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:04Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:04 crc kubenswrapper[4780]: I1210 10:46:04.030403 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-rwfxn" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e522fb8-b104-4f14-a3a2-628fbe0ef36c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f3318d09af03d52017f648e9727180287dbcfce150242eaeb5f95be2fa6a1374\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5855e463a188d72cb8eac92bd97b9b2f91550a9a5437d7df83405bbe7a104f5d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5855e463a188d72cb8eac92bd97b9b2f91550a9a5437d7df83405bbe7a104f5d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4a16db7b744311d27339440bc46ebd09ba4264c5892ff205c6125af2a127e67e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4a16db7b744311d27339440bc46ebd09ba4264c5892ff205c6125af2a127e67e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fcbeee4d4e545150d725187049a89aa3d35ddd8f7767a6af0ea84a385454c316\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fcbeee4d4e545150d725187049a89aa3d35ddd8f7767a6af0ea84a385454c316\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3d60cbe3cc8e817975ed6843aefdbfe3233e8857a6458d77322a623ed858ca18\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3d60cbe3cc8e817975ed6843aefdbfe3233e8857a6458d77322a623ed858ca18\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0a0b3aad440bea346393ff35f0fa9492282c2a69b4a4c170f56ca7af63666077\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0a0b3aad440bea346393ff35f0fa9492282c2a69b4a4c170f56ca7af63666077\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3b72566e3e3a5f8eb12033cd2342bc3e9ad8f091f47c9d65309888d91a1ceaf4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3b72566e3e3a5f8eb12033cd2342bc3e9ad8f091f47c9d65309888d91a1ceaf4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:27Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-rwfxn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:04Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:04 crc kubenswrapper[4780]: I1210 10:46:04.033357 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:04 crc kubenswrapper[4780]: I1210 10:46:04.033427 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:04 crc 
kubenswrapper[4780]: I1210 10:46:04.033445 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:04 crc kubenswrapper[4780]: I1210 10:46:04.033504 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:04 crc kubenswrapper[4780]: I1210 10:46:04.033529 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:04Z","lastTransitionTime":"2025-12-10T10:46:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:04 crc kubenswrapper[4780]: I1210 10:46:04.050257 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:04Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:04 crc kubenswrapper[4780]: I1210 10:46:04.070695 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-2lx8w" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8a3251eb-408c-42f1-b74d-261cb45eab71\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://414f485a56b9fb2fbea2bbc9687a588aabda6239cad44f8116ec680597dbe667\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lgkg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:27Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-2lx8w\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2025-12-10T10:46:04Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:04 crc kubenswrapper[4780]: I1210 10:46:04.097687 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-fpl55" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cc22221d-0c02-4e8c-8314-c2e6d9290b5e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ef0cc518de07a141be124545f2e547c630329ff507b0e0e71e4dee9b4be5d89a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ce7c3983123be2f92944322c2cf554e67cf80a51981415d9872bfc2db9f1ed3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\
"containerID\\\":\\\"cri-o://cafa00280d671b48ed3628917793412a20b9d3be93a80d9a7775003081aea1ab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dbc444116b8592021f7d6ba0387d473521cba7c09ca78384be81f688af54554f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://782bcb7eb8341b73ff1c32c5af00aaefa2a513e5d1672f9f7df13cd05467131c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c39c3d56584d0939bfa9be938be09db5c93ddf50e0a56cacaa6e30c7b79475a1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID
\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d077dbe18449f6a39b341d00aa17f12c5f31287117bd944c6e65135f02fd3b13\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d077dbe18449f6a39b341d00aa17f12c5f31287117bd944c6e65135f02fd3b13\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-10T10:45:50Z\\\",\\\"message\\\":\\\"rator_TCP_cluster options:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.5.21:8443:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {58a148b3-0a7b-4412-b447-f87788c4883f}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1210 10:45:49.480966 6272 obj_retry.go:420] Function iterateRetryResources for *v1.Pod ended (in 215.696µs)\\\\nF1210 10:45:49.480983 6272 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:49Z is after 2025-08-24T17:21:41\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:48Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s 
restarting failed container=ovnkube-controller pod=ovnkube-node-fpl55_openshift-ovn-kubernetes(cc22221d-0c02-4e8c-8314-c2e6d9290b5e)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://df663a4e7ac68c8967db1167ea07b83862e9d52aa13b3f10e51934bb18e7397e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ada0c2d5e74e1805777a330c39a3c1c0fb677f876387207a83c79bd7ed06775a\\
\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ada0c2d5e74e1805777a330c39a3c1c0fb677f876387207a83c79bd7ed06775a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:27Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-fpl55\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:04Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:04 crc kubenswrapper[4780]: I1210 10:46:04.114766 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-6m7tj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a78da08f-1ec1-4cc7-af55-d527da423778\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://920fb034d3d8ce6bf28b128e33093aa5daa724b15c51c7e208ed94ccc2f4840e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceacc
ount\\\",\\\"name\\\":\\\"kube-api-access-jvtxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a4509159a8aa8b24876c86f827f346063fd6cd2603c15242f6507b0a4afaff3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jvtxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:38Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-6m7tj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:04Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:04 crc kubenswrapper[4780]: I1210 10:46:04.214976 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:04 crc kubenswrapper[4780]: I1210 10:46:04.215034 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:04 crc kubenswrapper[4780]: I1210 10:46:04.215049 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:04 crc kubenswrapper[4780]: I1210 10:46:04.215068 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:04 crc kubenswrapper[4780]: I1210 10:46:04.215080 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:04Z","lastTransitionTime":"2025-12-10T10:46:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:04 crc kubenswrapper[4780]: I1210 10:46:04.218267 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-46s5p" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"24187953-1dc5-48d7-b00c-1e5876604b6b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:40Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:40Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jg69p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jg69p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:40Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-46s5p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:04Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:04 crc kubenswrapper[4780]: I1210 10:46:04.243275 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"da4d38a3-53c0-417d-a86f-3496714bd352\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7f0f1f88e79f81b145ad50cc2b0a6e66de12f33c6819aaa356c8e6f7808384f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:44:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d6c4b099607f36efe13488a53c66715ba756493eaa11612cef18b8174b7b48ef\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:44:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a352e1e0c2a6fcb27ed2d53b1952586d56239ac801471f41d319d4b9390f7334\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:44:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://960f2d7c617f5d0b3281d431308985bb7419af77581a404554f849d22ffa1687\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f71a2483b5040f737133b92baa6ea162f365c492a0d860a50ba8eabd7ca23519\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-10T10:45:21Z\\\",\\\"message\\\":\\\"file observer\\\\nW1210 10:45:20.821547 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1210 10:45:20.821865 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1210 10:45:20.824662 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-536389068/tls.crt::/tmp/serving-cert-536389068/tls.key\\\\\\\"\\\\nI1210 10:45:21.107127 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1210 10:45:21.109726 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1210 10:45:21.109750 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1210 10:45:21.109791 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1210 10:45:21.109797 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1210 10:45:21.115580 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1210 10:45:21.115608 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1210 10:45:21.115614 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1210 10:45:21.115621 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1210 10:45:21.115664 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1210 10:45:21.115672 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1210 10:45:21.115676 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1210 10:45:21.117097 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1210 10:45:21.119004 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:14Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://531ca3c69106ee33211f11eba4a6fcbcf6cf5b8b6ee16193b5a118999537d2e0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:00Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6a7d241fa7fd864088c8bfa29a4457b38b3e78a6846872d6f52aeebb0de99c58\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6a7d241fa7fd864088c8bfa29a4457b38b3e78a6846872d6f52aeebb0de99c58\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:44:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:44:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:44:56Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:04Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:04 crc kubenswrapper[4780]: I1210 10:46:04.260223 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8ca741d2df061bc77e981eca8a555ba59082c6db0357093490908a1b27054340\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:04Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:04 crc kubenswrapper[4780]: I1210 10:46:04.279271 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6bf1dca1-b191-4796-b326-baac53e84045\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://439376b6f14a03790c4d49523ce4f21e3ee73ee0a81954f873409cf956048f53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sh92h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://57ec3f3a4da992b4288db7d6633da18e479fdc56b10c503a2a44368671882f0d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sh92h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:27Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-xhdr5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:04Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:04 crc kubenswrapper[4780]: I1210 10:46:04.295119 4780 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-image-registry/node-ca-msm77" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c1046aa3-9bf1-4013-8e8d-5629f08ed5e2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6a59aa8cd7a70e094ff80b0ae626aaabb77fb7ffdcffa1320945909c32da0704\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2ttwk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:30Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-msm77\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:04Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:04 crc kubenswrapper[4780]: I1210 10:46:04.309941 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"056c06c7-eb0f-4fb7-9f86-2884fd0d1e60\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b9592145faf65060693a3f1e14db253e10cd09c642ea0aa1a7682f5b06872f14\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:44:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://eccc5070135705c193e1020872bffc1b8aeb70dbe82f1bb520f36f012ca00703\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:44:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://72f3bd530fbcc66ed779ee9f6ef1c442f2697ca027aad5bd2473f42101d55528\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:44:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3970fa6bd2aa3e93a4473d78218e9dd494aa5f7056efbcfb123160bb63162192\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3970fa6bd2aa3e93a4473d78218e9dd494aa5f7056efbcfb123160bb63162192\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:44:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:44:57Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:44:56Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:04Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:04 crc kubenswrapper[4780]: I1210 10:46:04.317681 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:04 crc kubenswrapper[4780]: I1210 10:46:04.317723 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:04 crc kubenswrapper[4780]: I1210 10:46:04.317736 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:04 crc kubenswrapper[4780]: I1210 10:46:04.317754 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:04 crc kubenswrapper[4780]: I1210 10:46:04.317768 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:04Z","lastTransitionTime":"2025-12-10T10:46:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:04 crc kubenswrapper[4780]: I1210 10:46:04.324621 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:04Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:04 crc kubenswrapper[4780]: I1210 10:46:04.348200 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://acd480054c5d19efdbcce679b2246ce2972593aa710b1074a2fa90006f351c66\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7c28d1629c1d97373d475f14e54585e81a67114d5a69e21be9d1e750040d6d5c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:04Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:04 crc kubenswrapper[4780]: I1210 10:46:04.378472 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-fpl55_cc22221d-0c02-4e8c-8314-c2e6d9290b5e/ovnkube-controller/1.log" Dec 10 10:46:04 crc kubenswrapper[4780]: I1210 10:46:04.382388 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-fpl55" 
event={"ID":"cc22221d-0c02-4e8c-8314-c2e6d9290b5e","Type":"ContainerStarted","Data":"8bae117db052a26101b63906a5bc25cd1ad0813261c80f78c00ee5235644c788"} Dec 10 10:46:04 crc kubenswrapper[4780]: I1210 10:46:04.382998 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-fpl55" Dec 10 10:46:04 crc kubenswrapper[4780]: I1210 10:46:04.405071 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://738eca4a4800d0b83e165aaad1b57939c0fca3335dbf043de168bcc240b2c32c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:04Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:04 crc kubenswrapper[4780]: I1210 10:46:04.420451 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:04 crc kubenswrapper[4780]: I1210 10:46:04.420498 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:04 crc kubenswrapper[4780]: I1210 10:46:04.420507 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:04 crc kubenswrapper[4780]: I1210 10:46:04.420547 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:04 crc kubenswrapper[4780]: I1210 10:46:04.420558 4780 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:04Z","lastTransitionTime":"2025-12-10T10:46:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:04 crc kubenswrapper[4780]: I1210 10:46:04.422252 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:04Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:04 crc kubenswrapper[4780]: I1210 10:46:04.437348 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-8cwb7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"deadb49b-61b8-435f-8168-d7bd3c01b5ad\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b46e1864cd02c0ab49ad21329dedf09667d52948a3f59eac7f32302cc0b48bda\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\
"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r67pg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:27Z\\\"}}\" for pod \"openshift-multus\"/\"multus-8cwb7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:04Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:04 crc kubenswrapper[4780]: I1210 10:46:04.460839 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-rwfxn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e522fb8-b104-4f14-a3a2-628fbe0ef36c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f3318d09af03d52017f648e9727180287dbcfce150242eaeb5f95be2fa6a1374\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\
"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5855e463a188d72cb8eac92bd97b9b2f91550a9a5437d7df83405bbe7a104f5d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5855e463a188d72cb8eac92bd97b9b2f91550a9a5437d7df83405bbe7a104f5d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4a16db7b744311d27339440bc46ebd09ba4264c5892ff205c6125af2a127e67e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4a16db7b744311d27339440bc46ebd09ba4264c5892ff205c6125af2a127e67e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fcbeee4d4e545150d725187049a89aa3d35ddd8f7767a6af0ea84a385454c316\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fcbeee4d4e545150d725187049a89aa3d35ddd8f7767a6af0ea84a385454c316\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:37Z\\\",\\\"reason\\\":\
\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3d60cbe3cc8e817975ed6843aefdbfe3233e8857a6458d77322a623ed858ca18\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3d60cbe3cc8e817975ed6843aefdbfe3233e8857a6458d77322a623ed858ca18\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0a0b3aad440bea346393ff35f0fa9492282c2a69b4a4c170f56ca7af63666077\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0a0b3aad440bea346393ff35f0fa9492282c2a69b4a4c170f56ca7af63666077\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3b72566e3e3a5f8eb12033cd2342bc3e9ad8f091f47c9d65309888d91a1ceaf4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\
",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3b72566e3e3a5f8eb12033cd2342bc3e9ad8f091f47c9d65309888d91a1ceaf4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:27Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-rwfxn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:04Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:04 crc kubenswrapper[4780]: I1210 10:46:04.475989 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:04Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:04 crc kubenswrapper[4780]: I1210 10:46:04.486980 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-2lx8w" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8a3251eb-408c-42f1-b74d-261cb45eab71\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://414f485a56b9fb2fbea2bbc9687a588aabda6239cad44f8116ec680597dbe667\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lgkg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:27Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-2lx8w\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2025-12-10T10:46:04Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:04 crc kubenswrapper[4780]: I1210 10:46:04.506306 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-fpl55" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cc22221d-0c02-4e8c-8314-c2e6d9290b5e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ef0cc518de07a141be124545f2e547c630329ff507b0e0e71e4dee9b4be5d89a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ce7c3983123be2f92944322c2cf554e67cf80a51981415d9872bfc2db9f1ed3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\
"containerID\\\":\\\"cri-o://cafa00280d671b48ed3628917793412a20b9d3be93a80d9a7775003081aea1ab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dbc444116b8592021f7d6ba0387d473521cba7c09ca78384be81f688af54554f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://782bcb7eb8341b73ff1c32c5af00aaefa2a513e5d1672f9f7df13cd05467131c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c39c3d56584d0939bfa9be938be09db5c93ddf50e0a56cacaa6e30c7b79475a1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID
\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8bae117db052a26101b63906a5bc25cd1ad0813261c80f78c00ee5235644c788\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d077dbe18449f6a39b341d00aa17f12c5f31287117bd944c6e65135f02fd3b13\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-10T10:45:50Z\\\",\\\"message\\\":\\\"rator_TCP_cluster options:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.5.21:8443:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {58a148b3-0a7b-4412-b447-f87788c4883f}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1210 10:45:49.480966 6272 obj_retry.go:420] Function iterateRetryResources for *v1.Pod ended (in 215.696µs)\\\\nF1210 10:45:49.480983 6272 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:49Z is after 
2025-08-24T17:21:41\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:48Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:46:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://df663a4e7ac68c8967db1167ea07b83862e9d52aa13b3f10e51934bb18e7397e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatu
ses\\\":[{\\\"containerID\\\":\\\"cri-o://ada0c2d5e74e1805777a330c39a3c1c0fb677f876387207a83c79bd7ed06775a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ada0c2d5e74e1805777a330c39a3c1c0fb677f876387207a83c79bd7ed06775a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:27Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-fpl55\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:04Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:04 crc kubenswrapper[4780]: I1210 10:46:04.517091 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-6m7tj" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a78da08f-1ec1-4cc7-af55-d527da423778\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://920fb034d3d8ce6bf28b128e33093aa5daa724b15c51c7e208ed94ccc2f4840e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jvtxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a4509159a8aa8b24876c86f827f346063fd6cd2603c15242f6507b0a4afaff3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jvtxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:38Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-6m7tj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:04Z is after 2025-08-24T17:21:41Z" Dec 10 
10:46:04 crc kubenswrapper[4780]: I1210 10:46:04.522617 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:04 crc kubenswrapper[4780]: I1210 10:46:04.522660 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:04 crc kubenswrapper[4780]: I1210 10:46:04.522676 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:04 crc kubenswrapper[4780]: I1210 10:46:04.522694 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:04 crc kubenswrapper[4780]: I1210 10:46:04.522708 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:04Z","lastTransitionTime":"2025-12-10T10:46:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:04 crc kubenswrapper[4780]: I1210 10:46:04.528481 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-46s5p" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"24187953-1dc5-48d7-b00c-1e5876604b6b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:40Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:40Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jg69p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jg69p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:40Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-46s5p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:04Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:04 crc kubenswrapper[4780]: I1210 10:46:04.541158 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"da4d38a3-53c0-417d-a86f-3496714bd352\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7f0f1f88e79f81b145ad50cc2b0a6e66de12f33c6819aaa356c8e6f7808384f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:44:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d6c4b099607f36efe13488a53c66715ba756493eaa11612cef18b8174b7b48ef\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:44:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a352e1e0c2a6fcb27ed2d53b1952586d56239ac801471f41d319d4b9390f7334\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:44:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://960f2d7c617f5d0b3281d431308985bb7419af77581a404554f849d22ffa1687\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f71a2483b5040f737133b92baa6ea162f365c492a0d860a50ba8eabd7ca23519\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-10T10:45:21Z\\\",\\\"message\\\":\\\"file observer\\\\nW1210 10:45:20.821547 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1210 10:45:20.821865 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1210 10:45:20.824662 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-536389068/tls.crt::/tmp/serving-cert-536389068/tls.key\\\\\\\"\\\\nI1210 10:45:21.107127 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1210 10:45:21.109726 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1210 10:45:21.109750 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1210 10:45:21.109791 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1210 10:45:21.109797 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1210 10:45:21.115580 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1210 10:45:21.115608 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1210 10:45:21.115614 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1210 10:45:21.115621 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1210 10:45:21.115664 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1210 10:45:21.115672 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1210 10:45:21.115676 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1210 10:45:21.117097 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1210 10:45:21.119004 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:14Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://531ca3c69106ee33211f11eba4a6fcbcf6cf5b8b6ee16193b5a118999537d2e0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:00Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6a7d241fa7fd864088c8bfa29a4457b38b3e78a6846872d6f52aeebb0de99c58\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6a7d241fa7fd864088c8bfa29a4457b38b3e78a6846872d6f52aeebb0de99c58\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:44:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:44:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:44:56Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:04Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:04 crc kubenswrapper[4780]: I1210 10:46:04.551673 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8ca741d2df061bc77e981eca8a555ba59082c6db0357093490908a1b27054340\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:04Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:04 crc kubenswrapper[4780]: I1210 10:46:04.562271 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6bf1dca1-b191-4796-b326-baac53e84045\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://439376b6f14a03790c4d49523ce4f21e3ee73ee0a81954f873409cf956048f53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sh92h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://57ec3f3a4da992b4288db7d6633da18e479fdc56b10c503a2a44368671882f0d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sh92h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:27Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-xhdr5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:04Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:04 crc kubenswrapper[4780]: I1210 10:46:04.575139 4780 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"056c06c7-eb0f-4fb7-9f86-2884fd0d1e60\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b9592145faf65060693a3f1e14db253e10cd09c642ea0aa1a7682f5b06872f14\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:44:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://eccc5070135705c193e1020872bffc1b8aeb70dbe82f1bb520f36f012ca00703\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:44:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://72f3bd530fbcc66ed779ee9f6ef1c442f2697ca027aad5bd2473f42101d55528\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:44:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":
[{\\\"containerID\\\":\\\"cri-o://3970fa6bd2aa3e93a4473d78218e9dd494aa5f7056efbcfb123160bb63162192\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3970fa6bd2aa3e93a4473d78218e9dd494aa5f7056efbcfb123160bb63162192\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:44:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:44:57Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:44:56Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:04Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:04 crc kubenswrapper[4780]: I1210 10:46:04.594278 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:04Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:04 crc kubenswrapper[4780]: I1210 10:46:04.609635 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://acd480054c5d19efdbcce679b2246ce2972593aa710b1074a2fa90006f351c66\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7c28d1629c1d97373d475f14e54585e81a67114d5a69e21be9d1e750040d6d5c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"m
ountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:04Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:04 crc kubenswrapper[4780]: I1210 10:46:04.625284 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-msm77" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c1046aa3-9bf1-4013-8e8d-5629f08ed5e2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6a59aa8cd7a70e094ff80b0ae626aaabb77fb7ffdcffa1320945909c32da0704\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2ttwk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:30Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-msm77\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:04Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:04 crc kubenswrapper[4780]: I1210 10:46:04.625644 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Dec 10 10:46:04 crc kubenswrapper[4780]: I1210 10:46:04.625669 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:04 crc kubenswrapper[4780]: I1210 10:46:04.625677 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:04 crc kubenswrapper[4780]: I1210 10:46:04.625691 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:04 crc kubenswrapper[4780]: I1210 10:46:04.625700 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:04Z","lastTransitionTime":"2025-12-10T10:46:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:04 crc kubenswrapper[4780]: I1210 10:46:04.730199 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:04 crc kubenswrapper[4780]: I1210 10:46:04.730262 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:04 crc kubenswrapper[4780]: I1210 10:46:04.730283 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:04 crc kubenswrapper[4780]: I1210 10:46:04.730303 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:04 crc kubenswrapper[4780]: I1210 10:46:04.730321 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:04Z","lastTransitionTime":"2025-12-10T10:46:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:04 crc kubenswrapper[4780]: I1210 10:46:04.833869 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:04 crc kubenswrapper[4780]: I1210 10:46:04.834196 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:04 crc kubenswrapper[4780]: I1210 10:46:04.834274 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:04 crc kubenswrapper[4780]: I1210 10:46:04.834348 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:04 crc kubenswrapper[4780]: I1210 10:46:04.834472 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:04Z","lastTransitionTime":"2025-12-10T10:46:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:04 crc kubenswrapper[4780]: I1210 10:46:04.937617 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:04 crc kubenswrapper[4780]: I1210 10:46:04.937670 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:04 crc kubenswrapper[4780]: I1210 10:46:04.937682 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:04 crc kubenswrapper[4780]: I1210 10:46:04.937700 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:04 crc kubenswrapper[4780]: I1210 10:46:04.937711 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:04Z","lastTransitionTime":"2025-12-10T10:46:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:04 crc kubenswrapper[4780]: I1210 10:46:04.957874 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-46s5p" Dec 10 10:46:04 crc kubenswrapper[4780]: I1210 10:46:04.958000 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 10 10:46:04 crc kubenswrapper[4780]: I1210 10:46:04.958030 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 10 10:46:04 crc kubenswrapper[4780]: E1210 10:46:04.958093 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-46s5p" podUID="24187953-1dc5-48d7-b00c-1e5876604b6b" Dec 10 10:46:04 crc kubenswrapper[4780]: E1210 10:46:04.958155 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 10 10:46:04 crc kubenswrapper[4780]: E1210 10:46:04.958272 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 10 10:46:04 crc kubenswrapper[4780]: I1210 10:46:04.958335 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 10 10:46:04 crc kubenswrapper[4780]: E1210 10:46:04.958425 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 10 10:46:05 crc kubenswrapper[4780]: I1210 10:46:05.040658 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:05 crc kubenswrapper[4780]: I1210 10:46:05.040715 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:05 crc kubenswrapper[4780]: I1210 10:46:05.040725 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:05 crc kubenswrapper[4780]: I1210 10:46:05.040744 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:05 crc kubenswrapper[4780]: I1210 10:46:05.040756 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:05Z","lastTransitionTime":"2025-12-10T10:46:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:05 crc kubenswrapper[4780]: I1210 10:46:05.143869 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:05 crc kubenswrapper[4780]: I1210 10:46:05.144274 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:05 crc kubenswrapper[4780]: I1210 10:46:05.144289 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:05 crc kubenswrapper[4780]: I1210 10:46:05.144305 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:05 crc kubenswrapper[4780]: I1210 10:46:05.144317 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:05Z","lastTransitionTime":"2025-12-10T10:46:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:05 crc kubenswrapper[4780]: I1210 10:46:05.246888 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:05 crc kubenswrapper[4780]: I1210 10:46:05.246947 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:05 crc kubenswrapper[4780]: I1210 10:46:05.246962 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:05 crc kubenswrapper[4780]: I1210 10:46:05.246983 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:05 crc kubenswrapper[4780]: I1210 10:46:05.246998 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:05Z","lastTransitionTime":"2025-12-10T10:46:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:05 crc kubenswrapper[4780]: I1210 10:46:05.350315 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:05 crc kubenswrapper[4780]: I1210 10:46:05.350360 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:05 crc kubenswrapper[4780]: I1210 10:46:05.350370 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:05 crc kubenswrapper[4780]: I1210 10:46:05.350386 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:05 crc kubenswrapper[4780]: I1210 10:46:05.350397 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:05Z","lastTransitionTime":"2025-12-10T10:46:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:05 crc kubenswrapper[4780]: I1210 10:46:05.455622 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:05 crc kubenswrapper[4780]: I1210 10:46:05.455705 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:05 crc kubenswrapper[4780]: I1210 10:46:05.455731 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:05 crc kubenswrapper[4780]: I1210 10:46:05.455764 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:05 crc kubenswrapper[4780]: I1210 10:46:05.455792 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:05Z","lastTransitionTime":"2025-12-10T10:46:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:05 crc kubenswrapper[4780]: I1210 10:46:05.559216 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:05 crc kubenswrapper[4780]: I1210 10:46:05.559259 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:05 crc kubenswrapper[4780]: I1210 10:46:05.559270 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:05 crc kubenswrapper[4780]: I1210 10:46:05.559287 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:05 crc kubenswrapper[4780]: I1210 10:46:05.559299 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:05Z","lastTransitionTime":"2025-12-10T10:46:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:05 crc kubenswrapper[4780]: I1210 10:46:05.663393 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:05 crc kubenswrapper[4780]: I1210 10:46:05.663500 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:05 crc kubenswrapper[4780]: I1210 10:46:05.663517 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:05 crc kubenswrapper[4780]: I1210 10:46:05.663549 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:05 crc kubenswrapper[4780]: I1210 10:46:05.663564 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:05Z","lastTransitionTime":"2025-12-10T10:46:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:05 crc kubenswrapper[4780]: I1210 10:46:05.766689 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:05 crc kubenswrapper[4780]: I1210 10:46:05.766758 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:05 crc kubenswrapper[4780]: I1210 10:46:05.766772 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:05 crc kubenswrapper[4780]: I1210 10:46:05.766795 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:05 crc kubenswrapper[4780]: I1210 10:46:05.766808 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:05Z","lastTransitionTime":"2025-12-10T10:46:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:05 crc kubenswrapper[4780]: I1210 10:46:05.870537 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:05 crc kubenswrapper[4780]: I1210 10:46:05.870615 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:05 crc kubenswrapper[4780]: I1210 10:46:05.870634 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:05 crc kubenswrapper[4780]: I1210 10:46:05.870667 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:05 crc kubenswrapper[4780]: I1210 10:46:05.870686 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:05Z","lastTransitionTime":"2025-12-10T10:46:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:05 crc kubenswrapper[4780]: I1210 10:46:05.973496 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:05 crc kubenswrapper[4780]: I1210 10:46:05.973539 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:05 crc kubenswrapper[4780]: I1210 10:46:05.973554 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:05 crc kubenswrapper[4780]: I1210 10:46:05.973570 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:05 crc kubenswrapper[4780]: I1210 10:46:05.973583 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:05Z","lastTransitionTime":"2025-12-10T10:46:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:05 crc kubenswrapper[4780]: I1210 10:46:05.979580 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"da4d38a3-53c0-417d-a86f-3496714bd352\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7f0f1f88e79f81b145ad50cc2b0a6e66de12f33c6819aaa356c8e6f7808384f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:44:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d6c4b099607f36efe13488a53c66715ba756493eaa11612cef18b8174b7b48ef\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:44:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a352e1e0c2a6fcb27ed2d53b1952586d56239ac801471f41d319d4b9390f7334\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:44:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/ku
bernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://960f2d7c617f5d0b3281d431308985bb7419af77581a404554f849d22ffa1687\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f71a2483b5040f737133b92baa6ea162f365c492a0d860a50ba8eabd7ca23519\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-10T10:45:21Z\\\",\\\"message\\\":\\\"file observer\\\\nW1210 10:45:20.821547 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1210 10:45:20.821865 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1210 10:45:20.824662 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-536389068/tls.crt::/tmp/serving-cert-536389068/tls.key\\\\\\\"\\\\nI1210 10:45:21.107127 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1210 10:45:21.109726 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1210 10:45:21.109750 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1210 10:45:21.109791 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1210 10:45:21.109797 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1210 10:45:21.115580 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1210 10:45:21.115608 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1210 10:45:21.115614 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1210 10:45:21.115621 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1210 10:45:21.115664 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1210 10:45:21.115672 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1210 10:45:21.115676 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1210 10:45:21.117097 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1210 10:45:21.119004 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:14Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://531ca3c69106ee33211f11eba4a6fcbcf6cf5b8b6ee16193b5a118999537d2e0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:00Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6a7d241fa7fd864088c8bfa29a4457b38b3e78a6846872d6f52aeebb0de99c58\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6a7d241fa7fd864088c8bfa29a4457b38b3e78a6846872d6f52aeebb0de99c58\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:44:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:44:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:44:56Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:05Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:05 crc kubenswrapper[4780]: I1210 10:46:05.997609 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8ca741d2df061bc77e981eca8a555ba59082c6db0357093490908a1b27054340\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:05Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:06 crc kubenswrapper[4780]: I1210 10:46:06.011734 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6bf1dca1-b191-4796-b326-baac53e84045\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://439376b6f14a03790c4d49523ce4f21e3ee73ee0a81954f873409cf956048f53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sh92h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://57ec3f3a4da992b4288db7d6633da18e479fdc56b10c503a2a44368671882f0d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sh92h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:27Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-xhdr5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:06Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:06 crc kubenswrapper[4780]: I1210 10:46:06.026633 4780 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:06Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:06 crc kubenswrapper[4780]: I1210 10:46:06.044627 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://acd480054c5d19efdbcce679b2246ce2972593aa710b1074a2fa90006f351c66\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7c28d1629c1d97373d475f14e54585e81a67114d5a69e21be9d1e750040d6d5c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:06Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:06 crc kubenswrapper[4780]: I1210 10:46:06.056360 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-msm77" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c1046aa3-9bf1-4013-8e8d-5629f08ed5e2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6a59aa8cd7a70e094ff80b0ae626aaabb77fb7ffdcffa1320945909c32da0704\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2ttwk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:30Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-msm77\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:06Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:06 crc kubenswrapper[4780]: I1210 10:46:06.070764 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"056c06c7-eb0f-4fb7-9f86-2884fd0d1e60\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b9592145faf65060693a3f1e14db253e10cd09c642ea0aa1a7682f5b06872f14\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:44:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://eccc5070135705c193e1020872bffc1b8aeb70dbe82f1bb520f36f012ca00703\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:44:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://72f3bd530fbcc66ed779ee9f6ef1c442f2697ca027aad5bd2473f42101d55528\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:44:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3970fa6bd2aa3e93a4473d78218e9dd494aa5f7056efbcfb123160bb63162192\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3970fa6bd2aa3e93a4473d78218e9dd494aa5f7056efbcfb123160bb63162192\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:44:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:44:57Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:44:56Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:06Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:06 crc kubenswrapper[4780]: I1210 10:46:06.075989 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:06 crc kubenswrapper[4780]: I1210 10:46:06.076039 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:06 crc kubenswrapper[4780]: I1210 10:46:06.076052 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:06 crc kubenswrapper[4780]: I1210 10:46:06.076072 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:06 crc kubenswrapper[4780]: I1210 10:46:06.076084 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:06Z","lastTransitionTime":"2025-12-10T10:46:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:06 crc kubenswrapper[4780]: I1210 10:46:06.084558 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-8cwb7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"deadb49b-61b8-435f-8168-d7bd3c01b5ad\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b46e1864cd02c0ab49ad21329dedf09667d52948a3f59eac7f32302cc0b48bda\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r67pg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126
.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:27Z\\\"}}\" for pod \"openshift-multus\"/\"multus-8cwb7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:06Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:06 crc kubenswrapper[4780]: I1210 10:46:06.099295 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-rwfxn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e522fb8-b104-4f14-a3a2-628fbe0ef36c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f3318d09af03d52017f648e9727180287dbcfce150242eaeb5f95be2fa6a1374\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5855e463a188d72cb8eac92bd97b9b2f91550a9a5437d7df83405bbe7a104f5d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5855e463a188d72cb8eac92bd97b9b2f91550a9a5437d7df83405bbe7a104f5d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cn
ibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4a16db7b744311d27339440bc46ebd09ba4264c5892ff205c6125af2a127e67e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4a16db7b744311d27339440bc46ebd09ba4264c5892ff205c6125af2a127e67e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fcbeee4d4e545150d725187049a89aa3d35ddd8f7767a6af0ea84a385454c316\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fcbeee4d4e545150d725187049a89aa3d35ddd8f7767a6af0ea84a385454c316\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3d60cbe3cc8e817975ed6843aefdbfe3233e8857a6458d77322a623ed858ca18\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":t
rue,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3d60cbe3cc8e817975ed6843aefdbfe3233e8857a6458d77322a623ed858ca18\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0a0b3aad440bea346393ff35f0fa9492282c2a69b4a4c170f56ca7af63666077\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0a0b3aad440bea346393ff35f0fa9492282c2a69b4a4c170f56ca7af63666077\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3b72566e3e3a5f8eb12033cd2342bc3e9ad8f091f47c9d65309888d91a1ceaf4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3b72566e3e3a5f8eb12033cd2342bc3e9ad8f091f47c9d65309888d91a1ceaf4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:27Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-rwfxn\": Internal error 
occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:06Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:06 crc kubenswrapper[4780]: I1210 10:46:06.116065 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://738eca4a4800d0b83e165aaad1b57939c0fca3335dbf043de168bcc240b2c32c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:06Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:06 crc kubenswrapper[4780]: I1210 10:46:06.129554 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:06Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:06 crc kubenswrapper[4780]: I1210 10:46:06.142434 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-6m7tj" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a78da08f-1ec1-4cc7-af55-d527da423778\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://920fb034d3d8ce6bf28b128e33093aa5daa724b15c51c7e208ed94ccc2f4840e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jvtxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a4509159a8aa8b24876c86f827f346063fd6cd2603c15242f6507b0a4afaff3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jvtxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:38Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-6m7tj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:06Z is after 2025-08-24T17:21:41Z" Dec 10 
10:46:06 crc kubenswrapper[4780]: I1210 10:46:06.153685 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-46s5p" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"24187953-1dc5-48d7-b00c-1e5876604b6b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:40Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:40Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jg69p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jg69p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:40Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-46s5p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:06Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:06 crc kubenswrapper[4780]: I1210 10:46:06.164837 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:06Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:06 crc kubenswrapper[4780]: I1210 10:46:06.174357 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-2lx8w" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8a3251eb-408c-42f1-b74d-261cb45eab71\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://414f485a56b9fb2fbea2bbc9687a588aabda6239cad44f8116ec680597dbe667\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lgkg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:27Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-2lx8w\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:06Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:06 crc kubenswrapper[4780]: I1210 10:46:06.178599 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:06 crc kubenswrapper[4780]: I1210 10:46:06.178984 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:06 crc kubenswrapper[4780]: I1210 10:46:06.179019 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:06 crc kubenswrapper[4780]: I1210 10:46:06.179042 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:06 crc kubenswrapper[4780]: I1210 10:46:06.179065 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:06Z","lastTransitionTime":"2025-12-10T10:46:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: 
no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:06 crc kubenswrapper[4780]: I1210 10:46:06.201130 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-fpl55" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cc22221d-0c02-4e8c-8314-c2e6d9290b5e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ef0cc518de07a141be124545f2e547c630329ff507b0e0e71e4dee9b4be5d89a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ce7c3983123be2f92944322c2cf554e67cf80a51981415d9872bfc2db9f1ed3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recu
rsiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cafa00280d671b48ed3628917793412a20b9d3be93a80d9a7775003081aea1ab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dbc444116b8592021f7d6ba0387d473521cba7c09ca78384be81f688af54554f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://782bcb7eb8341b73ff1c32c5af00aaefa2a513e5d1672f9f7df13cd05467131c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c39c3d56584d0939bfa9be938be09db5c93ddf50e0a56cacaa6e30c7b79475a1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d7732574532
65a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8bae117db052a26101b63906a5bc25cd1ad0813261c80f78c00ee5235644c788\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d077dbe18449f6a39b341d00aa17f12c5f31287117bd944c6e65135f02fd3b13\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-10T10:45:50Z\\\",\\\"message\\\":\\\"rator_TCP_cluster options:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.5.21:8443:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {58a148b3-0a7b-4412-b447-f87788c4883f}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1210 10:45:49.480966 6272 obj_retry.go:420] Function iterateRetryResources for *v1.Pod ended (in 215.696µs)\\\\nF1210 10:45:49.480983 6272 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:49Z is after 
2025-08-24T17:21:41\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:48Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:46:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://df663a4e7ac68c8967db1167ea07b83862e9d52aa13b3f10e51934bb18e7397e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatu
ses\\\":[{\\\"containerID\\\":\\\"cri-o://ada0c2d5e74e1805777a330c39a3c1c0fb677f876387207a83c79bd7ed06775a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ada0c2d5e74e1805777a330c39a3c1c0fb677f876387207a83c79bd7ed06775a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:27Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-fpl55\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:06Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:06 crc kubenswrapper[4780]: I1210 10:46:06.282244 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:06 crc kubenswrapper[4780]: I1210 10:46:06.282316 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:06 crc kubenswrapper[4780]: I1210 10:46:06.282330 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:06 crc kubenswrapper[4780]: I1210 10:46:06.282348 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:06 crc kubenswrapper[4780]: I1210 10:46:06.282360 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:06Z","lastTransitionTime":"2025-12-10T10:46:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:06 crc kubenswrapper[4780]: I1210 10:46:06.385896 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:06 crc kubenswrapper[4780]: I1210 10:46:06.385952 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:06 crc kubenswrapper[4780]: I1210 10:46:06.385964 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:06 crc kubenswrapper[4780]: I1210 10:46:06.385981 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:06 crc kubenswrapper[4780]: I1210 10:46:06.385992 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:06Z","lastTransitionTime":"2025-12-10T10:46:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:06 crc kubenswrapper[4780]: I1210 10:46:06.390904 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-fpl55_cc22221d-0c02-4e8c-8314-c2e6d9290b5e/ovnkube-controller/2.log" Dec 10 10:46:06 crc kubenswrapper[4780]: I1210 10:46:06.391571 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-fpl55_cc22221d-0c02-4e8c-8314-c2e6d9290b5e/ovnkube-controller/1.log" Dec 10 10:46:06 crc kubenswrapper[4780]: I1210 10:46:06.394677 4780 generic.go:334] "Generic (PLEG): container finished" podID="cc22221d-0c02-4e8c-8314-c2e6d9290b5e" containerID="8bae117db052a26101b63906a5bc25cd1ad0813261c80f78c00ee5235644c788" exitCode=1 Dec 10 10:46:06 crc kubenswrapper[4780]: I1210 10:46:06.394745 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-fpl55" event={"ID":"cc22221d-0c02-4e8c-8314-c2e6d9290b5e","Type":"ContainerDied","Data":"8bae117db052a26101b63906a5bc25cd1ad0813261c80f78c00ee5235644c788"} Dec 10 10:46:06 crc kubenswrapper[4780]: I1210 10:46:06.394880 4780 scope.go:117] "RemoveContainer" containerID="d077dbe18449f6a39b341d00aa17f12c5f31287117bd944c6e65135f02fd3b13" Dec 10 10:46:06 crc kubenswrapper[4780]: I1210 10:46:06.396228 4780 scope.go:117] "RemoveContainer" containerID="8bae117db052a26101b63906a5bc25cd1ad0813261c80f78c00ee5235644c788" Dec 10 10:46:06 crc kubenswrapper[4780]: E1210 10:46:06.396523 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-fpl55_openshift-ovn-kubernetes(cc22221d-0c02-4e8c-8314-c2e6d9290b5e)\"" pod="openshift-ovn-kubernetes/ovnkube-node-fpl55" podUID="cc22221d-0c02-4e8c-8314-c2e6d9290b5e" Dec 10 10:46:06 crc kubenswrapper[4780]: I1210 10:46:06.415583 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://acd480054c5d19efdbcce679b2246ce2972593aa710b1074a2fa90006f351c66\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7c28d1629c1d97373d475f14e54585e81a67114d5a69e21be9d1e750040d6d5c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:06Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:06 crc kubenswrapper[4780]: I1210 10:46:06.427735 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-msm77" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c1046aa3-9bf1-4013-8e8d-5629f08ed5e2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6a59aa8cd7a70e094ff80b0ae626aaabb77fb7ffdcffa1320945909c32da0704\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2ttwk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:30Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-msm77\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:06Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:06 crc kubenswrapper[4780]: I1210 10:46:06.444053 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"056c06c7-eb0f-4fb7-9f86-2884fd0d1e60\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b9592145faf65060693a3f1e14db253e10cd09c642ea0aa1a7682f5b06872f14\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:44:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://eccc5070135705c193e1020872bffc1b8aeb70dbe82f1bb520f36f012ca00703\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:44:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://72f3bd530fbcc66ed779ee9f6ef1c442f2697ca027aad5bd2473f42101d55528\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:44:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3970fa6bd2aa3e93a4473d78218e9dd494aa5f7056efbcfb123160bb63162192\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3970fa6bd2aa3e93a4473d78218e9dd494aa5f7056efbcfb123160bb63162192\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:44:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:44:57Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:44:56Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:06Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:06 crc kubenswrapper[4780]: I1210 10:46:06.462764 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:06Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:06 crc kubenswrapper[4780]: I1210 10:46:06.480174 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-rwfxn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e522fb8-b104-4f14-a3a2-628fbe0ef36c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f3318d09af03d52017f648e9727180287dbcfce150242eaeb5f95be2fa6a1374\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5855e463a188d72cb8eac92bd97b9b2f91550a9a5437d7df83405bbe7a104f5d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"s
tarted\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5855e463a188d72cb8eac92bd97b9b2f91550a9a5437d7df83405bbe7a104f5d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4a16db7b744311d27339440bc46ebd09ba4264c5892ff205c6125af2a127e67e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4a16db7b744311d27339440bc46ebd09ba4264c5892ff205c6125af2a127e67e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fcbeee4d4e545150d725187049a89aa3d35ddd8f7767a6af0ea84a385454c316\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fcbeee4d4e545150d725187049a89aa3d35ddd8f7767a6af0ea84a385454c316\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"
}]},{\\\"containerID\\\":\\\"cri-o://3d60cbe3cc8e817975ed6843aefdbfe3233e8857a6458d77322a623ed858ca18\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3d60cbe3cc8e817975ed6843aefdbfe3233e8857a6458d77322a623ed858ca18\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0a0b3aad440bea346393ff35f0fa9492282c2a69b4a4c170f56ca7af63666077\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0a0b3aad440bea346393ff35f0fa9492282c2a69b4a4c170f56ca7af63666077\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3b72566e3e3a5f8eb12033cd2342bc3e9ad8f091f47c9d65309888d91a1ceaf4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3b72566e3e3a5f8eb12033cd2342bc3e9ad8f091f47c9d65309888d91a1ceaf4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":
\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:27Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-rwfxn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:06Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:06 crc kubenswrapper[4780]: I1210 10:46:06.489263 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:06 crc kubenswrapper[4780]: I1210 10:46:06.489319 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:06 crc kubenswrapper[4780]: I1210 10:46:06.489332 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:06 crc kubenswrapper[4780]: I1210 10:46:06.489352 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:06 crc kubenswrapper[4780]: I1210 10:46:06.489364 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:06Z","lastTransitionTime":"2025-12-10T10:46:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:06 crc kubenswrapper[4780]: I1210 10:46:06.496036 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://738eca4a4800d0b83e165aaad1b57939c0fca3335dbf043de168bcc240b2c32c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:06Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:06 crc kubenswrapper[4780]: I1210 10:46:06.511865 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:06Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:06 crc kubenswrapper[4780]: I1210 10:46:06.528246 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-8cwb7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"deadb49b-61b8-435f-8168-d7bd3c01b5ad\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b46e1864cd02c0ab49ad21329dedf09667d52948a3f59eac7f32302cc0b48bda\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"syste
m-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r67pg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:27Z\\\"}}\" for pod \"openshift-multus\"/\"multus-8cwb7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:06Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:06 crc kubenswrapper[4780]: I1210 10:46:06.542753 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-46s5p" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"24187953-1dc5-48d7-b00c-1e5876604b6b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:40Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:40Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jg69p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jg69p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:40Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-46s5p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:06Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:06 crc kubenswrapper[4780]: I1210 10:46:06.558193 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:06Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:06 crc kubenswrapper[4780]: I1210 10:46:06.568524 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-2lx8w" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8a3251eb-408c-42f1-b74d-261cb45eab71\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://414f485a56b9fb2fbea2bbc9687a588aabda6239cad44f8116ec680597dbe667\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lgkg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:27Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-2lx8w\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2025-12-10T10:46:06Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:06 crc kubenswrapper[4780]: I1210 10:46:06.586221 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-fpl55" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cc22221d-0c02-4e8c-8314-c2e6d9290b5e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ef0cc518de07a141be124545f2e547c630329ff507b0e0e71e4dee9b4be5d89a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ce7c3983123be2f92944322c2cf554e67cf80a51981415d9872bfc2db9f1ed3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\
"containerID\\\":\\\"cri-o://cafa00280d671b48ed3628917793412a20b9d3be93a80d9a7775003081aea1ab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dbc444116b8592021f7d6ba0387d473521cba7c09ca78384be81f688af54554f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://782bcb7eb8341b73ff1c32c5af00aaefa2a513e5d1672f9f7df13cd05467131c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c39c3d56584d0939bfa9be938be09db5c93ddf50e0a56cacaa6e30c7b79475a1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID
\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8bae117db052a26101b63906a5bc25cd1ad0813261c80f78c00ee5235644c788\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d077dbe18449f6a39b341d00aa17f12c5f31287117bd944c6e65135f02fd3b13\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-10T10:45:50Z\\\",\\\"message\\\":\\\"rator_TCP_cluster options:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.5.21:8443:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {58a148b3-0a7b-4412-b447-f87788c4883f}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1210 10:45:49.480966 6272 obj_retry.go:420] Function iterateRetryResources for *v1.Pod ended (in 215.696µs)\\\\nF1210 10:45:49.480983 6272 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:49Z is after 
2025-08-24T17:21:41\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:48Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8bae117db052a26101b63906a5bc25cd1ad0813261c80f78c00ee5235644c788\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-10T10:46:05Z\\\",\\\"message\\\":\\\"af71e}]}}] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {c02bd945-d57b-49ff-9cd3-202ed3574b26}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:} {Op:update Table:NAT Row:map[external_ip:192.168.126.11 logical_ip:10.217.0.59 options:{GoMap:map[stateless:false]} type:snat] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {dce28c51-c9f1-478b-97c8-7e209d6e7cbe}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:} {Op:mutate Table:Logical_Router Row:map[] Rows:[] Columns:[] Mutations:[{Column:nat Mutator:insert Value:{GoSet:[{GoUUID:dce28c51-c9f1-478b-97c8-7e209d6e7cbe}]}}] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {e3c4661a-36a6-47f0-a6c0-a4ee741f2224}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1210 10:46:05.290278 6546 services_controller.go:451] Built service openshift-console/console cluster-wide LB for network=default: []services.LB{services.LB{Name:\\\\\\\"Service_openshift-console/console_TCP_cluster\\\\\\\", UUID:\\\\\\\"\\\\\\\", Protocol:\\\\\\\"TCP\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-console/console\\\\\\\"}, Opts:services.LBOpts{Reject:true, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{services.LBRule{Source:services.Addr{IP:\\\\\\\"10.217.5.194\\\\\\\", Port:443, 
Template:(*services.Template\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-10T10:46:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://df663a4e7ac68c8967db1167ea07b83862e9d52aa13b3f10e51934bb18e7397e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ada0c2d5e74e1805777a330c39a3c1c0fb677f876387207a83c79bd7ed06775a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@
sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ada0c2d5e74e1805777a330c39a3c1c0fb677f876387207a83c79bd7ed06775a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:27Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-fpl55\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:06Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:06 crc kubenswrapper[4780]: I1210 10:46:06.591244 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:06 crc kubenswrapper[4780]: I1210 10:46:06.591288 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:06 crc kubenswrapper[4780]: I1210 10:46:06.591300 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:06 crc kubenswrapper[4780]: I1210 10:46:06.591320 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:06 crc kubenswrapper[4780]: I1210 10:46:06.591334 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:06Z","lastTransitionTime":"2025-12-10T10:46:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:06 crc kubenswrapper[4780]: I1210 10:46:06.598196 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-6m7tj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a78da08f-1ec1-4cc7-af55-d527da423778\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://920fb034d3d8ce6bf28b128e33093aa5daa724b15c51c7e208ed94ccc2f4840e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jvtxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a4509159a8aa8b24876c86f827f346063fd6cd2603c15242f6507b0a4afaff3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jvtxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:38Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-6m7tj\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:06Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:06 crc kubenswrapper[4780]: I1210 10:46:06.612516 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"da4d38a3-53c0-417d-a86f-3496714bd352\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7f0f1f88e79f81b145ad50cc2b0a6e66de12f33c6819aaa356c8e6f7808384f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:44:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d6c4b099607f36efe13488a53c66715ba756493eaa11612cef18b8174b7b48ef\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:44:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a352e1e0c2a6fcb27ed2d53b1952586d56239ac801471f41d319d4b9390f7334\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\
\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:44:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://960f2d7c617f5d0b3281d431308985bb7419af77581a404554f849d22ffa1687\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f71a2483b5040f737133b92baa6ea162f365c492a0d860a50ba8eabd7ca23519\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-10T10:45:21Z\\\",\\\"message\\\":\\\"file observer\\\\nW1210 10:45:20.821547 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1210 10:45:20.821865 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1210 10:45:20.824662 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-536389068/tls.crt::/tmp/serving-cert-536389068/tls.key\\\\\\\"\\\\nI1210 10:45:21.107127 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1210 10:45:21.109726 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1210 10:45:21.109750 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1210 10:45:21.109791 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1210 10:45:21.109797 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1210 10:45:21.115580 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1210 10:45:21.115608 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1210 10:45:21.115614 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1210 10:45:21.115621 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1210 10:45:21.115664 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1210 10:45:21.115672 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1210 10:45:21.115676 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1210 10:45:21.117097 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1210 10:45:21.119004 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:14Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://531ca3c69106ee33211f11eba4a6fcbcf6cf5b8b6ee16193b5a118999537d2e0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:00Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6a7d241fa7fd864088c8bfa29a4457b38b3e78a6846872d6f52aeebb0de99c58\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6a7d241fa7fd864088c8bfa29a4457b38b3e78a6846872d6f52aeebb0de99c58\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:44:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:44:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:44:56Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:06Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:06 crc kubenswrapper[4780]: I1210 10:46:06.624835 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8ca741d2df061bc77e981eca8a555ba59082c6db0357093490908a1b27054340\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:06Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:06 crc kubenswrapper[4780]: I1210 10:46:06.637713 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6bf1dca1-b191-4796-b326-baac53e84045\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://439376b6f14a03790c4d49523ce4f21e3ee73ee0a81954f873409cf956048f53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sh92h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://57ec3f3a4da992b4288db7d6633da18e479fdc56b10c503a2a44368671882f0d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sh92h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:27Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-xhdr5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:06Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:06 crc kubenswrapper[4780]: I1210 10:46:06.694348 4780 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:06 crc kubenswrapper[4780]: I1210 10:46:06.694398 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:06 crc kubenswrapper[4780]: I1210 10:46:06.694421 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:06 crc kubenswrapper[4780]: I1210 10:46:06.694438 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:06 crc kubenswrapper[4780]: I1210 10:46:06.694449 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:06Z","lastTransitionTime":"2025-12-10T10:46:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:06 crc kubenswrapper[4780]: I1210 10:46:06.797355 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:06 crc kubenswrapper[4780]: I1210 10:46:06.797393 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:06 crc kubenswrapper[4780]: I1210 10:46:06.797403 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:06 crc kubenswrapper[4780]: I1210 10:46:06.797418 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:06 crc kubenswrapper[4780]: I1210 10:46:06.797428 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:06Z","lastTransitionTime":"2025-12-10T10:46:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:06 crc kubenswrapper[4780]: I1210 10:46:06.900740 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:06 crc kubenswrapper[4780]: I1210 10:46:06.900806 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:06 crc kubenswrapper[4780]: I1210 10:46:06.900817 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:06 crc kubenswrapper[4780]: I1210 10:46:06.900845 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:06 crc kubenswrapper[4780]: I1210 10:46:06.900863 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:06Z","lastTransitionTime":"2025-12-10T10:46:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:06 crc kubenswrapper[4780]: I1210 10:46:06.958480 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-46s5p" Dec 10 10:46:06 crc kubenswrapper[4780]: I1210 10:46:06.958528 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 10 10:46:06 crc kubenswrapper[4780]: I1210 10:46:06.958582 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 10 10:46:06 crc kubenswrapper[4780]: I1210 10:46:06.958483 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 10 10:46:06 crc kubenswrapper[4780]: E1210 10:46:06.958633 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-46s5p" podUID="24187953-1dc5-48d7-b00c-1e5876604b6b" Dec 10 10:46:06 crc kubenswrapper[4780]: E1210 10:46:06.958716 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 10 10:46:06 crc kubenswrapper[4780]: E1210 10:46:06.958805 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 10 10:46:06 crc kubenswrapper[4780]: E1210 10:46:06.958912 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 10 10:46:07 crc kubenswrapper[4780]: I1210 10:46:07.003623 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:07 crc kubenswrapper[4780]: I1210 10:46:07.003671 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:07 crc kubenswrapper[4780]: I1210 10:46:07.003686 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:07 crc kubenswrapper[4780]: I1210 10:46:07.003707 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:07 crc kubenswrapper[4780]: I1210 10:46:07.003720 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:07Z","lastTransitionTime":"2025-12-10T10:46:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:07 crc kubenswrapper[4780]: I1210 10:46:07.106946 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:07 crc kubenswrapper[4780]: I1210 10:46:07.107000 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:07 crc kubenswrapper[4780]: I1210 10:46:07.107010 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:07 crc kubenswrapper[4780]: I1210 10:46:07.107026 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:07 crc kubenswrapper[4780]: I1210 10:46:07.107036 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:07Z","lastTransitionTime":"2025-12-10T10:46:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:07 crc kubenswrapper[4780]: I1210 10:46:07.210720 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:07 crc kubenswrapper[4780]: I1210 10:46:07.210762 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:07 crc kubenswrapper[4780]: I1210 10:46:07.210771 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:07 crc kubenswrapper[4780]: I1210 10:46:07.210785 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:07 crc kubenswrapper[4780]: I1210 10:46:07.210794 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:07Z","lastTransitionTime":"2025-12-10T10:46:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:07 crc kubenswrapper[4780]: I1210 10:46:07.318092 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:07 crc kubenswrapper[4780]: I1210 10:46:07.318149 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:07 crc kubenswrapper[4780]: I1210 10:46:07.318162 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:07 crc kubenswrapper[4780]: I1210 10:46:07.318180 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:07 crc kubenswrapper[4780]: I1210 10:46:07.318194 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:07Z","lastTransitionTime":"2025-12-10T10:46:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:07 crc kubenswrapper[4780]: I1210 10:46:07.401206 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-fpl55_cc22221d-0c02-4e8c-8314-c2e6d9290b5e/ovnkube-controller/2.log" Dec 10 10:46:07 crc kubenswrapper[4780]: I1210 10:46:07.420749 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:07 crc kubenswrapper[4780]: I1210 10:46:07.420849 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:07 crc kubenswrapper[4780]: I1210 10:46:07.420859 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:07 crc kubenswrapper[4780]: I1210 10:46:07.420875 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:07 crc kubenswrapper[4780]: I1210 10:46:07.420885 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:07Z","lastTransitionTime":"2025-12-10T10:46:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:07 crc kubenswrapper[4780]: I1210 10:46:07.523344 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:07 crc kubenswrapper[4780]: I1210 10:46:07.523389 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:07 crc kubenswrapper[4780]: I1210 10:46:07.523403 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:07 crc kubenswrapper[4780]: I1210 10:46:07.523423 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:07 crc kubenswrapper[4780]: I1210 10:46:07.523437 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:07Z","lastTransitionTime":"2025-12-10T10:46:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:07 crc kubenswrapper[4780]: I1210 10:46:07.626144 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:07 crc kubenswrapper[4780]: I1210 10:46:07.626196 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:07 crc kubenswrapper[4780]: I1210 10:46:07.626208 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:07 crc kubenswrapper[4780]: I1210 10:46:07.626226 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:07 crc kubenswrapper[4780]: I1210 10:46:07.626239 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:07Z","lastTransitionTime":"2025-12-10T10:46:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:07 crc kubenswrapper[4780]: I1210 10:46:07.729857 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:07 crc kubenswrapper[4780]: I1210 10:46:07.729932 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:07 crc kubenswrapper[4780]: I1210 10:46:07.729947 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:07 crc kubenswrapper[4780]: I1210 10:46:07.729969 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:07 crc kubenswrapper[4780]: I1210 10:46:07.729982 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:07Z","lastTransitionTime":"2025-12-10T10:46:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:07 crc kubenswrapper[4780]: I1210 10:46:07.833311 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:07 crc kubenswrapper[4780]: I1210 10:46:07.833346 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:07 crc kubenswrapper[4780]: I1210 10:46:07.833355 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:07 crc kubenswrapper[4780]: I1210 10:46:07.833370 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:07 crc kubenswrapper[4780]: I1210 10:46:07.833381 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:07Z","lastTransitionTime":"2025-12-10T10:46:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:07 crc kubenswrapper[4780]: I1210 10:46:07.952860 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:07 crc kubenswrapper[4780]: I1210 10:46:07.952957 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:07 crc kubenswrapper[4780]: I1210 10:46:07.952974 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:07 crc kubenswrapper[4780]: I1210 10:46:07.952996 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:07 crc kubenswrapper[4780]: I1210 10:46:07.953014 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:07Z","lastTransitionTime":"2025-12-10T10:46:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:08 crc kubenswrapper[4780]: I1210 10:46:08.056496 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:08 crc kubenswrapper[4780]: I1210 10:46:08.056567 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:08 crc kubenswrapper[4780]: I1210 10:46:08.056582 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:08 crc kubenswrapper[4780]: I1210 10:46:08.056601 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:08 crc kubenswrapper[4780]: I1210 10:46:08.056614 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:08Z","lastTransitionTime":"2025-12-10T10:46:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:08 crc kubenswrapper[4780]: I1210 10:46:08.159715 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:08 crc kubenswrapper[4780]: I1210 10:46:08.159767 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:08 crc kubenswrapper[4780]: I1210 10:46:08.159782 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:08 crc kubenswrapper[4780]: I1210 10:46:08.159802 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:08 crc kubenswrapper[4780]: I1210 10:46:08.159833 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:08Z","lastTransitionTime":"2025-12-10T10:46:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:08 crc kubenswrapper[4780]: I1210 10:46:08.262951 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:08 crc kubenswrapper[4780]: I1210 10:46:08.262998 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:08 crc kubenswrapper[4780]: I1210 10:46:08.263015 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:08 crc kubenswrapper[4780]: I1210 10:46:08.263044 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:08 crc kubenswrapper[4780]: I1210 10:46:08.263058 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:08Z","lastTransitionTime":"2025-12-10T10:46:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:08 crc kubenswrapper[4780]: I1210 10:46:08.365950 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:08 crc kubenswrapper[4780]: I1210 10:46:08.365989 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:08 crc kubenswrapper[4780]: I1210 10:46:08.365997 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:08 crc kubenswrapper[4780]: I1210 10:46:08.366013 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:08 crc kubenswrapper[4780]: I1210 10:46:08.366023 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:08Z","lastTransitionTime":"2025-12-10T10:46:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:08 crc kubenswrapper[4780]: I1210 10:46:08.469291 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:08 crc kubenswrapper[4780]: I1210 10:46:08.469345 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:08 crc kubenswrapper[4780]: I1210 10:46:08.469355 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:08 crc kubenswrapper[4780]: I1210 10:46:08.469372 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:08 crc kubenswrapper[4780]: I1210 10:46:08.469382 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:08Z","lastTransitionTime":"2025-12-10T10:46:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:08 crc kubenswrapper[4780]: I1210 10:46:08.571376 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:08 crc kubenswrapper[4780]: I1210 10:46:08.571427 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:08 crc kubenswrapper[4780]: I1210 10:46:08.571441 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:08 crc kubenswrapper[4780]: I1210 10:46:08.571456 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:08 crc kubenswrapper[4780]: I1210 10:46:08.571467 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:08Z","lastTransitionTime":"2025-12-10T10:46:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:08 crc kubenswrapper[4780]: I1210 10:46:08.674619 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:08 crc kubenswrapper[4780]: I1210 10:46:08.674688 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:08 crc kubenswrapper[4780]: I1210 10:46:08.674702 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:08 crc kubenswrapper[4780]: I1210 10:46:08.674724 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:08 crc kubenswrapper[4780]: I1210 10:46:08.674739 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:08Z","lastTransitionTime":"2025-12-10T10:46:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:08 crc kubenswrapper[4780]: I1210 10:46:08.777742 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:08 crc kubenswrapper[4780]: I1210 10:46:08.777797 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:08 crc kubenswrapper[4780]: I1210 10:46:08.777808 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:08 crc kubenswrapper[4780]: I1210 10:46:08.777828 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:08 crc kubenswrapper[4780]: I1210 10:46:08.777840 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:08Z","lastTransitionTime":"2025-12-10T10:46:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:08 crc kubenswrapper[4780]: I1210 10:46:08.881229 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:08 crc kubenswrapper[4780]: I1210 10:46:08.881319 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:08 crc kubenswrapper[4780]: I1210 10:46:08.881343 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:08 crc kubenswrapper[4780]: I1210 10:46:08.881370 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:08 crc kubenswrapper[4780]: I1210 10:46:08.881389 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:08Z","lastTransitionTime":"2025-12-10T10:46:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:08 crc kubenswrapper[4780]: I1210 10:46:08.958089 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-46s5p" Dec 10 10:46:08 crc kubenswrapper[4780]: I1210 10:46:08.958159 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 10 10:46:08 crc kubenswrapper[4780]: I1210 10:46:08.958199 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 10 10:46:08 crc kubenswrapper[4780]: I1210 10:46:08.958163 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 10 10:46:08 crc kubenswrapper[4780]: E1210 10:46:08.958340 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-46s5p" podUID="24187953-1dc5-48d7-b00c-1e5876604b6b" Dec 10 10:46:08 crc kubenswrapper[4780]: E1210 10:46:08.958524 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 10 10:46:08 crc kubenswrapper[4780]: E1210 10:46:08.958608 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 10 10:46:08 crc kubenswrapper[4780]: E1210 10:46:08.958786 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 10 10:46:08 crc kubenswrapper[4780]: I1210 10:46:08.984503 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:08 crc kubenswrapper[4780]: I1210 10:46:08.984565 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:08 crc kubenswrapper[4780]: I1210 10:46:08.984577 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:08 crc kubenswrapper[4780]: I1210 10:46:08.984599 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:08 crc kubenswrapper[4780]: I1210 10:46:08.984611 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:08Z","lastTransitionTime":"2025-12-10T10:46:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:09 crc kubenswrapper[4780]: I1210 10:46:09.087909 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:09 crc kubenswrapper[4780]: I1210 10:46:09.087982 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:09 crc kubenswrapper[4780]: I1210 10:46:09.088000 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:09 crc kubenswrapper[4780]: I1210 10:46:09.088023 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:09 crc kubenswrapper[4780]: I1210 10:46:09.088036 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:09Z","lastTransitionTime":"2025-12-10T10:46:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:09 crc kubenswrapper[4780]: I1210 10:46:09.191259 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:09 crc kubenswrapper[4780]: I1210 10:46:09.191306 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:09 crc kubenswrapper[4780]: I1210 10:46:09.191314 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:09 crc kubenswrapper[4780]: I1210 10:46:09.191331 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:09 crc kubenswrapper[4780]: I1210 10:46:09.191341 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:09Z","lastTransitionTime":"2025-12-10T10:46:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:09 crc kubenswrapper[4780]: I1210 10:46:09.294823 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:09 crc kubenswrapper[4780]: I1210 10:46:09.294895 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:09 crc kubenswrapper[4780]: I1210 10:46:09.294908 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:09 crc kubenswrapper[4780]: I1210 10:46:09.294965 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:09 crc kubenswrapper[4780]: I1210 10:46:09.294978 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:09Z","lastTransitionTime":"2025-12-10T10:46:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:09 crc kubenswrapper[4780]: I1210 10:46:09.759535 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:09 crc kubenswrapper[4780]: I1210 10:46:09.759878 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:09 crc kubenswrapper[4780]: I1210 10:46:09.759979 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:09 crc kubenswrapper[4780]: I1210 10:46:09.760063 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:09 crc kubenswrapper[4780]: I1210 10:46:09.760126 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:09Z","lastTransitionTime":"2025-12-10T10:46:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:09 crc kubenswrapper[4780]: I1210 10:46:09.762224 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:09 crc kubenswrapper[4780]: I1210 10:46:09.762321 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:09 crc kubenswrapper[4780]: I1210 10:46:09.762384 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:09 crc kubenswrapper[4780]: I1210 10:46:09.762445 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:09 crc kubenswrapper[4780]: I1210 10:46:09.762505 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:09Z","lastTransitionTime":"2025-12-10T10:46:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:09 crc kubenswrapper[4780]: E1210 10:46:09.777669 4780 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:46:09Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:09Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:46:09Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:09Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:46:09Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:09Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:46:09Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:09Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"90085e8a-5ea9-4564-85e4-5635b00d094d\\\",\\\"systemUUID\\\":\\\"0182e509-70c5-4f26-9ad3-610230bb601e\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:09Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:09 crc kubenswrapper[4780]: I1210 10:46:09.782320 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:09 crc kubenswrapper[4780]: I1210 10:46:09.782356 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Dec 10 10:46:09 crc kubenswrapper[4780]: I1210 10:46:09.782368 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:09 crc kubenswrapper[4780]: I1210 10:46:09.782388 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:09 crc kubenswrapper[4780]: I1210 10:46:09.782400 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:09Z","lastTransitionTime":"2025-12-10T10:46:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:09 crc kubenswrapper[4780]: E1210 10:46:09.805608 4780 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:46:09Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:09Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:46:09Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:09Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:46:09Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:09Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:46:09Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:09Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"90085e8a-5ea9-4564-85e4-5635b00d094d\\\",\\\"systemUUID\\\":\\\"0182e509-70c5-4f26-9ad3-610230bb601e\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:09Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:09 crc kubenswrapper[4780]: I1210 10:46:09.810157 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:09 crc kubenswrapper[4780]: I1210 10:46:09.810191 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Dec 10 10:46:09 crc kubenswrapper[4780]: I1210 10:46:09.810200 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:09 crc kubenswrapper[4780]: I1210 10:46:09.810216 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:09 crc kubenswrapper[4780]: I1210 10:46:09.810226 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:09Z","lastTransitionTime":"2025-12-10T10:46:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:09 crc kubenswrapper[4780]: E1210 10:46:09.822858 4780 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:46:09Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:09Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:46:09Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:09Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:46:09Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:09Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:46:09Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:09Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"90085e8a-5ea9-4564-85e4-5635b00d094d\\\",\\\"systemUUID\\\":\\\"0182e509-70c5-4f26-9ad3-610230bb601e\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:09Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:09 crc kubenswrapper[4780]: I1210 10:46:09.827107 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:09 crc kubenswrapper[4780]: I1210 10:46:09.827205 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Dec 10 10:46:09 crc kubenswrapper[4780]: I1210 10:46:09.827233 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:09 crc kubenswrapper[4780]: I1210 10:46:09.827268 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:09 crc kubenswrapper[4780]: I1210 10:46:09.827284 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:09Z","lastTransitionTime":"2025-12-10T10:46:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:09 crc kubenswrapper[4780]: E1210 10:46:09.839972 4780 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:46:09Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:09Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:46:09Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:09Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:46:09Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:09Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:46:09Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:09Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"90085e8a-5ea9-4564-85e4-5635b00d094d\\\",\\\"systemUUID\\\":\\\"0182e509-70c5-4f26-9ad3-610230bb601e\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:09Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:09 crc kubenswrapper[4780]: I1210 10:46:09.843887 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:09 crc kubenswrapper[4780]: I1210 10:46:09.843943 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Dec 10 10:46:09 crc kubenswrapper[4780]: I1210 10:46:09.843956 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:09 crc kubenswrapper[4780]: I1210 10:46:09.843975 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:09 crc kubenswrapper[4780]: I1210 10:46:09.843988 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:09Z","lastTransitionTime":"2025-12-10T10:46:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:09 crc kubenswrapper[4780]: E1210 10:46:09.856961 4780 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:46:09Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:09Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:46:09Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:09Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:46:09Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:09Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:46:09Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:09Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"90085e8a-5ea9-4564-85e4-5635b00d094d\\\",\\\"systemUUID\\\":\\\"0182e509-70c5-4f26-9ad3-610230bb601e\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:09Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:09 crc kubenswrapper[4780]: E1210 10:46:09.857116 4780 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Dec 10 10:46:09 crc kubenswrapper[4780]: I1210 10:46:09.863197 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Dec 10 10:46:09 crc kubenswrapper[4780]: I1210 10:46:09.863224 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:09 crc kubenswrapper[4780]: I1210 10:46:09.863233 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:09 crc kubenswrapper[4780]: I1210 10:46:09.863251 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:09 crc kubenswrapper[4780]: I1210 10:46:09.863265 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:09Z","lastTransitionTime":"2025-12-10T10:46:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:09 crc kubenswrapper[4780]: I1210 10:46:09.965658 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:09 crc kubenswrapper[4780]: I1210 10:46:09.965729 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:09 crc kubenswrapper[4780]: I1210 10:46:09.965743 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:09 crc kubenswrapper[4780]: I1210 10:46:09.965761 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:09 crc kubenswrapper[4780]: I1210 10:46:09.965775 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:09Z","lastTransitionTime":"2025-12-10T10:46:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:10 crc kubenswrapper[4780]: I1210 10:46:10.069524 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:10 crc kubenswrapper[4780]: I1210 10:46:10.069583 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:10 crc kubenswrapper[4780]: I1210 10:46:10.069600 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:10 crc kubenswrapper[4780]: I1210 10:46:10.069621 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:10 crc kubenswrapper[4780]: I1210 10:46:10.069634 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:10Z","lastTransitionTime":"2025-12-10T10:46:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:10 crc kubenswrapper[4780]: I1210 10:46:10.172897 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:10 crc kubenswrapper[4780]: I1210 10:46:10.173004 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:10 crc kubenswrapper[4780]: I1210 10:46:10.173027 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:10 crc kubenswrapper[4780]: I1210 10:46:10.173054 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:10 crc kubenswrapper[4780]: I1210 10:46:10.173078 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:10Z","lastTransitionTime":"2025-12-10T10:46:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:10 crc kubenswrapper[4780]: I1210 10:46:10.277087 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:10 crc kubenswrapper[4780]: I1210 10:46:10.277152 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:10 crc kubenswrapper[4780]: I1210 10:46:10.277171 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:10 crc kubenswrapper[4780]: I1210 10:46:10.277197 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:10 crc kubenswrapper[4780]: I1210 10:46:10.277215 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:10Z","lastTransitionTime":"2025-12-10T10:46:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:10 crc kubenswrapper[4780]: I1210 10:46:10.380134 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:10 crc kubenswrapper[4780]: I1210 10:46:10.380181 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:10 crc kubenswrapper[4780]: I1210 10:46:10.380193 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:10 crc kubenswrapper[4780]: I1210 10:46:10.380214 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:10 crc kubenswrapper[4780]: I1210 10:46:10.380227 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:10Z","lastTransitionTime":"2025-12-10T10:46:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:10 crc kubenswrapper[4780]: I1210 10:46:10.483960 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:10 crc kubenswrapper[4780]: I1210 10:46:10.484009 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:10 crc kubenswrapper[4780]: I1210 10:46:10.484023 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:10 crc kubenswrapper[4780]: I1210 10:46:10.484044 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:10 crc kubenswrapper[4780]: I1210 10:46:10.484055 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:10Z","lastTransitionTime":"2025-12-10T10:46:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:10 crc kubenswrapper[4780]: I1210 10:46:10.587283 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:10 crc kubenswrapper[4780]: I1210 10:46:10.587343 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:10 crc kubenswrapper[4780]: I1210 10:46:10.587354 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:10 crc kubenswrapper[4780]: I1210 10:46:10.587377 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:10 crc kubenswrapper[4780]: I1210 10:46:10.587390 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:10Z","lastTransitionTime":"2025-12-10T10:46:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:10 crc kubenswrapper[4780]: I1210 10:46:10.690781 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:10 crc kubenswrapper[4780]: I1210 10:46:10.690830 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:10 crc kubenswrapper[4780]: I1210 10:46:10.690841 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:10 crc kubenswrapper[4780]: I1210 10:46:10.690856 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:10 crc kubenswrapper[4780]: I1210 10:46:10.690868 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:10Z","lastTransitionTime":"2025-12-10T10:46:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:10 crc kubenswrapper[4780]: I1210 10:46:10.880607 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:10 crc kubenswrapper[4780]: I1210 10:46:10.880656 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:10 crc kubenswrapper[4780]: I1210 10:46:10.880666 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:10 crc kubenswrapper[4780]: I1210 10:46:10.880687 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:10 crc kubenswrapper[4780]: I1210 10:46:10.880700 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:10Z","lastTransitionTime":"2025-12-10T10:46:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:10 crc kubenswrapper[4780]: I1210 10:46:10.958689 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-46s5p" Dec 10 10:46:10 crc kubenswrapper[4780]: I1210 10:46:10.958865 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 10 10:46:10 crc kubenswrapper[4780]: E1210 10:46:10.959017 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-46s5p" podUID="24187953-1dc5-48d7-b00c-1e5876604b6b" Dec 10 10:46:10 crc kubenswrapper[4780]: I1210 10:46:10.958907 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 10 10:46:10 crc kubenswrapper[4780]: E1210 10:46:10.959149 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 10 10:46:10 crc kubenswrapper[4780]: I1210 10:46:10.958950 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 10 10:46:10 crc kubenswrapper[4780]: E1210 10:46:10.959190 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 10 10:46:10 crc kubenswrapper[4780]: E1210 10:46:10.959353 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 10 10:46:10 crc kubenswrapper[4780]: I1210 10:46:10.983562 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:10 crc kubenswrapper[4780]: I1210 10:46:10.983621 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:10 crc kubenswrapper[4780]: I1210 10:46:10.983634 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:10 crc kubenswrapper[4780]: I1210 10:46:10.983655 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:10 crc kubenswrapper[4780]: I1210 10:46:10.983672 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:10Z","lastTransitionTime":"2025-12-10T10:46:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:11 crc kubenswrapper[4780]: I1210 10:46:11.086344 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:11 crc kubenswrapper[4780]: I1210 10:46:11.086406 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:11 crc kubenswrapper[4780]: I1210 10:46:11.086418 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:11 crc kubenswrapper[4780]: I1210 10:46:11.086438 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:11 crc kubenswrapper[4780]: I1210 10:46:11.086449 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:11Z","lastTransitionTime":"2025-12-10T10:46:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:11 crc kubenswrapper[4780]: I1210 10:46:11.189243 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:11 crc kubenswrapper[4780]: I1210 10:46:11.189288 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:11 crc kubenswrapper[4780]: I1210 10:46:11.189305 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:11 crc kubenswrapper[4780]: I1210 10:46:11.189325 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:11 crc kubenswrapper[4780]: I1210 10:46:11.189338 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:11Z","lastTransitionTime":"2025-12-10T10:46:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:11 crc kubenswrapper[4780]: I1210 10:46:11.292339 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:11 crc kubenswrapper[4780]: I1210 10:46:11.292395 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:11 crc kubenswrapper[4780]: I1210 10:46:11.292410 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:11 crc kubenswrapper[4780]: I1210 10:46:11.292432 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:11 crc kubenswrapper[4780]: I1210 10:46:11.292446 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:11Z","lastTransitionTime":"2025-12-10T10:46:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:11 crc kubenswrapper[4780]: I1210 10:46:11.394991 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:11 crc kubenswrapper[4780]: I1210 10:46:11.395043 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:11 crc kubenswrapper[4780]: I1210 10:46:11.395053 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:11 crc kubenswrapper[4780]: I1210 10:46:11.395068 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:11 crc kubenswrapper[4780]: I1210 10:46:11.395081 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:11Z","lastTransitionTime":"2025-12-10T10:46:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:11 crc kubenswrapper[4780]: I1210 10:46:11.497912 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:11 crc kubenswrapper[4780]: I1210 10:46:11.497998 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:11 crc kubenswrapper[4780]: I1210 10:46:11.498009 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:11 crc kubenswrapper[4780]: I1210 10:46:11.498026 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:11 crc kubenswrapper[4780]: I1210 10:46:11.498037 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:11Z","lastTransitionTime":"2025-12-10T10:46:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:11 crc kubenswrapper[4780]: I1210 10:46:11.600610 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:11 crc kubenswrapper[4780]: I1210 10:46:11.600654 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:11 crc kubenswrapper[4780]: I1210 10:46:11.600663 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:11 crc kubenswrapper[4780]: I1210 10:46:11.600678 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:11 crc kubenswrapper[4780]: I1210 10:46:11.600688 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:11Z","lastTransitionTime":"2025-12-10T10:46:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:11 crc kubenswrapper[4780]: I1210 10:46:11.703494 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:11 crc kubenswrapper[4780]: I1210 10:46:11.703550 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:11 crc kubenswrapper[4780]: I1210 10:46:11.703571 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:11 crc kubenswrapper[4780]: I1210 10:46:11.703591 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:11 crc kubenswrapper[4780]: I1210 10:46:11.703605 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:11Z","lastTransitionTime":"2025-12-10T10:46:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:11 crc kubenswrapper[4780]: I1210 10:46:11.808002 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:11 crc kubenswrapper[4780]: I1210 10:46:11.808089 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:11 crc kubenswrapper[4780]: I1210 10:46:11.808104 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:11 crc kubenswrapper[4780]: I1210 10:46:11.808130 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:11 crc kubenswrapper[4780]: I1210 10:46:11.808148 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:11Z","lastTransitionTime":"2025-12-10T10:46:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:11 crc kubenswrapper[4780]: I1210 10:46:11.911644 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:11 crc kubenswrapper[4780]: I1210 10:46:11.911701 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:11 crc kubenswrapper[4780]: I1210 10:46:11.911729 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:11 crc kubenswrapper[4780]: I1210 10:46:11.911757 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:11 crc kubenswrapper[4780]: I1210 10:46:11.911774 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:11Z","lastTransitionTime":"2025-12-10T10:46:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:12 crc kubenswrapper[4780]: I1210 10:46:12.015908 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:12 crc kubenswrapper[4780]: I1210 10:46:12.015989 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:12 crc kubenswrapper[4780]: I1210 10:46:12.016005 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:12 crc kubenswrapper[4780]: I1210 10:46:12.016026 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:12 crc kubenswrapper[4780]: I1210 10:46:12.016037 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:12Z","lastTransitionTime":"2025-12-10T10:46:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:12 crc kubenswrapper[4780]: I1210 10:46:12.120008 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:12 crc kubenswrapper[4780]: I1210 10:46:12.120063 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:12 crc kubenswrapper[4780]: I1210 10:46:12.120074 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:12 crc kubenswrapper[4780]: I1210 10:46:12.120091 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:12 crc kubenswrapper[4780]: I1210 10:46:12.120103 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:12Z","lastTransitionTime":"2025-12-10T10:46:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:12 crc kubenswrapper[4780]: I1210 10:46:12.223005 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:12 crc kubenswrapper[4780]: I1210 10:46:12.223076 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:12 crc kubenswrapper[4780]: I1210 10:46:12.223089 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:12 crc kubenswrapper[4780]: I1210 10:46:12.223108 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:12 crc kubenswrapper[4780]: I1210 10:46:12.223121 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:12Z","lastTransitionTime":"2025-12-10T10:46:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:12 crc kubenswrapper[4780]: I1210 10:46:12.291733 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/24187953-1dc5-48d7-b00c-1e5876604b6b-metrics-certs\") pod \"network-metrics-daemon-46s5p\" (UID: \"24187953-1dc5-48d7-b00c-1e5876604b6b\") " pod="openshift-multus/network-metrics-daemon-46s5p" Dec 10 10:46:12 crc kubenswrapper[4780]: E1210 10:46:12.291954 4780 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Dec 10 10:46:12 crc kubenswrapper[4780]: E1210 10:46:12.292116 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/24187953-1dc5-48d7-b00c-1e5876604b6b-metrics-certs podName:24187953-1dc5-48d7-b00c-1e5876604b6b nodeName:}" failed. No retries permitted until 2025-12-10 10:46:44.292078398 +0000 UTC m=+109.145471841 (durationBeforeRetry 32s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/24187953-1dc5-48d7-b00c-1e5876604b6b-metrics-certs") pod "network-metrics-daemon-46s5p" (UID: "24187953-1dc5-48d7-b00c-1e5876604b6b") : object "openshift-multus"/"metrics-daemon-secret" not registered Dec 10 10:46:12 crc kubenswrapper[4780]: I1210 10:46:12.325770 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:12 crc kubenswrapper[4780]: I1210 10:46:12.325824 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:12 crc kubenswrapper[4780]: I1210 10:46:12.325838 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:12 crc kubenswrapper[4780]: I1210 10:46:12.325856 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:12 crc kubenswrapper[4780]: I1210 10:46:12.325867 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:12Z","lastTransitionTime":"2025-12-10T10:46:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:12 crc kubenswrapper[4780]: I1210 10:46:12.429414 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:12 crc kubenswrapper[4780]: I1210 10:46:12.429475 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:12 crc kubenswrapper[4780]: I1210 10:46:12.429490 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:12 crc kubenswrapper[4780]: I1210 10:46:12.429509 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:12 crc kubenswrapper[4780]: I1210 10:46:12.429522 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:12Z","lastTransitionTime":"2025-12-10T10:46:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:12 crc kubenswrapper[4780]: I1210 10:46:12.532989 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:12 crc kubenswrapper[4780]: I1210 10:46:12.533047 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:12 crc kubenswrapper[4780]: I1210 10:46:12.533065 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:12 crc kubenswrapper[4780]: I1210 10:46:12.533091 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:12 crc kubenswrapper[4780]: I1210 10:46:12.533110 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:12Z","lastTransitionTime":"2025-12-10T10:46:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:12 crc kubenswrapper[4780]: I1210 10:46:12.636525 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:12 crc kubenswrapper[4780]: I1210 10:46:12.636574 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:12 crc kubenswrapper[4780]: I1210 10:46:12.636587 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:12 crc kubenswrapper[4780]: I1210 10:46:12.636607 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:12 crc kubenswrapper[4780]: I1210 10:46:12.636620 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:12Z","lastTransitionTime":"2025-12-10T10:46:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:12 crc kubenswrapper[4780]: I1210 10:46:12.740228 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:12 crc kubenswrapper[4780]: I1210 10:46:12.740293 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:12 crc kubenswrapper[4780]: I1210 10:46:12.740307 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:12 crc kubenswrapper[4780]: I1210 10:46:12.740328 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:12 crc kubenswrapper[4780]: I1210 10:46:12.740343 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:12Z","lastTransitionTime":"2025-12-10T10:46:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:12 crc kubenswrapper[4780]: I1210 10:46:12.842610 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:12 crc kubenswrapper[4780]: I1210 10:46:12.842694 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:12 crc kubenswrapper[4780]: I1210 10:46:12.842714 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:12 crc kubenswrapper[4780]: I1210 10:46:12.842739 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:12 crc kubenswrapper[4780]: I1210 10:46:12.842757 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:12Z","lastTransitionTime":"2025-12-10T10:46:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:12 crc kubenswrapper[4780]: I1210 10:46:12.946736 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:12 crc kubenswrapper[4780]: I1210 10:46:12.946843 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:12 crc kubenswrapper[4780]: I1210 10:46:12.946875 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:12 crc kubenswrapper[4780]: I1210 10:46:12.946911 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:12 crc kubenswrapper[4780]: I1210 10:46:12.946993 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:12Z","lastTransitionTime":"2025-12-10T10:46:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:12 crc kubenswrapper[4780]: I1210 10:46:12.958741 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 10 10:46:12 crc kubenswrapper[4780]: I1210 10:46:12.958796 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 10 10:46:12 crc kubenswrapper[4780]: I1210 10:46:12.958796 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-46s5p" Dec 10 10:46:12 crc kubenswrapper[4780]: I1210 10:46:12.958811 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 10 10:46:12 crc kubenswrapper[4780]: E1210 10:46:12.959025 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 10 10:46:12 crc kubenswrapper[4780]: E1210 10:46:12.959240 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-46s5p" podUID="24187953-1dc5-48d7-b00c-1e5876604b6b" Dec 10 10:46:12 crc kubenswrapper[4780]: E1210 10:46:12.959403 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 10 10:46:12 crc kubenswrapper[4780]: E1210 10:46:12.959502 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 10 10:46:13 crc kubenswrapper[4780]: I1210 10:46:13.050261 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:13 crc kubenswrapper[4780]: I1210 10:46:13.050371 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:13 crc kubenswrapper[4780]: I1210 10:46:13.050397 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:13 crc kubenswrapper[4780]: I1210 10:46:13.050434 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:13 crc kubenswrapper[4780]: I1210 10:46:13.050544 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:13Z","lastTransitionTime":"2025-12-10T10:46:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:13 crc kubenswrapper[4780]: I1210 10:46:13.154122 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:13 crc kubenswrapper[4780]: I1210 10:46:13.154218 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:13 crc kubenswrapper[4780]: I1210 10:46:13.154237 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:13 crc kubenswrapper[4780]: I1210 10:46:13.154266 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:13 crc kubenswrapper[4780]: I1210 10:46:13.154282 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:13Z","lastTransitionTime":"2025-12-10T10:46:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:13 crc kubenswrapper[4780]: I1210 10:46:13.257354 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:13 crc kubenswrapper[4780]: I1210 10:46:13.257417 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:13 crc kubenswrapper[4780]: I1210 10:46:13.257434 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:13 crc kubenswrapper[4780]: I1210 10:46:13.257459 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:13 crc kubenswrapper[4780]: I1210 10:46:13.257477 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:13Z","lastTransitionTime":"2025-12-10T10:46:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:13 crc kubenswrapper[4780]: I1210 10:46:13.361073 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:13 crc kubenswrapper[4780]: I1210 10:46:13.361137 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:13 crc kubenswrapper[4780]: I1210 10:46:13.361154 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:13 crc kubenswrapper[4780]: I1210 10:46:13.361181 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:13 crc kubenswrapper[4780]: I1210 10:46:13.361197 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:13Z","lastTransitionTime":"2025-12-10T10:46:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:13 crc kubenswrapper[4780]: I1210 10:46:13.463628 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:13 crc kubenswrapper[4780]: I1210 10:46:13.463696 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:13 crc kubenswrapper[4780]: I1210 10:46:13.463709 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:13 crc kubenswrapper[4780]: I1210 10:46:13.463729 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:13 crc kubenswrapper[4780]: I1210 10:46:13.464166 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:13Z","lastTransitionTime":"2025-12-10T10:46:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:13 crc kubenswrapper[4780]: I1210 10:46:13.567531 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:13 crc kubenswrapper[4780]: I1210 10:46:13.567617 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:13 crc kubenswrapper[4780]: I1210 10:46:13.567631 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:13 crc kubenswrapper[4780]: I1210 10:46:13.567649 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:13 crc kubenswrapper[4780]: I1210 10:46:13.567660 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:13Z","lastTransitionTime":"2025-12-10T10:46:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:13 crc kubenswrapper[4780]: I1210 10:46:13.670867 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:13 crc kubenswrapper[4780]: I1210 10:46:13.670938 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:13 crc kubenswrapper[4780]: I1210 10:46:13.670948 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:13 crc kubenswrapper[4780]: I1210 10:46:13.670975 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:13 crc kubenswrapper[4780]: I1210 10:46:13.670989 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:13Z","lastTransitionTime":"2025-12-10T10:46:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:13 crc kubenswrapper[4780]: I1210 10:46:13.774859 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:13 crc kubenswrapper[4780]: I1210 10:46:13.774983 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:13 crc kubenswrapper[4780]: I1210 10:46:13.774997 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:13 crc kubenswrapper[4780]: I1210 10:46:13.775028 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:13 crc kubenswrapper[4780]: I1210 10:46:13.775044 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:13Z","lastTransitionTime":"2025-12-10T10:46:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:13 crc kubenswrapper[4780]: I1210 10:46:13.879005 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:13 crc kubenswrapper[4780]: I1210 10:46:13.879119 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:13 crc kubenswrapper[4780]: I1210 10:46:13.879163 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:13 crc kubenswrapper[4780]: I1210 10:46:13.879197 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:13 crc kubenswrapper[4780]: I1210 10:46:13.879572 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:13Z","lastTransitionTime":"2025-12-10T10:46:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:13 crc kubenswrapper[4780]: I1210 10:46:13.983487 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:13 crc kubenswrapper[4780]: I1210 10:46:13.983542 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:13 crc kubenswrapper[4780]: I1210 10:46:13.983557 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:13 crc kubenswrapper[4780]: I1210 10:46:13.983596 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:13 crc kubenswrapper[4780]: I1210 10:46:13.983608 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:13Z","lastTransitionTime":"2025-12-10T10:46:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:14 crc kubenswrapper[4780]: I1210 10:46:14.086605 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:14 crc kubenswrapper[4780]: I1210 10:46:14.086662 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:14 crc kubenswrapper[4780]: I1210 10:46:14.086678 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:14 crc kubenswrapper[4780]: I1210 10:46:14.086699 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:14 crc kubenswrapper[4780]: I1210 10:46:14.086713 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:14Z","lastTransitionTime":"2025-12-10T10:46:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:14 crc kubenswrapper[4780]: I1210 10:46:14.189005 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:14 crc kubenswrapper[4780]: I1210 10:46:14.189070 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:14 crc kubenswrapper[4780]: I1210 10:46:14.189080 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:14 crc kubenswrapper[4780]: I1210 10:46:14.189098 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:14 crc kubenswrapper[4780]: I1210 10:46:14.189108 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:14Z","lastTransitionTime":"2025-12-10T10:46:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:14 crc kubenswrapper[4780]: I1210 10:46:14.296478 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:14 crc kubenswrapper[4780]: I1210 10:46:14.296537 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:14 crc kubenswrapper[4780]: I1210 10:46:14.296548 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:14 crc kubenswrapper[4780]: I1210 10:46:14.296562 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:14 crc kubenswrapper[4780]: I1210 10:46:14.296571 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:14Z","lastTransitionTime":"2025-12-10T10:46:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:14 crc kubenswrapper[4780]: I1210 10:46:14.399468 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:14 crc kubenswrapper[4780]: I1210 10:46:14.399531 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:14 crc kubenswrapper[4780]: I1210 10:46:14.399542 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:14 crc kubenswrapper[4780]: I1210 10:46:14.399559 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:14 crc kubenswrapper[4780]: I1210 10:46:14.399570 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:14Z","lastTransitionTime":"2025-12-10T10:46:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:14 crc kubenswrapper[4780]: I1210 10:46:14.502938 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:14 crc kubenswrapper[4780]: I1210 10:46:14.503022 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:14 crc kubenswrapper[4780]: I1210 10:46:14.503034 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:14 crc kubenswrapper[4780]: I1210 10:46:14.503059 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:14 crc kubenswrapper[4780]: I1210 10:46:14.503073 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:14Z","lastTransitionTime":"2025-12-10T10:46:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:14 crc kubenswrapper[4780]: I1210 10:46:14.606152 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:14 crc kubenswrapper[4780]: I1210 10:46:14.606201 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:14 crc kubenswrapper[4780]: I1210 10:46:14.606211 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:14 crc kubenswrapper[4780]: I1210 10:46:14.606229 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:14 crc kubenswrapper[4780]: I1210 10:46:14.606243 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:14Z","lastTransitionTime":"2025-12-10T10:46:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:14 crc kubenswrapper[4780]: I1210 10:46:14.715933 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:14 crc kubenswrapper[4780]: I1210 10:46:14.715987 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:14 crc kubenswrapper[4780]: I1210 10:46:14.715997 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:14 crc kubenswrapper[4780]: I1210 10:46:14.716014 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:14 crc kubenswrapper[4780]: I1210 10:46:14.716024 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:14Z","lastTransitionTime":"2025-12-10T10:46:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:14 crc kubenswrapper[4780]: I1210 10:46:14.818544 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:14 crc kubenswrapper[4780]: I1210 10:46:14.818603 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:14 crc kubenswrapper[4780]: I1210 10:46:14.818613 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:14 crc kubenswrapper[4780]: I1210 10:46:14.818633 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:14 crc kubenswrapper[4780]: I1210 10:46:14.818644 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:14Z","lastTransitionTime":"2025-12-10T10:46:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:14 crc kubenswrapper[4780]: I1210 10:46:14.922268 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:14 crc kubenswrapper[4780]: I1210 10:46:14.922312 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:14 crc kubenswrapper[4780]: I1210 10:46:14.922322 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:14 crc kubenswrapper[4780]: I1210 10:46:14.922339 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:15 crc kubenswrapper[4780]: I1210 10:46:14.922349 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:14Z","lastTransitionTime":"2025-12-10T10:46:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:15 crc kubenswrapper[4780]: I1210 10:46:14.958440 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 10 10:46:15 crc kubenswrapper[4780]: I1210 10:46:14.958513 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 10 10:46:15 crc kubenswrapper[4780]: I1210 10:46:14.958563 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 10 10:46:15 crc kubenswrapper[4780]: I1210 10:46:14.958457 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-46s5p" Dec 10 10:46:15 crc kubenswrapper[4780]: E1210 10:46:14.958670 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 10 10:46:15 crc kubenswrapper[4780]: E1210 10:46:14.958807 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 10 10:46:15 crc kubenswrapper[4780]: E1210 10:46:14.958933 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 10 10:46:15 crc kubenswrapper[4780]: E1210 10:46:14.959035 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-46s5p" podUID="24187953-1dc5-48d7-b00c-1e5876604b6b" Dec 10 10:46:15 crc kubenswrapper[4780]: I1210 10:46:15.135853 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:15 crc kubenswrapper[4780]: I1210 10:46:15.136144 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:15 crc kubenswrapper[4780]: I1210 10:46:15.136297 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:15 crc kubenswrapper[4780]: I1210 10:46:15.136394 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:15 crc kubenswrapper[4780]: I1210 10:46:15.136481 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:15Z","lastTransitionTime":"2025-12-10T10:46:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:15 crc kubenswrapper[4780]: I1210 10:46:15.238599 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:15 crc kubenswrapper[4780]: I1210 10:46:15.239108 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:15 crc kubenswrapper[4780]: I1210 10:46:15.239268 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:15 crc kubenswrapper[4780]: I1210 10:46:15.239380 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:15 crc kubenswrapper[4780]: I1210 10:46:15.239480 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:15Z","lastTransitionTime":"2025-12-10T10:46:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:15 crc kubenswrapper[4780]: I1210 10:46:15.342629 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:15 crc kubenswrapper[4780]: I1210 10:46:15.342671 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:15 crc kubenswrapper[4780]: I1210 10:46:15.342681 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:15 crc kubenswrapper[4780]: I1210 10:46:15.342696 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:15 crc kubenswrapper[4780]: I1210 10:46:15.342705 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:15Z","lastTransitionTime":"2025-12-10T10:46:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:15 crc kubenswrapper[4780]: I1210 10:46:15.447108 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:15 crc kubenswrapper[4780]: I1210 10:46:15.447176 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:15 crc kubenswrapper[4780]: I1210 10:46:15.447189 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:15 crc kubenswrapper[4780]: I1210 10:46:15.447208 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:15 crc kubenswrapper[4780]: I1210 10:46:15.447221 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:15Z","lastTransitionTime":"2025-12-10T10:46:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:15 crc kubenswrapper[4780]: I1210 10:46:15.550195 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:15 crc kubenswrapper[4780]: I1210 10:46:15.550262 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:15 crc kubenswrapper[4780]: I1210 10:46:15.550278 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:15 crc kubenswrapper[4780]: I1210 10:46:15.550300 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:15 crc kubenswrapper[4780]: I1210 10:46:15.550312 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:15Z","lastTransitionTime":"2025-12-10T10:46:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:15 crc kubenswrapper[4780]: I1210 10:46:15.653043 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:15 crc kubenswrapper[4780]: I1210 10:46:15.653086 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:15 crc kubenswrapper[4780]: I1210 10:46:15.653104 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:15 crc kubenswrapper[4780]: I1210 10:46:15.653123 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:15 crc kubenswrapper[4780]: I1210 10:46:15.653137 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:15Z","lastTransitionTime":"2025-12-10T10:46:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:15 crc kubenswrapper[4780]: I1210 10:46:15.757030 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:15 crc kubenswrapper[4780]: I1210 10:46:15.757109 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:15 crc kubenswrapper[4780]: I1210 10:46:15.757131 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:15 crc kubenswrapper[4780]: I1210 10:46:15.757161 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:15 crc kubenswrapper[4780]: I1210 10:46:15.757183 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:15Z","lastTransitionTime":"2025-12-10T10:46:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:15 crc kubenswrapper[4780]: I1210 10:46:15.860186 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:15 crc kubenswrapper[4780]: I1210 10:46:15.860218 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:15 crc kubenswrapper[4780]: I1210 10:46:15.860226 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:15 crc kubenswrapper[4780]: I1210 10:46:15.860240 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:15 crc kubenswrapper[4780]: I1210 10:46:15.860249 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:15Z","lastTransitionTime":"2025-12-10T10:46:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:15 crc kubenswrapper[4780]: I1210 10:46:15.963575 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:15 crc kubenswrapper[4780]: I1210 10:46:15.963620 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:15 crc kubenswrapper[4780]: I1210 10:46:15.963629 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:15 crc kubenswrapper[4780]: I1210 10:46:15.963645 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:15 crc kubenswrapper[4780]: I1210 10:46:15.963655 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:15Z","lastTransitionTime":"2025-12-10T10:46:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:15 crc kubenswrapper[4780]: I1210 10:46:15.975164 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-msm77" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c1046aa3-9bf1-4013-8e8d-5629f08ed5e2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6a59aa8cd7a70e094ff80b0ae626aaabb77fb7ffdcffa1320945909c32da0704\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2ttwk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"20
25-12-10T10:45:30Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-msm77\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:15Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:15 crc kubenswrapper[4780]: I1210 10:46:15.987889 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"056c06c7-eb0f-4fb7-9f86-2884fd0d1e60\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b9592145faf65060693a3f1e14db253e10cd09c642ea0aa1a7682f5b06872f14\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:44:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://eccc5070135705c193e1020872bffc1b8aeb70dbe82f1bb520f36f012ca00703\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:44:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://72f3bd530fbcc66ed779ee9f6ef1c442f2697ca027aad5bd2473f42101d55528\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-reco
very-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:44:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3970fa6bd2aa3e93a4473d78218e9dd494aa5f7056efbcfb123160bb63162192\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3970fa6bd2aa3e93a4473d78218e9dd494aa5f7056efbcfb123160bb63162192\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:44:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:44:57Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:44:56Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:15Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:16 crc kubenswrapper[4780]: I1210 10:46:16.004281 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:16Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:16 crc kubenswrapper[4780]: I1210 10:46:16.023151 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://acd480054c5d19efdbcce679b2246ce2972593aa710b1074a2fa90006f351c66\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7c28d1629c1d97373d475f14e54585e81a67114d5a69e21be9d1e750040d6d5c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"m
ountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:16Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:16 crc kubenswrapper[4780]: I1210 10:46:16.041059 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://738eca4a4800d0b83e165aaad1b57939c0fca3335dbf043de168bcc240b2c32c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:16Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:16 crc kubenswrapper[4780]: I1210 10:46:16.056202 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:16Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:16 crc kubenswrapper[4780]: I1210 10:46:16.066506 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:16 crc kubenswrapper[4780]: I1210 10:46:16.066550 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:16 crc kubenswrapper[4780]: I1210 10:46:16.066565 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:16 crc kubenswrapper[4780]: I1210 10:46:16.066582 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:16 crc kubenswrapper[4780]: I1210 10:46:16.066593 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:16Z","lastTransitionTime":"2025-12-10T10:46:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:16 crc kubenswrapper[4780]: I1210 10:46:16.071963 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-8cwb7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"deadb49b-61b8-435f-8168-d7bd3c01b5ad\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b46e1864cd02c0ab49ad21329dedf09667d52948a3f59eac7f32302cc0b48bda\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r67pg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126
.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:27Z\\\"}}\" for pod \"openshift-multus\"/\"multus-8cwb7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:16Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:16 crc kubenswrapper[4780]: I1210 10:46:16.089178 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-rwfxn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e522fb8-b104-4f14-a3a2-628fbe0ef36c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f3318d09af03d52017f648e9727180287dbcfce150242eaeb5f95be2fa6a1374\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5855e463a188d72cb8eac92bd97b9b2f91550a9a5437d7df83405bbe7a104f5d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5855e463a188d72cb8eac92bd97b9b2f91550a9a5437d7df83405bbe7a104f5d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cn
ibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4a16db7b744311d27339440bc46ebd09ba4264c5892ff205c6125af2a127e67e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4a16db7b744311d27339440bc46ebd09ba4264c5892ff205c6125af2a127e67e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fcbeee4d4e545150d725187049a89aa3d35ddd8f7767a6af0ea84a385454c316\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fcbeee4d4e545150d725187049a89aa3d35ddd8f7767a6af0ea84a385454c316\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3d60cbe3cc8e817975ed6843aefdbfe3233e8857a6458d77322a623ed858ca18\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":t
rue,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3d60cbe3cc8e817975ed6843aefdbfe3233e8857a6458d77322a623ed858ca18\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0a0b3aad440bea346393ff35f0fa9492282c2a69b4a4c170f56ca7af63666077\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0a0b3aad440bea346393ff35f0fa9492282c2a69b4a4c170f56ca7af63666077\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3b72566e3e3a5f8eb12033cd2342bc3e9ad8f091f47c9d65309888d91a1ceaf4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3b72566e3e3a5f8eb12033cd2342bc3e9ad8f091f47c9d65309888d91a1ceaf4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:27Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-rwfxn\": Internal error 
occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:16Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:16 crc kubenswrapper[4780]: I1210 10:46:16.101989 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:16Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:16 crc kubenswrapper[4780]: I1210 10:46:16.113555 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-2lx8w" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8a3251eb-408c-42f1-b74d-261cb45eab71\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://414f485a56b9fb2fbea2bbc9687a588aabda6239cad44f8116ec680597dbe667\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lgkg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:27Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-2lx8w\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:16Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:16 crc kubenswrapper[4780]: I1210 10:46:16.141090 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-fpl55" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cc22221d-0c02-4e8c-8314-c2e6d9290b5e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ef0cc518de07a141be124545f2e547c630329ff507b0e0e71e4dee9b4be5d89a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ce7c3983123be2f92944322c2cf554e67cf80a51981415d9872bfc2db9f1ed3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cafa00280d671b48ed3628917793412a20b9d3be93a80d9a7775003081aea1ab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name
\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dbc444116b8592021f7d6ba0387d473521cba7c09ca78384be81f688af54554f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://782bcb7eb8341b73ff1c32c5af00aaefa2a513e5d1672f9f7df13cd05467131c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c39c3d56584d0939bfa9be938be09db5c93ddf50e0a56cacaa6e30c7b79475a1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\
"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8bae117db052a26101b63906a5bc25cd1ad0813261c80f78c00ee5235644c788\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d077dbe18449f6a39b341d00aa17f12c5f31287117bd944c6e65135f02fd3b13\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-10T10:45:50Z\\\",\\\"message\\\":\\\"rator_TCP_cluster options:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.5.21:8443:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {58a148b3-0a7b-4412-b447-f87788c4883f}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1210 10:45:49.480966 6272 obj_retry.go:420] Function iterateRetryResources for *v1.Pod ended (in 215.696µs)\\\\nF1210 10:45:49.480983 6272 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:49Z is after 2025-08-24T17:21:41\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:48Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8bae117db052a26101b63906a5bc25cd1ad0813261c80f78c00ee5235644c788\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-10T10:46:05Z\\\",\\\"message\\\":\\\"af71e}]}}] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {c02bd945-d57b-49ff-9cd3-202ed3574b26}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:} {Op:update Table:NAT Row:map[external_ip:192.168.126.11 logical_ip:10.217.0.59 options:{GoMap:map[stateless:false]} type:snat] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {dce28c51-c9f1-478b-97c8-7e209d6e7cbe}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:} {Op:mutate Table:Logical_Router Row:map[] Rows:[] Columns:[] Mutations:[{Column:nat Mutator:insert Value:{GoSet:[{GoUUID:dce28c51-c9f1-478b-97c8-7e209d6e7cbe}]}}] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {e3c4661a-36a6-47f0-a6c0-a4ee741f2224}] Until: 
Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1210 10:46:05.290278 6546 services_controller.go:451] Built service openshift-console/console cluster-wide LB for network=default: []services.LB{services.LB{Name:\\\\\\\"Service_openshift-console/console_TCP_cluster\\\\\\\", UUID:\\\\\\\"\\\\\\\", Protocol:\\\\\\\"TCP\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-console/console\\\\\\\"}, Opts:services.LBOpts{Reject:true, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{services.LBRule{Source:services.Addr{IP:\\\\\\\"10.217.5.194\\\\\\\", Port:443, Template:(*services.Template\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-10T10:46:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://df663a4e7ac68c8967db1167ea07b83862e9d52aa13b3f10e51934bb18e7397e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",
\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ada0c2d5e74e1805777a330c39a3c1c0fb677f876387207a83c79bd7ed06775a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ada0c2d5e74e1805777a330c39a3c1c0fb677f876387207a83c79bd7ed06775a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:27Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-fpl55\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:16Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:16 crc kubenswrapper[4780]: I1210 10:46:16.155054 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-6m7tj" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a78da08f-1ec1-4cc7-af55-d527da423778\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://920fb034d3d8ce6bf28b128e33093aa5daa724b15c51c7e208ed94ccc2f4840e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jvtxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a4509159a8aa8b24876c86f827f346063fd6cd2603c15242f6507b0a4afaff3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jvtxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:38Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-6m7tj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:16Z is after 2025-08-24T17:21:41Z" Dec 10 
10:46:16 crc kubenswrapper[4780]: I1210 10:46:16.310160 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:16 crc kubenswrapper[4780]: I1210 10:46:16.310198 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:16 crc kubenswrapper[4780]: I1210 10:46:16.310207 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:16 crc kubenswrapper[4780]: I1210 10:46:16.310248 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:16 crc kubenswrapper[4780]: I1210 10:46:16.310260 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:16Z","lastTransitionTime":"2025-12-10T10:46:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:16 crc kubenswrapper[4780]: I1210 10:46:16.326513 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-46s5p" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"24187953-1dc5-48d7-b00c-1e5876604b6b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:40Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:40Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jg69p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jg69p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:40Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-46s5p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:16Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:16 crc kubenswrapper[4780]: I1210 10:46:16.378396 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"da4d38a3-53c0-417d-a86f-3496714bd352\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7f0f1f88e79f81b145ad50cc2b0a6e66de12f33c6819aaa356c8e6f7808384f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:44:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d6c4b099607f36efe13488a53c66715ba756493eaa11612cef18b8174b7b48ef\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:44:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a352e1e0c2a6fcb27ed2d53b1952586d56239ac801471f41d319d4b9390f7334\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:44:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://960f2d7c617f5d0b3281d431308985bb7419af77581a404554f849d22ffa1687\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f71a2483b5040f737133b92baa6ea162f365c492a0d860a50ba8eabd7ca23519\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-10T10:45:21Z\\\",\\\"message\\\":\\\"file observer\\\\nW1210 10:45:20.821547 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1210 10:45:20.821865 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1210 10:45:20.824662 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-536389068/tls.crt::/tmp/serving-cert-536389068/tls.key\\\\\\\"\\\\nI1210 10:45:21.107127 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1210 10:45:21.109726 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1210 10:45:21.109750 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1210 10:45:21.109791 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1210 10:45:21.109797 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1210 10:45:21.115580 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1210 10:45:21.115608 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1210 10:45:21.115614 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1210 10:45:21.115621 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1210 10:45:21.115664 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1210 10:45:21.115672 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1210 10:45:21.115676 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1210 10:45:21.117097 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1210 10:45:21.119004 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:14Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://531ca3c69106ee33211f11eba4a6fcbcf6cf5b8b6ee16193b5a118999537d2e0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:00Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6a7d241fa7fd864088c8bfa29a4457b38b3e78a6846872d6f52aeebb0de99c58\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6a7d241fa7fd864088c8bfa29a4457b38b3e78a6846872d6f52aeebb0de99c58\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:44:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:44:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:44:56Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:16Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:16 crc kubenswrapper[4780]: I1210 10:46:16.408275 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8ca741d2df061bc77e981eca8a555ba59082c6db0357093490908a1b27054340\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:16Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:16 crc kubenswrapper[4780]: I1210 10:46:16.413789 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:16 crc kubenswrapper[4780]: I1210 10:46:16.413835 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:16 crc kubenswrapper[4780]: I1210 10:46:16.413846 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:16 crc kubenswrapper[4780]: I1210 10:46:16.413862 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:16 crc kubenswrapper[4780]: I1210 10:46:16.413872 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:16Z","lastTransitionTime":"2025-12-10T10:46:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:16 crc kubenswrapper[4780]: I1210 10:46:16.426981 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6bf1dca1-b191-4796-b326-baac53e84045\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://439376b6f14a03790c4d49523ce4f21e3ee73ee0a81954f873409cf956048f53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sh92h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://57ec3f3a4da992b4288db7d6633da18e479fdc56b10c503a2a44368671882f0d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sh92h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:27Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-xhdr5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:16Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:16 crc kubenswrapper[4780]: I1210 10:46:16.516341 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:16 crc kubenswrapper[4780]: I1210 10:46:16.516388 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:16 crc kubenswrapper[4780]: I1210 10:46:16.516397 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:16 crc kubenswrapper[4780]: I1210 10:46:16.516414 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:16 crc kubenswrapper[4780]: I1210 10:46:16.516426 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:16Z","lastTransitionTime":"2025-12-10T10:46:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:16 crc kubenswrapper[4780]: I1210 10:46:16.619483 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:16 crc kubenswrapper[4780]: I1210 10:46:16.619536 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:16 crc kubenswrapper[4780]: I1210 10:46:16.619546 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:16 crc kubenswrapper[4780]: I1210 10:46:16.619646 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:16 crc kubenswrapper[4780]: I1210 10:46:16.619663 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:16Z","lastTransitionTime":"2025-12-10T10:46:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:16 crc kubenswrapper[4780]: I1210 10:46:16.722983 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:16 crc kubenswrapper[4780]: I1210 10:46:16.723052 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:16 crc kubenswrapper[4780]: I1210 10:46:16.723065 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:16 crc kubenswrapper[4780]: I1210 10:46:16.723133 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:16 crc kubenswrapper[4780]: I1210 10:46:16.723151 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:16Z","lastTransitionTime":"2025-12-10T10:46:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:16 crc kubenswrapper[4780]: I1210 10:46:16.826913 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:16 crc kubenswrapper[4780]: I1210 10:46:16.826985 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:16 crc kubenswrapper[4780]: I1210 10:46:16.826998 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:16 crc kubenswrapper[4780]: I1210 10:46:16.827014 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:16 crc kubenswrapper[4780]: I1210 10:46:16.827029 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:16Z","lastTransitionTime":"2025-12-10T10:46:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:16 crc kubenswrapper[4780]: I1210 10:46:16.930213 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:16 crc kubenswrapper[4780]: I1210 10:46:16.930257 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:16 crc kubenswrapper[4780]: I1210 10:46:16.930266 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:16 crc kubenswrapper[4780]: I1210 10:46:16.930283 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:16 crc kubenswrapper[4780]: I1210 10:46:16.930294 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:16Z","lastTransitionTime":"2025-12-10T10:46:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:16 crc kubenswrapper[4780]: I1210 10:46:16.957906 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 10 10:46:16 crc kubenswrapper[4780]: I1210 10:46:16.957999 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 10 10:46:16 crc kubenswrapper[4780]: I1210 10:46:16.958028 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 10 10:46:16 crc kubenswrapper[4780]: I1210 10:46:16.958177 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-46s5p" Dec 10 10:46:16 crc kubenswrapper[4780]: E1210 10:46:16.958300 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 10 10:46:16 crc kubenswrapper[4780]: E1210 10:46:16.958382 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 10 10:46:16 crc kubenswrapper[4780]: E1210 10:46:16.958563 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 10 10:46:16 crc kubenswrapper[4780]: E1210 10:46:16.958642 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-46s5p" podUID="24187953-1dc5-48d7-b00c-1e5876604b6b" Dec 10 10:46:17 crc kubenswrapper[4780]: I1210 10:46:17.034071 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:17 crc kubenswrapper[4780]: I1210 10:46:17.034122 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:17 crc kubenswrapper[4780]: I1210 10:46:17.034136 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:17 crc kubenswrapper[4780]: I1210 10:46:17.034154 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:17 crc kubenswrapper[4780]: I1210 10:46:17.034165 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:17Z","lastTransitionTime":"2025-12-10T10:46:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:17 crc kubenswrapper[4780]: I1210 10:46:17.138333 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:17 crc kubenswrapper[4780]: I1210 10:46:17.138386 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:17 crc kubenswrapper[4780]: I1210 10:46:17.138404 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:17 crc kubenswrapper[4780]: I1210 10:46:17.138421 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:17 crc kubenswrapper[4780]: I1210 10:46:17.138433 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:17Z","lastTransitionTime":"2025-12-10T10:46:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:17 crc kubenswrapper[4780]: I1210 10:46:17.242357 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:17 crc kubenswrapper[4780]: I1210 10:46:17.242397 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:17 crc kubenswrapper[4780]: I1210 10:46:17.242406 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:17 crc kubenswrapper[4780]: I1210 10:46:17.242420 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:17 crc kubenswrapper[4780]: I1210 10:46:17.242431 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:17Z","lastTransitionTime":"2025-12-10T10:46:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:17 crc kubenswrapper[4780]: I1210 10:46:17.345624 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:17 crc kubenswrapper[4780]: I1210 10:46:17.345685 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:17 crc kubenswrapper[4780]: I1210 10:46:17.345701 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:17 crc kubenswrapper[4780]: I1210 10:46:17.345728 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:17 crc kubenswrapper[4780]: I1210 10:46:17.345752 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:17Z","lastTransitionTime":"2025-12-10T10:46:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:17 crc kubenswrapper[4780]: I1210 10:46:17.448404 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:17 crc kubenswrapper[4780]: I1210 10:46:17.448475 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:17 crc kubenswrapper[4780]: I1210 10:46:17.448491 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:17 crc kubenswrapper[4780]: I1210 10:46:17.448524 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:17 crc kubenswrapper[4780]: I1210 10:46:17.448541 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:17Z","lastTransitionTime":"2025-12-10T10:46:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:17 crc kubenswrapper[4780]: I1210 10:46:17.552668 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:17 crc kubenswrapper[4780]: I1210 10:46:17.552718 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:17 crc kubenswrapper[4780]: I1210 10:46:17.552729 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:17 crc kubenswrapper[4780]: I1210 10:46:17.552746 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:17 crc kubenswrapper[4780]: I1210 10:46:17.552756 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:17Z","lastTransitionTime":"2025-12-10T10:46:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:17 crc kubenswrapper[4780]: I1210 10:46:17.656058 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:17 crc kubenswrapper[4780]: I1210 10:46:17.656120 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:17 crc kubenswrapper[4780]: I1210 10:46:17.656135 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:17 crc kubenswrapper[4780]: I1210 10:46:17.656158 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:17 crc kubenswrapper[4780]: I1210 10:46:17.656169 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:17Z","lastTransitionTime":"2025-12-10T10:46:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:17 crc kubenswrapper[4780]: I1210 10:46:17.759430 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:17 crc kubenswrapper[4780]: I1210 10:46:17.759490 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:17 crc kubenswrapper[4780]: I1210 10:46:17.759502 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:17 crc kubenswrapper[4780]: I1210 10:46:17.759527 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:17 crc kubenswrapper[4780]: I1210 10:46:17.759541 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:17Z","lastTransitionTime":"2025-12-10T10:46:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:17 crc kubenswrapper[4780]: I1210 10:46:17.863380 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:17 crc kubenswrapper[4780]: I1210 10:46:17.863424 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:17 crc kubenswrapper[4780]: I1210 10:46:17.863434 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:17 crc kubenswrapper[4780]: I1210 10:46:17.863487 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:17 crc kubenswrapper[4780]: I1210 10:46:17.863502 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:17Z","lastTransitionTime":"2025-12-10T10:46:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:17 crc kubenswrapper[4780]: I1210 10:46:17.973502 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:17 crc kubenswrapper[4780]: I1210 10:46:17.973811 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:17 crc kubenswrapper[4780]: I1210 10:46:17.973886 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:17 crc kubenswrapper[4780]: I1210 10:46:17.974040 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:17 crc kubenswrapper[4780]: I1210 10:46:17.974157 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:17Z","lastTransitionTime":"2025-12-10T10:46:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:18 crc kubenswrapper[4780]: I1210 10:46:18.078135 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:18 crc kubenswrapper[4780]: I1210 10:46:18.078487 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:18 crc kubenswrapper[4780]: I1210 10:46:18.078575 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:18 crc kubenswrapper[4780]: I1210 10:46:18.078662 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:18 crc kubenswrapper[4780]: I1210 10:46:18.078790 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:18Z","lastTransitionTime":"2025-12-10T10:46:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:18 crc kubenswrapper[4780]: I1210 10:46:18.251031 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:18 crc kubenswrapper[4780]: I1210 10:46:18.251095 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:18 crc kubenswrapper[4780]: I1210 10:46:18.251113 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:18 crc kubenswrapper[4780]: I1210 10:46:18.251137 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:18 crc kubenswrapper[4780]: I1210 10:46:18.251154 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:18Z","lastTransitionTime":"2025-12-10T10:46:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:18 crc kubenswrapper[4780]: I1210 10:46:18.354240 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:18 crc kubenswrapper[4780]: I1210 10:46:18.354300 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:18 crc kubenswrapper[4780]: I1210 10:46:18.354317 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:18 crc kubenswrapper[4780]: I1210 10:46:18.354338 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:18 crc kubenswrapper[4780]: I1210 10:46:18.354352 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:18Z","lastTransitionTime":"2025-12-10T10:46:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:18 crc kubenswrapper[4780]: I1210 10:46:18.457362 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:18 crc kubenswrapper[4780]: I1210 10:46:18.457418 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:18 crc kubenswrapper[4780]: I1210 10:46:18.457430 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:18 crc kubenswrapper[4780]: I1210 10:46:18.457450 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:18 crc kubenswrapper[4780]: I1210 10:46:18.457473 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:18Z","lastTransitionTime":"2025-12-10T10:46:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:18 crc kubenswrapper[4780]: I1210 10:46:18.560411 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:18 crc kubenswrapper[4780]: I1210 10:46:18.560493 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:18 crc kubenswrapper[4780]: I1210 10:46:18.560517 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:18 crc kubenswrapper[4780]: I1210 10:46:18.560550 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:18 crc kubenswrapper[4780]: I1210 10:46:18.560574 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:18Z","lastTransitionTime":"2025-12-10T10:46:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:18 crc kubenswrapper[4780]: I1210 10:46:18.664549 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:18 crc kubenswrapper[4780]: I1210 10:46:18.664639 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:18 crc kubenswrapper[4780]: I1210 10:46:18.664668 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:18 crc kubenswrapper[4780]: I1210 10:46:18.664701 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:18 crc kubenswrapper[4780]: I1210 10:46:18.664724 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:18Z","lastTransitionTime":"2025-12-10T10:46:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:18 crc kubenswrapper[4780]: I1210 10:46:18.768556 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:18 crc kubenswrapper[4780]: I1210 10:46:18.768628 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:18 crc kubenswrapper[4780]: I1210 10:46:18.768642 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:18 crc kubenswrapper[4780]: I1210 10:46:18.768669 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:18 crc kubenswrapper[4780]: I1210 10:46:18.768679 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:18Z","lastTransitionTime":"2025-12-10T10:46:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:18 crc kubenswrapper[4780]: I1210 10:46:18.871871 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:18 crc kubenswrapper[4780]: I1210 10:46:18.872028 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:18 crc kubenswrapper[4780]: I1210 10:46:18.872063 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:18 crc kubenswrapper[4780]: I1210 10:46:18.872093 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:18 crc kubenswrapper[4780]: I1210 10:46:18.872113 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:18Z","lastTransitionTime":"2025-12-10T10:46:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:18 crc kubenswrapper[4780]: I1210 10:46:18.958248 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-46s5p" Dec 10 10:46:18 crc kubenswrapper[4780]: I1210 10:46:18.958329 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 10 10:46:18 crc kubenswrapper[4780]: I1210 10:46:18.958363 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 10 10:46:18 crc kubenswrapper[4780]: I1210 10:46:18.958472 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 10 10:46:18 crc kubenswrapper[4780]: E1210 10:46:18.958489 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-46s5p" podUID="24187953-1dc5-48d7-b00c-1e5876604b6b" Dec 10 10:46:18 crc kubenswrapper[4780]: E1210 10:46:18.959151 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 10 10:46:18 crc kubenswrapper[4780]: E1210 10:46:18.959245 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 10 10:46:18 crc kubenswrapper[4780]: E1210 10:46:18.959326 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 10 10:46:18 crc kubenswrapper[4780]: I1210 10:46:18.959754 4780 scope.go:117] "RemoveContainer" containerID="8bae117db052a26101b63906a5bc25cd1ad0813261c80f78c00ee5235644c788" Dec 10 10:46:18 crc kubenswrapper[4780]: E1210 10:46:18.960378 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-fpl55_openshift-ovn-kubernetes(cc22221d-0c02-4e8c-8314-c2e6d9290b5e)\"" pod="openshift-ovn-kubernetes/ovnkube-node-fpl55" podUID="cc22221d-0c02-4e8c-8314-c2e6d9290b5e" Dec 10 10:46:18 crc kubenswrapper[4780]: I1210 10:46:18.975476 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:18 crc kubenswrapper[4780]: I1210 10:46:18.975570 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:18 crc kubenswrapper[4780]: I1210 10:46:18.975595 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:18 crc kubenswrapper[4780]: I1210 10:46:18.975627 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:18 crc kubenswrapper[4780]: I1210 10:46:18.975655 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:18Z","lastTransitionTime":"2025-12-10T10:46:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:18 crc kubenswrapper[4780]: I1210 10:46:18.978754 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:18Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:18 crc kubenswrapper[4780]: I1210 10:46:18.998912 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-8cwb7" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"deadb49b-61b8-435f-8168-d7bd3c01b5ad\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b46e1864cd02c0ab49ad21329dedf09667d52948a3f59eac7f32302cc0b48bda\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r67pg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:27Z\\\"}}\" for pod \"openshift-multus\"/\"multus-8cwb7\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:18Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:19 crc kubenswrapper[4780]: I1210 10:46:19.016773 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-rwfxn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e522fb8-b104-4f14-a3a2-628fbe0ef36c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f3318d09af03d52017f648e9727180287dbcfce150242eaeb5f95be2fa6a1374\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5855e463a188d72cb8eac92bd97b9b2f91550a9a5437d7df83405bbe7a104f5d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5855e463a188d72cb8eac92bd97b9b2f91550a9a5437d7df83405bbe7a104f5d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.
io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4a16db7b744311d27339440bc46ebd09ba4264c5892ff205c6125af2a127e67e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4a16db7b744311d27339440bc46ebd09ba4264c5892ff205c6125af2a127e67e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fcbeee4d4e545150d725187049a89aa3d35ddd8f7767a6af0ea84a385454c316\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fcbeee4d4e545150d725187049a89aa3d35ddd8f7767a6af0ea84a385454c316\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3d60cbe3cc8e817975ed6843aefdbfe3233e8857a6458d77322a623ed858ca18\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3d60cbe3cc8e817975ed6843aefdbfe3233e8857a6458d77322a623ed858ca18\\\",\\\"exitCode\\\":0,\\\
"finishedAt\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0a0b3aad440bea346393ff35f0fa9492282c2a69b4a4c170f56ca7af63666077\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0a0b3aad440bea346393ff35f0fa9492282c2a69b4a4c170f56ca7af63666077\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3b72566e3e3a5f8eb12033cd2342bc3e9ad8f091f47c9d65309888d91a1ceaf4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3b72566e3e3a5f8eb12033cd2342bc3e9ad8f091f47c9d65309888d91a1ceaf4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:27Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-rwfxn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has 
expired or is not yet valid: current time 2025-12-10T10:46:19Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:19 crc kubenswrapper[4780]: I1210 10:46:19.034320 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://738eca4a4800d0b83e165aaad1b57939c0fca3335dbf043de168bcc240b2c32c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:19Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:19 crc kubenswrapper[4780]: I1210 10:46:19.065482 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-fpl55" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cc22221d-0c02-4e8c-8314-c2e6d9290b5e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ef0cc518de07a141be124545f2e547c630329ff507b0e0e71e4dee9b4be5d89a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ce7c3983123be2f92944322c2cf554e67cf80a51981415d9872bfc2db9f1ed3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cafa00280d671b48ed3628917793412a20b9d3be93a80d9a7775003081aea1ab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name
\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dbc444116b8592021f7d6ba0387d473521cba7c09ca78384be81f688af54554f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://782bcb7eb8341b73ff1c32c5af00aaefa2a513e5d1672f9f7df13cd05467131c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c39c3d56584d0939bfa9be938be09db5c93ddf50e0a56cacaa6e30c7b79475a1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\
"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8bae117db052a26101b63906a5bc25cd1ad0813261c80f78c00ee5235644c788\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8bae117db052a26101b63906a5bc25cd1ad0813261c80f78c00ee5235644c788\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-10T10:46:05Z\\\",\\\"message\\\":\\\"af71e}]}}] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {c02bd945-d57b-49ff-9cd3-202ed3574b26}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:} {Op:update Table:NAT Row:map[external_ip:192.168.126.11 logical_ip:10.217.0.59 options:{GoMap:map[stateless:false]} type:snat] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {dce28c51-c9f1-478b-97c8-7e209d6e7cbe}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:} {Op:mutate Table:Logical_Router Row:map[] Rows:[] Columns:[] Mutations:[{Column:nat Mutator:insert Value:{GoSet:[{GoUUID:dce28c51-c9f1-478b-97c8-7e209d6e7cbe}]}}] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {e3c4661a-36a6-47f0-a6c0-a4ee741f2224}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1210 10:46:05.290278 6546 services_controller.go:451] Built service openshift-console/console cluster-wide LB for network=default: []services.LB{services.LB{Name:\\\\\\\"Service_openshift-console/console_TCP_cluster\\\\\\\", UUID:\\\\\\\"\\\\\\\", Protocol:\\\\\\\"TCP\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-console/console\\\\\\\"}, Opts:services.LBOpts{Reject:true, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{services.LBRule{Source:services.Addr{IP:\\\\\\\"10.217.5.194\\\\\\\", Port:443, Template:(*services.Template\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-10T10:46:04Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-fpl55_openshift-ovn-kubernetes(cc22221d-0c02-4e8c-8314-c2e6d9290b5e)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://df663a4e7ac68c8967db1167ea07b83862e9d52aa13b3f10e51934bb18e7397e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ada0c2d5e74e1805777a330c39a3c1c0fb677f876387207a83c79bd7ed06775a\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ada0c2d5e74e1805777a330c39a3c1c0fb677f876387207a83c79bd7ed06775a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:27Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-fpl55\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:19Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:19 crc kubenswrapper[4780]: I1210 10:46:19.077179 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-6m7tj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a78da08f-1ec1-4cc7-af55-d527da423778\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://920fb034d3d8ce6bf28b128e33093aa5daa724b15c51c7e208ed94ccc2f4840e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jvtxj
\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a4509159a8aa8b24876c86f827f346063fd6cd2603c15242f6507b0a4afaff3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jvtxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:38Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-6m7tj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:19Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:19 crc kubenswrapper[4780]: I1210 10:46:19.078421 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:19 crc kubenswrapper[4780]: I1210 10:46:19.078567 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:19 crc kubenswrapper[4780]: I1210 10:46:19.078671 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:19 crc kubenswrapper[4780]: I1210 10:46:19.078784 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:19 crc kubenswrapper[4780]: I1210 10:46:19.078951 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:19Z","lastTransitionTime":"2025-12-10T10:46:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:19 crc kubenswrapper[4780]: I1210 10:46:19.089682 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-46s5p" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"24187953-1dc5-48d7-b00c-1e5876604b6b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:40Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:40Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jg69p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jg69p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:40Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-46s5p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:19Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:19 crc kubenswrapper[4780]: I1210 10:46:19.103819 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:19Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:19 crc kubenswrapper[4780]: I1210 10:46:19.115260 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-2lx8w" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8a3251eb-408c-42f1-b74d-261cb45eab71\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://414f485a56b9fb2fbea2bbc9687a588aabda6239cad44f8116ec680597dbe667\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lgkg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:27Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-2lx8w\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:19Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:19 crc kubenswrapper[4780]: I1210 10:46:19.133000 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6bf1dca1-b191-4796-b326-baac53e84045\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://439376b6f14a03790c4d49523ce4f21e3ee73ee0a81954f873409cf956048f53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sh92h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://57ec3f3a4da992b4288db7d6633da18e479fdc56b10c503a2a44368671882f0d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sh92h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:27Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-xhdr5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:19Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:19 crc kubenswrapper[4780]: I1210 10:46:19.156694 4780 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"da4d38a3-53c0-417d-a86f-3496714bd352\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7f0f1f88e79f81b145ad50cc2b0a6e66de12f33c6819aaa356c8e6f7808384f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:44:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d6c4b099607f36efe13488a53c66715ba756493eaa11612cef18b8174b7b48ef\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:44:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a352e1e0c2a6fcb27ed2d53b1952586d56239ac801471f41d319d4b9390f7334\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:44:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://960f2d7c617f5d0b3281d431308985bb7419af77581
a404554f849d22ffa1687\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f71a2483b5040f737133b92baa6ea162f365c492a0d860a50ba8eabd7ca23519\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-10T10:45:21Z\\\",\\\"message\\\":\\\"file observer\\\\nW1210 10:45:20.821547 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1210 10:45:20.821865 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1210 10:45:20.824662 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-536389068/tls.crt::/tmp/serving-cert-536389068/tls.key\\\\\\\"\\\\nI1210 10:45:21.107127 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1210 10:45:21.109726 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1210 10:45:21.109750 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1210 10:45:21.109791 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1210 10:45:21.109797 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1210 10:45:21.115580 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1210 10:45:21.115608 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1210 10:45:21.115614 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1210 10:45:21.115621 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1210 10:45:21.115664 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1210 10:45:21.115672 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1210 10:45:21.115676 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1210 10:45:21.117097 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1210 10:45:21.119004 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:14Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://531ca3c69106ee33211f11eba4a6fcbcf6cf5b8b6ee16193b5a118999537d2e0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:00Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6a7d241fa7fd864088c8bfa29a4457b38b3e78a6846872d6f52aeebb0de99c58\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6a7d241fa7fd864088c8bfa29a4457b38b3e78a6846872d6f52aeebb0de99c58\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:44:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:44:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:44:56Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:19Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:19 crc kubenswrapper[4780]: I1210 10:46:19.173267 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8ca741d2df061bc77e981eca8a555ba59082c6db0357093490908a1b27054340\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:19Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:19 crc kubenswrapper[4780]: I1210 10:46:19.183551 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:19 crc kubenswrapper[4780]: I1210 10:46:19.183725 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:19 crc kubenswrapper[4780]: I1210 10:46:19.183803 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:19 crc kubenswrapper[4780]: I1210 10:46:19.183874 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:19 crc kubenswrapper[4780]: I1210 10:46:19.183961 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:19Z","lastTransitionTime":"2025-12-10T10:46:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:19 crc kubenswrapper[4780]: I1210 10:46:19.193158 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"056c06c7-eb0f-4fb7-9f86-2884fd0d1e60\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b9592145faf65060693a3f1e14db253e10cd09c642ea0aa1a7682f5b06872f14\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:44:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://eccc5070135705c193e1020872bffc1b8aeb70dbe82f1bb520f36f012ca00703\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:44:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://72f3bd530fbcc66ed779ee9f6ef1c442f2697ca027aad5bd2473f42101d55528\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:44:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"
cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3970fa6bd2aa3e93a4473d78218e9dd494aa5f7056efbcfb123160bb63162192\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3970fa6bd2aa3e93a4473d78218e9dd494aa5f7056efbcfb123160bb63162192\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:44:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:44:57Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:44:56Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:19Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:19 crc kubenswrapper[4780]: I1210 10:46:19.208691 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:19Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:19 crc kubenswrapper[4780]: I1210 10:46:19.222805 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://acd480054c5d19efdbcce679b2246ce2972593aa710b1074a2fa90006f351c66\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7c28d1629c1d97373d475f14e54585e81a67114d5a69e21be9d1e750040d6d5c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"m
ountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:19Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:19 crc kubenswrapper[4780]: I1210 10:46:19.235052 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-msm77" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c1046aa3-9bf1-4013-8e8d-5629f08ed5e2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6a59aa8cd7a70e094ff80b0ae626aaabb77fb7ffdcffa1320945909c32da0704\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2ttwk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:30Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-msm77\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:19Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:19 crc kubenswrapper[4780]: I1210 10:46:19.287172 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Dec 10 10:46:19 crc kubenswrapper[4780]: I1210 10:46:19.287235 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:19 crc kubenswrapper[4780]: I1210 10:46:19.287245 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:19 crc kubenswrapper[4780]: I1210 10:46:19.287265 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:19 crc kubenswrapper[4780]: I1210 10:46:19.287276 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:19Z","lastTransitionTime":"2025-12-10T10:46:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:19 crc kubenswrapper[4780]: I1210 10:46:19.390040 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:19 crc kubenswrapper[4780]: I1210 10:46:19.390094 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:19 crc kubenswrapper[4780]: I1210 10:46:19.390104 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:19 crc kubenswrapper[4780]: I1210 10:46:19.390122 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:19 crc kubenswrapper[4780]: I1210 10:46:19.390132 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:19Z","lastTransitionTime":"2025-12-10T10:46:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:19 crc kubenswrapper[4780]: I1210 10:46:19.493237 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:19 crc kubenswrapper[4780]: I1210 10:46:19.493287 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:19 crc kubenswrapper[4780]: I1210 10:46:19.493297 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:19 crc kubenswrapper[4780]: I1210 10:46:19.493318 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:19 crc kubenswrapper[4780]: I1210 10:46:19.493329 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:19Z","lastTransitionTime":"2025-12-10T10:46:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:19 crc kubenswrapper[4780]: I1210 10:46:19.597342 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:19 crc kubenswrapper[4780]: I1210 10:46:19.597418 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:19 crc kubenswrapper[4780]: I1210 10:46:19.597437 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:19 crc kubenswrapper[4780]: I1210 10:46:19.597469 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:19 crc kubenswrapper[4780]: I1210 10:46:19.597495 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:19Z","lastTransitionTime":"2025-12-10T10:46:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:19 crc kubenswrapper[4780]: I1210 10:46:19.700909 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:19 crc kubenswrapper[4780]: I1210 10:46:19.700992 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:19 crc kubenswrapper[4780]: I1210 10:46:19.701004 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:19 crc kubenswrapper[4780]: I1210 10:46:19.701032 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:19 crc kubenswrapper[4780]: I1210 10:46:19.701045 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:19Z","lastTransitionTime":"2025-12-10T10:46:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:19 crc kubenswrapper[4780]: I1210 10:46:19.803856 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:19 crc kubenswrapper[4780]: I1210 10:46:19.804173 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:19 crc kubenswrapper[4780]: I1210 10:46:19.804245 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:19 crc kubenswrapper[4780]: I1210 10:46:19.804386 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:19 crc kubenswrapper[4780]: I1210 10:46:19.804461 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:19Z","lastTransitionTime":"2025-12-10T10:46:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:19 crc kubenswrapper[4780]: I1210 10:46:19.910332 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:19 crc kubenswrapper[4780]: I1210 10:46:19.910438 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:19 crc kubenswrapper[4780]: I1210 10:46:19.910455 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:19 crc kubenswrapper[4780]: I1210 10:46:19.910481 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:19 crc kubenswrapper[4780]: I1210 10:46:19.910493 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:19Z","lastTransitionTime":"2025-12-10T10:46:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:19 crc kubenswrapper[4780]: I1210 10:46:19.989837 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:19 crc kubenswrapper[4780]: I1210 10:46:19.989945 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:19 crc kubenswrapper[4780]: I1210 10:46:19.989969 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:19 crc kubenswrapper[4780]: I1210 10:46:19.990000 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:19 crc kubenswrapper[4780]: I1210 10:46:19.990020 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:19Z","lastTransitionTime":"2025-12-10T10:46:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:20 crc kubenswrapper[4780]: E1210 10:46:20.013402 4780 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:46:19Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:19Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:46:19Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:19Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:46:19Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:19Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:46:19Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:19Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"90085e8a-5ea9-4564-85e4-5635b00d094d\\\",\\\"systemUUID\\\":\\\"0182e509-70c5-4f26-9ad3-610230bb601e\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:20Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:20 crc kubenswrapper[4780]: I1210 10:46:20.019750 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:20 crc kubenswrapper[4780]: I1210 10:46:20.019822 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Dec 10 10:46:20 crc kubenswrapper[4780]: I1210 10:46:20.019844 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:20 crc kubenswrapper[4780]: I1210 10:46:20.019879 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:20 crc kubenswrapper[4780]: I1210 10:46:20.019966 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:20Z","lastTransitionTime":"2025-12-10T10:46:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:20 crc kubenswrapper[4780]: E1210 10:46:20.040219 4780 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:46:20Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:20Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:46:20Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:20Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:46:20Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:20Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:46:20Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:20Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"90085e8a-5ea9-4564-85e4-5635b00d094d\\\",\\\"systemUUID\\\":\\\"0182e509-70c5-4f26-9ad3-610230bb601e\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:20Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:20 crc kubenswrapper[4780]: I1210 10:46:20.047430 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:20 crc kubenswrapper[4780]: I1210 10:46:20.047543 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Dec 10 10:46:20 crc kubenswrapper[4780]: I1210 10:46:20.047557 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:20 crc kubenswrapper[4780]: I1210 10:46:20.047587 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:20 crc kubenswrapper[4780]: I1210 10:46:20.047616 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:20Z","lastTransitionTime":"2025-12-10T10:46:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:20 crc kubenswrapper[4780]: E1210 10:46:20.069383 4780 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:46:20Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:20Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:46:20Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:20Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:46:20Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:20Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:46:20Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:20Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"90085e8a-5ea9-4564-85e4-5635b00d094d\\\",\\\"systemUUID\\\":\\\"0182e509-70c5-4f26-9ad3-610230bb601e\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:20Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:20 crc kubenswrapper[4780]: I1210 10:46:20.075233 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:20 crc kubenswrapper[4780]: I1210 10:46:20.075295 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Dec 10 10:46:20 crc kubenswrapper[4780]: I1210 10:46:20.075308 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:20 crc kubenswrapper[4780]: I1210 10:46:20.075331 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:20 crc kubenswrapper[4780]: I1210 10:46:20.075347 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:20Z","lastTransitionTime":"2025-12-10T10:46:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:20 crc kubenswrapper[4780]: E1210 10:46:20.096036 4780 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:46:20Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:20Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:46:20Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:20Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:46:20Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:20Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:46:20Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:20Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"90085e8a-5ea9-4564-85e4-5635b00d094d\\\",\\\"systemUUID\\\":\\\"0182e509-70c5-4f26-9ad3-610230bb601e\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:20Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:20 crc kubenswrapper[4780]: I1210 10:46:20.102004 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:20 crc kubenswrapper[4780]: I1210 10:46:20.102062 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Dec 10 10:46:20 crc kubenswrapper[4780]: I1210 10:46:20.102076 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:20 crc kubenswrapper[4780]: I1210 10:46:20.102097 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:20 crc kubenswrapper[4780]: I1210 10:46:20.102111 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:20Z","lastTransitionTime":"2025-12-10T10:46:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:20 crc kubenswrapper[4780]: E1210 10:46:20.119506 4780 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:46:20Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:20Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:46:20Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:20Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:46:20Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:20Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:46:20Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:20Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"90085e8a-5ea9-4564-85e4-5635b00d094d\\\",\\\"systemUUID\\\":\\\"0182e509-70c5-4f26-9ad3-610230bb601e\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:20Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:20 crc kubenswrapper[4780]: E1210 10:46:20.119688 4780 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Dec 10 10:46:20 crc kubenswrapper[4780]: I1210 10:46:20.122577 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Dec 10 10:46:20 crc kubenswrapper[4780]: I1210 10:46:20.122999 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:20 crc kubenswrapper[4780]: I1210 10:46:20.123104 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:20 crc kubenswrapper[4780]: I1210 10:46:20.123226 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:20 crc kubenswrapper[4780]: I1210 10:46:20.123335 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:20Z","lastTransitionTime":"2025-12-10T10:46:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:20 crc kubenswrapper[4780]: I1210 10:46:20.226066 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:20 crc kubenswrapper[4780]: I1210 10:46:20.226165 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:20 crc kubenswrapper[4780]: I1210 10:46:20.226185 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:20 crc kubenswrapper[4780]: I1210 10:46:20.226210 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:20 crc kubenswrapper[4780]: I1210 10:46:20.226228 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:20Z","lastTransitionTime":"2025-12-10T10:46:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:20 crc kubenswrapper[4780]: I1210 10:46:20.328840 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:20 crc kubenswrapper[4780]: I1210 10:46:20.329302 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:20 crc kubenswrapper[4780]: I1210 10:46:20.329378 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:20 crc kubenswrapper[4780]: I1210 10:46:20.329464 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:20 crc kubenswrapper[4780]: I1210 10:46:20.329571 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:20Z","lastTransitionTime":"2025-12-10T10:46:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:20 crc kubenswrapper[4780]: I1210 10:46:20.432320 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:20 crc kubenswrapper[4780]: I1210 10:46:20.432395 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:20 crc kubenswrapper[4780]: I1210 10:46:20.432407 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:20 crc kubenswrapper[4780]: I1210 10:46:20.432426 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:20 crc kubenswrapper[4780]: I1210 10:46:20.432438 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:20Z","lastTransitionTime":"2025-12-10T10:46:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:20 crc kubenswrapper[4780]: I1210 10:46:20.535675 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:20 crc kubenswrapper[4780]: I1210 10:46:20.535763 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:20 crc kubenswrapper[4780]: I1210 10:46:20.535792 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:20 crc kubenswrapper[4780]: I1210 10:46:20.535821 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:20 crc kubenswrapper[4780]: I1210 10:46:20.535839 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:20Z","lastTransitionTime":"2025-12-10T10:46:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:20 crc kubenswrapper[4780]: I1210 10:46:20.639415 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:20 crc kubenswrapper[4780]: I1210 10:46:20.639489 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:20 crc kubenswrapper[4780]: I1210 10:46:20.639515 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:20 crc kubenswrapper[4780]: I1210 10:46:20.639539 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:20 crc kubenswrapper[4780]: I1210 10:46:20.639557 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:20Z","lastTransitionTime":"2025-12-10T10:46:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:20 crc kubenswrapper[4780]: I1210 10:46:20.743055 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:20 crc kubenswrapper[4780]: I1210 10:46:20.743639 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:20 crc kubenswrapper[4780]: I1210 10:46:20.743714 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:20 crc kubenswrapper[4780]: I1210 10:46:20.743782 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:20 crc kubenswrapper[4780]: I1210 10:46:20.744036 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:20Z","lastTransitionTime":"2025-12-10T10:46:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:20 crc kubenswrapper[4780]: I1210 10:46:20.846867 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:20 crc kubenswrapper[4780]: I1210 10:46:20.846946 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:20 crc kubenswrapper[4780]: I1210 10:46:20.846958 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:20 crc kubenswrapper[4780]: I1210 10:46:20.846975 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:20 crc kubenswrapper[4780]: I1210 10:46:20.846985 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:20Z","lastTransitionTime":"2025-12-10T10:46:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:20 crc kubenswrapper[4780]: I1210 10:46:20.949954 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:20 crc kubenswrapper[4780]: I1210 10:46:20.950045 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:20 crc kubenswrapper[4780]: I1210 10:46:20.950073 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:20 crc kubenswrapper[4780]: I1210 10:46:20.950113 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:20 crc kubenswrapper[4780]: I1210 10:46:20.950139 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:20Z","lastTransitionTime":"2025-12-10T10:46:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:20 crc kubenswrapper[4780]: I1210 10:46:20.958302 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-46s5p" Dec 10 10:46:20 crc kubenswrapper[4780]: I1210 10:46:20.958354 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 10 10:46:20 crc kubenswrapper[4780]: I1210 10:46:20.958409 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 10 10:46:20 crc kubenswrapper[4780]: I1210 10:46:20.958317 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 10 10:46:20 crc kubenswrapper[4780]: E1210 10:46:20.958481 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-46s5p" podUID="24187953-1dc5-48d7-b00c-1e5876604b6b" Dec 10 10:46:20 crc kubenswrapper[4780]: E1210 10:46:20.958751 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 10 10:46:20 crc kubenswrapper[4780]: E1210 10:46:20.958734 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 10 10:46:20 crc kubenswrapper[4780]: E1210 10:46:20.958874 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 10 10:46:21 crc kubenswrapper[4780]: I1210 10:46:21.052838 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:21 crc kubenswrapper[4780]: I1210 10:46:21.052882 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:21 crc kubenswrapper[4780]: I1210 10:46:21.052892 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:21 crc kubenswrapper[4780]: I1210 10:46:21.052908 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:21 crc kubenswrapper[4780]: I1210 10:46:21.052937 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:21Z","lastTransitionTime":"2025-12-10T10:46:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:22 crc kubenswrapper[4780]: I1210 10:46:22.133491 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 10 10:46:22 crc kubenswrapper[4780]: E1210 10:46:22.133609 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 10 10:46:22 crc kubenswrapper[4780]: I1210 10:46:22.133672 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:22 crc kubenswrapper[4780]: I1210 10:46:22.133731 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:22 crc kubenswrapper[4780]: I1210 10:46:22.133760 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:22 crc kubenswrapper[4780]: I1210 10:46:22.133775 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:22 crc kubenswrapper[4780]: I1210 10:46:22.133785 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 10 10:46:22 crc kubenswrapper[4780]: I1210 10:46:22.133785 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:22Z","lastTransitionTime":"2025-12-10T10:46:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:22 crc kubenswrapper[4780]: E1210 10:46:22.133832 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 10 10:46:22 crc kubenswrapper[4780]: I1210 10:46:22.133859 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-46s5p" Dec 10 10:46:22 crc kubenswrapper[4780]: E1210 10:46:22.133903 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-46s5p" podUID="24187953-1dc5-48d7-b00c-1e5876604b6b" Dec 10 10:46:22 crc kubenswrapper[4780]: I1210 10:46:22.134005 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 10 10:46:22 crc kubenswrapper[4780]: E1210 10:46:22.134045 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 10 10:46:22 crc kubenswrapper[4780]: I1210 10:46:22.236764 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:22 crc kubenswrapper[4780]: I1210 10:46:22.236857 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:22 crc kubenswrapper[4780]: I1210 10:46:22.236878 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:22 crc kubenswrapper[4780]: I1210 10:46:22.236907 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:22 crc kubenswrapper[4780]: I1210 10:46:22.236966 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:22Z","lastTransitionTime":"2025-12-10T10:46:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:22 crc kubenswrapper[4780]: I1210 10:46:22.340504 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:22 crc kubenswrapper[4780]: I1210 10:46:22.340992 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:22 crc kubenswrapper[4780]: I1210 10:46:22.341175 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:22 crc kubenswrapper[4780]: I1210 10:46:22.341313 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:22 crc kubenswrapper[4780]: I1210 10:46:22.341436 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:22Z","lastTransitionTime":"2025-12-10T10:46:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:22 crc kubenswrapper[4780]: I1210 10:46:22.444260 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:22 crc kubenswrapper[4780]: I1210 10:46:22.444338 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:22 crc kubenswrapper[4780]: I1210 10:46:22.444357 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:22 crc kubenswrapper[4780]: I1210 10:46:22.444382 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:22 crc kubenswrapper[4780]: I1210 10:46:22.444400 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:22Z","lastTransitionTime":"2025-12-10T10:46:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:22 crc kubenswrapper[4780]: I1210 10:46:22.547144 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:22 crc kubenswrapper[4780]: I1210 10:46:22.547184 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:22 crc kubenswrapper[4780]: I1210 10:46:22.547198 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:22 crc kubenswrapper[4780]: I1210 10:46:22.547217 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:22 crc kubenswrapper[4780]: I1210 10:46:22.547229 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:22Z","lastTransitionTime":"2025-12-10T10:46:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:22 crc kubenswrapper[4780]: I1210 10:46:22.650376 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:22 crc kubenswrapper[4780]: I1210 10:46:22.650424 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:22 crc kubenswrapper[4780]: I1210 10:46:22.650434 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:22 crc kubenswrapper[4780]: I1210 10:46:22.650450 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:22 crc kubenswrapper[4780]: I1210 10:46:22.650462 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:22Z","lastTransitionTime":"2025-12-10T10:46:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:22 crc kubenswrapper[4780]: I1210 10:46:22.754528 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:22 crc kubenswrapper[4780]: I1210 10:46:22.754581 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:22 crc kubenswrapper[4780]: I1210 10:46:22.754591 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:22 crc kubenswrapper[4780]: I1210 10:46:22.754607 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:22 crc kubenswrapper[4780]: I1210 10:46:22.754618 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:22Z","lastTransitionTime":"2025-12-10T10:46:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:22 crc kubenswrapper[4780]: I1210 10:46:22.858670 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:22 crc kubenswrapper[4780]: I1210 10:46:22.858726 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:22 crc kubenswrapper[4780]: I1210 10:46:22.858739 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:22 crc kubenswrapper[4780]: I1210 10:46:22.858756 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:22 crc kubenswrapper[4780]: I1210 10:46:22.858768 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:22Z","lastTransitionTime":"2025-12-10T10:46:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:22 crc kubenswrapper[4780]: I1210 10:46:22.962230 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:22 crc kubenswrapper[4780]: I1210 10:46:22.962266 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:22 crc kubenswrapper[4780]: I1210 10:46:22.962275 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:22 crc kubenswrapper[4780]: I1210 10:46:22.962288 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:22 crc kubenswrapper[4780]: I1210 10:46:22.962297 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:22Z","lastTransitionTime":"2025-12-10T10:46:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:23 crc kubenswrapper[4780]: I1210 10:46:23.065235 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:23 crc kubenswrapper[4780]: I1210 10:46:23.065307 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:23 crc kubenswrapper[4780]: I1210 10:46:23.065324 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:23 crc kubenswrapper[4780]: I1210 10:46:23.065349 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:23 crc kubenswrapper[4780]: I1210 10:46:23.065366 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:23Z","lastTransitionTime":"2025-12-10T10:46:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:23 crc kubenswrapper[4780]: I1210 10:46:23.139751 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-8cwb7_deadb49b-61b8-435f-8168-d7bd3c01b5ad/kube-multus/0.log" Dec 10 10:46:23 crc kubenswrapper[4780]: I1210 10:46:23.139807 4780 generic.go:334] "Generic (PLEG): container finished" podID="deadb49b-61b8-435f-8168-d7bd3c01b5ad" containerID="b46e1864cd02c0ab49ad21329dedf09667d52948a3f59eac7f32302cc0b48bda" exitCode=1 Dec 10 10:46:23 crc kubenswrapper[4780]: I1210 10:46:23.139844 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-8cwb7" event={"ID":"deadb49b-61b8-435f-8168-d7bd3c01b5ad","Type":"ContainerDied","Data":"b46e1864cd02c0ab49ad21329dedf09667d52948a3f59eac7f32302cc0b48bda"} Dec 10 10:46:23 crc kubenswrapper[4780]: I1210 10:46:23.140299 4780 scope.go:117] "RemoveContainer" containerID="b46e1864cd02c0ab49ad21329dedf09667d52948a3f59eac7f32302cc0b48bda" Dec 10 10:46:23 crc kubenswrapper[4780]: I1210 10:46:23.161253 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://acd480054c5d19efdbcce679b2246ce2972593aa710b1074a2fa90006f351c66\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7c28d1629c1d97373d475f14e54585e81a67114d5a69e21be9d1e750040d6d5c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/en
v\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:23Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:23 crc kubenswrapper[4780]: I1210 10:46:23.169073 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:23 crc kubenswrapper[4780]: I1210 10:46:23.169114 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:23 crc kubenswrapper[4780]: I1210 10:46:23.169157 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:23 crc kubenswrapper[4780]: I1210 10:46:23.169181 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:23 crc kubenswrapper[4780]: I1210 10:46:23.169193 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:23Z","lastTransitionTime":"2025-12-10T10:46:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:23 crc kubenswrapper[4780]: I1210 10:46:23.177219 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-msm77" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c1046aa3-9bf1-4013-8e8d-5629f08ed5e2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6a59aa8cd7a70e094ff80b0ae626aaabb77fb7ffdcffa1320945909c32da0704\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2ttwk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:30Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-msm77\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:23Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:23 crc kubenswrapper[4780]: I1210 10:46:23.193278 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"056c06c7-eb0f-4fb7-9f86-2884fd0d1e60\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b9592145faf65060693a3f1e14db253e10cd09c642ea0aa1a7682f5b06872f14\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:44:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://eccc5070135705c193e1020872bffc1b8aeb70dbe82f1bb520f36f012ca00703\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:44:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://72f3bd530fbcc66ed779ee9f6ef1c442f2697ca027aad5bd2473f42101d55528\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:44:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3970fa6bd2aa3e93a4473d78218e9dd494aa5f7056efbcfb123160bb63162192\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3970fa6bd2aa3e93a4473d78218e9dd494aa5f7056efbcfb123160bb63162192\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:44:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:44:57Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:44:56Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:23Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:23 crc kubenswrapper[4780]: I1210 10:46:23.210253 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:23Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:23 crc kubenswrapper[4780]: I1210 10:46:23.236061 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-rwfxn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e522fb8-b104-4f14-a3a2-628fbe0ef36c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f3318d09af03d52017f648e9727180287dbcfce150242eaeb5f95be2fa6a1374\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5855e463a188d72cb8eac92bd97b9b2f91550a9a5437d7df83405bbe7a104f5d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"s
tarted\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5855e463a188d72cb8eac92bd97b9b2f91550a9a5437d7df83405bbe7a104f5d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4a16db7b744311d27339440bc46ebd09ba4264c5892ff205c6125af2a127e67e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4a16db7b744311d27339440bc46ebd09ba4264c5892ff205c6125af2a127e67e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fcbeee4d4e545150d725187049a89aa3d35ddd8f7767a6af0ea84a385454c316\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fcbeee4d4e545150d725187049a89aa3d35ddd8f7767a6af0ea84a385454c316\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"
}]},{\\\"containerID\\\":\\\"cri-o://3d60cbe3cc8e817975ed6843aefdbfe3233e8857a6458d77322a623ed858ca18\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3d60cbe3cc8e817975ed6843aefdbfe3233e8857a6458d77322a623ed858ca18\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0a0b3aad440bea346393ff35f0fa9492282c2a69b4a4c170f56ca7af63666077\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0a0b3aad440bea346393ff35f0fa9492282c2a69b4a4c170f56ca7af63666077\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3b72566e3e3a5f8eb12033cd2342bc3e9ad8f091f47c9d65309888d91a1ceaf4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3b72566e3e3a5f8eb12033cd2342bc3e9ad8f091f47c9d65309888d91a1ceaf4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":
\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:27Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-rwfxn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:23Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:23 crc kubenswrapper[4780]: I1210 10:46:23.249989 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://738eca4a4800d0b83e165aaad1b57939c0fca3335dbf043de168bcc240b2c32c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:23Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:23 crc kubenswrapper[4780]: I1210 10:46:23.264810 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:23Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:23 crc kubenswrapper[4780]: I1210 10:46:23.272169 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:23 crc kubenswrapper[4780]: I1210 10:46:23.272210 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:23 crc kubenswrapper[4780]: I1210 10:46:23.272220 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:23 crc kubenswrapper[4780]: I1210 10:46:23.272235 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:23 crc kubenswrapper[4780]: I1210 10:46:23.272246 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:23Z","lastTransitionTime":"2025-12-10T10:46:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:23 crc kubenswrapper[4780]: I1210 10:46:23.281369 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-8cwb7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"deadb49b-61b8-435f-8168-d7bd3c01b5ad\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:23Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:23Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b46e1864cd02c0ab49ad21329dedf09667d52948a3f59eac7f32302cc0b48bda\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b46e1864cd02c0ab49ad21329dedf09667d52948a3f59eac7f32302cc0b48bda\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-10T10:46:22Z\\\",\\\"message\\\":\\\"2025-12-10T10:45:36+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_86c2478e-d0b0-468d-aefe-1644c458013c\\\\n2025-12-10T10:45:36+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_86c2478e-d0b0-468d-aefe-1644c458013c to /host/opt/cni/bin/\\\\n2025-12-10T10:45:36Z [verbose] multus-daemon started\\\\n2025-12-10T10:45:36Z [verbose] Readiness Indicator file check\\\\n2025-12-10T10:46:21Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r67pg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:27Z\\\"}}\" for pod \"openshift-multus\"/\"multus-8cwb7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:23Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:23 crc kubenswrapper[4780]: I1210 10:46:23.296430 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-46s5p" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"24187953-1dc5-48d7-b00c-1e5876604b6b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:40Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:40Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jg69p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jg69p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:40Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-46s5p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:23Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:23 crc kubenswrapper[4780]: I1210 10:46:23.314503 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:23Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:23 crc kubenswrapper[4780]: I1210 10:46:23.327361 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-2lx8w" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8a3251eb-408c-42f1-b74d-261cb45eab71\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://414f485a56b9fb2fbea2bbc9687a588aabda6239cad44f8116ec680597dbe667\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lgkg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\
\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:27Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-2lx8w\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:23Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:23 crc kubenswrapper[4780]: I1210 10:46:23.352063 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-fpl55" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cc22221d-0c02-4e8c-8314-c2e6d9290b5e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ef0cc518de07a141be124545f2e547c630329ff507b0e0e71e4dee9b4be5d89a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ce7c3983123be2f92944322c2cf554e67cf80a51981415d9872bfc2db9f1ed3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025
-12-10T10:45:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cafa00280d671b48ed3628917793412a20b9d3be93a80d9a7775003081aea1ab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dbc444116b8592021f7d6ba0387d473521cba7c09ca78384be81f688af54554f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://782bcb7eb8341b73ff1c32c5af00aaefa2a513e5d1672f9f7df13cd05467131c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/o
vn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c39c3d56584d0939bfa9be938be09db5c93ddf50e0a56cacaa6e30c7b79475a1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8bae117db052a26101b63906a5bc25cd1ad0813261c80f78c00ee5235644c788\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8bae117db052a26101b63906a5bc25cd1ad0813261c80f78c00ee5235644c788\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-10T10:46:05Z\\\",\\\"message\\\":\\\"af71e}]}}] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {c02bd945-d57b-49ff-9cd3-202ed3574b26}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:} {Op:update Table:NAT Row:map[external_ip:192.168.126.11 logical_ip:10.217.0.59 options:{GoMap:map[stateless:false]} type:snat] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {dce28c51-c9f1-478b-97c8-7e209d6e7cbe}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:} {Op:mutate Table:Logical_Router Row:map[] Rows:[] Columns:[] Mutations:[{Column:nat Mutator:insert Value:{GoSet:[{GoUUID:dce28c51-c9f1-478b-97c8-7e209d6e7cbe}]}}] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {e3c4661a-36a6-47f0-a6c0-a4ee741f2224}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1210 10:46:05.290278 6546 services_controller.go:451] Built service openshift-console/console cluster-wide LB for network=default: []services.LB{services.LB{Name:\\\\\\\"Service_openshift-console/console_TCP_cluster\\\\\\\", UUID:\\\\\\\"\\\\\\\", 
Protocol:\\\\\\\"TCP\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-console/console\\\\\\\"}, Opts:services.LBOpts{Reject:true, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{services.LBRule{Source:services.Addr{IP:\\\\\\\"10.217.5.194\\\\\\\", Port:443, Template:(*services.Template\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-10T10:46:04Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-fpl55_openshift-ovn-kubernetes(cc22221d-0c02-4e8c-8314-c2e6d9290b5e)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://df663a4e7ac68c8967db1167ea07b83862e9d52aa13b3f10e51934bb18e7397e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\
\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ada0c2d5e74e1805777a330c39a3c1c0fb677f876387207a83c79bd7ed06775a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ada0c2d5e74e1805777a330c39a3c1c0fb677f876387207a83c79bd7ed06775a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:27Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-fpl55\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:23Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:23 crc kubenswrapper[4780]: I1210 10:46:23.366672 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-6m7tj" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a78da08f-1ec1-4cc7-af55-d527da423778\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://920fb034d3d8ce6bf28b128e33093aa5daa724b15c51c7e208ed94ccc2f4840e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jvtxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a4509159a8aa8b24876c86f827f346063fd6cd2603c15242f6507b0a4afaff3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jvtxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:38Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-6m7tj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:23Z is after 2025-08-24T17:21:41Z" Dec 10 
10:46:23 crc kubenswrapper[4780]: I1210 10:46:23.374977 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:23 crc kubenswrapper[4780]: I1210 10:46:23.375032 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:23 crc kubenswrapper[4780]: I1210 10:46:23.375044 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:23 crc kubenswrapper[4780]: I1210 10:46:23.375063 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:23 crc kubenswrapper[4780]: I1210 10:46:23.375078 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:23Z","lastTransitionTime":"2025-12-10T10:46:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:23 crc kubenswrapper[4780]: I1210 10:46:23.384039 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"da4d38a3-53c0-417d-a86f-3496714bd352\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7f0f1f88e79f81b145ad50cc2b0a6e66de12f33c6819aaa356c8e6f7808384f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:44:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d6c4b099607f36efe13488a53c66715ba756493eaa11612cef18b8174b7b48ef\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d
7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:44:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a352e1e0c2a6fcb27ed2d53b1952586d56239ac801471f41d319d4b9390f7334\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:44:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://960f2d7c617f5d0b3281d431308985bb7419af77581a404554f849d22ffa1687\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f71a2483b5040f737133b92baa6ea162f365c492a0d860a50ba8eabd7ca23519\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-10T10:45:21Z\\\",\\\"message\\\":\\\"file observer\\\\nW1210 10:45:20.821547 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1210 10:45:20.821865 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1210 10:45:20.824662 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-536389068/tls.crt::/tmp/serving-cert-536389068/tls.key\\\\\\\"\\\\nI1210 10:45:21.107127 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1210 10:45:21.109726 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1210 10:45:21.109750 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1210 10:45:21.109791 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1210 10:45:21.109797 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1210 10:45:21.115580 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1210 10:45:21.115608 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1210 10:45:21.115614 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1210 10:45:21.115621 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1210 10:45:21.115664 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1210 10:45:21.115672 1 
secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1210 10:45:21.115676 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1210 10:45:21.117097 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1210 10:45:21.119004 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:14Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://531ca3c69106ee33211f11eba4a6fcbcf6cf5b8b6ee16193b5a118999537d2e0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:00Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6a7d241fa7fd864088c8bfa29a4457b38b3e78a6846872d6f52aeebb0de99c58\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6a7d241fa7fd864088c8bfa29a4457b38b3e78a6846872d6f52aeebb0de99c58\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:44:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:44:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:44:56Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:23Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:23 crc kubenswrapper[4780]: I1210 10:46:23.399866 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8ca741d2df061bc77e981eca8a555ba59082c6db0357093490908a1b27054340\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:23Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:23 crc kubenswrapper[4780]: I1210 10:46:23.413375 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6bf1dca1-b191-4796-b326-baac53e84045\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://439376b6f14a03790c4d49523ce4f21e3ee73ee0a81954f873409cf956048f53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sh92h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://57ec3f3a4da992b4288db7d6633da18e479fdc56b10c503a2a44368671882f0d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sh92h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:27Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-xhdr5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:23Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:23 crc kubenswrapper[4780]: I1210 10:46:23.478279 4780 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:23 crc kubenswrapper[4780]: I1210 10:46:23.478328 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:23 crc kubenswrapper[4780]: I1210 10:46:23.478339 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:23 crc kubenswrapper[4780]: I1210 10:46:23.478359 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:23 crc kubenswrapper[4780]: I1210 10:46:23.478370 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:23Z","lastTransitionTime":"2025-12-10T10:46:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:23 crc kubenswrapper[4780]: I1210 10:46:23.581507 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:23 crc kubenswrapper[4780]: I1210 10:46:23.581555 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:23 crc kubenswrapper[4780]: I1210 10:46:23.581568 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:23 crc kubenswrapper[4780]: I1210 10:46:23.581584 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:23 crc kubenswrapper[4780]: I1210 10:46:23.581596 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:23Z","lastTransitionTime":"2025-12-10T10:46:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:23 crc kubenswrapper[4780]: I1210 10:46:23.684682 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:23 crc kubenswrapper[4780]: I1210 10:46:23.685029 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:23 crc kubenswrapper[4780]: I1210 10:46:23.685156 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:23 crc kubenswrapper[4780]: I1210 10:46:23.685277 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:23 crc kubenswrapper[4780]: I1210 10:46:23.685377 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:23Z","lastTransitionTime":"2025-12-10T10:46:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:23 crc kubenswrapper[4780]: I1210 10:46:23.787720 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:23 crc kubenswrapper[4780]: I1210 10:46:23.787903 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:23 crc kubenswrapper[4780]: I1210 10:46:23.787939 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:23 crc kubenswrapper[4780]: I1210 10:46:23.787959 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:23 crc kubenswrapper[4780]: I1210 10:46:23.787972 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:23Z","lastTransitionTime":"2025-12-10T10:46:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:23 crc kubenswrapper[4780]: I1210 10:46:23.891346 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:23 crc kubenswrapper[4780]: I1210 10:46:23.891381 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:23 crc kubenswrapper[4780]: I1210 10:46:23.891391 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:23 crc kubenswrapper[4780]: I1210 10:46:23.891406 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:23 crc kubenswrapper[4780]: I1210 10:46:23.891415 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:23Z","lastTransitionTime":"2025-12-10T10:46:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:23 crc kubenswrapper[4780]: I1210 10:46:23.959429 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 10 10:46:23 crc kubenswrapper[4780]: E1210 10:46:23.959592 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 10 10:46:23 crc kubenswrapper[4780]: I1210 10:46:23.959835 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-46s5p" Dec 10 10:46:23 crc kubenswrapper[4780]: E1210 10:46:23.959906 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-46s5p" podUID="24187953-1dc5-48d7-b00c-1e5876604b6b" Dec 10 10:46:23 crc kubenswrapper[4780]: I1210 10:46:23.960097 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 10 10:46:23 crc kubenswrapper[4780]: E1210 10:46:23.960161 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 10 10:46:23 crc kubenswrapper[4780]: I1210 10:46:23.961170 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 10 10:46:23 crc kubenswrapper[4780]: E1210 10:46:23.961495 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 10 10:46:23 crc kubenswrapper[4780]: I1210 10:46:23.993853 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:23 crc kubenswrapper[4780]: I1210 10:46:23.994430 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:23 crc kubenswrapper[4780]: I1210 10:46:23.994547 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:23 crc kubenswrapper[4780]: I1210 10:46:23.994664 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:23 crc kubenswrapper[4780]: I1210 10:46:23.994758 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:23Z","lastTransitionTime":"2025-12-10T10:46:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:24 crc kubenswrapper[4780]: I1210 10:46:24.098193 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:24 crc kubenswrapper[4780]: I1210 10:46:24.098453 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:24 crc kubenswrapper[4780]: I1210 10:46:24.098565 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:24 crc kubenswrapper[4780]: I1210 10:46:24.098650 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:24 crc kubenswrapper[4780]: I1210 10:46:24.098720 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:24Z","lastTransitionTime":"2025-12-10T10:46:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:24 crc kubenswrapper[4780]: I1210 10:46:24.144374 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-8cwb7_deadb49b-61b8-435f-8168-d7bd3c01b5ad/kube-multus/0.log" Dec 10 10:46:24 crc kubenswrapper[4780]: I1210 10:46:24.144430 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-8cwb7" event={"ID":"deadb49b-61b8-435f-8168-d7bd3c01b5ad","Type":"ContainerStarted","Data":"caa5c9ed8d7c77d4af7b9797ad6117d06def5e3ab3082d2fd61de5f1d0902a88"} Dec 10 10:46:24 crc kubenswrapper[4780]: I1210 10:46:24.169301 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://738eca4a4800d0b83e165aaad1b57939c0fca3335dbf043de168bcc240b2c32c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:24Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:24 crc kubenswrapper[4780]: I1210 10:46:24.182700 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located 
when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:24Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:24 crc kubenswrapper[4780]: I1210 10:46:24.201376 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:24 crc kubenswrapper[4780]: I1210 10:46:24.201445 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:24 crc kubenswrapper[4780]: I1210 10:46:24.201459 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:24 crc kubenswrapper[4780]: I1210 10:46:24.201485 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:24 crc kubenswrapper[4780]: I1210 10:46:24.201501 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:24Z","lastTransitionTime":"2025-12-10T10:46:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:24 crc kubenswrapper[4780]: I1210 10:46:24.205621 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-8cwb7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"deadb49b-61b8-435f-8168-d7bd3c01b5ad\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://caa5c9ed8d7c77d4af7b9797ad6117d06def5e3ab3082d2fd61de5f1d0902a88\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b46e1864cd02c0ab49ad21329dedf09667d52948a3f59eac7f32302cc0b48bda\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-10T10:46:22Z\\\",\\\"message\\\":\\\"2025-12-10T10:45:36+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_86c2478e-d0b0-468d-aefe-1644c458013c\\\\n2025-12-10T10:45:36+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_86c2478e-d0b0-468d-aefe-1644c458013c to /host/opt/cni/bin/\\\\n2025-12-10T10:45:36Z [verbose] multus-daemon started\\\\n2025-12-10T10:45:36Z [verbose] Readiness Indicator file check\\\\n2025-12-10T10:46:21Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:46:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r67pg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:27Z\\\"}}\" for pod \"openshift-multus\"/\"multus-8cwb7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:24Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:24 crc kubenswrapper[4780]: I1210 10:46:24.226080 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-rwfxn" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e522fb8-b104-4f14-a3a2-628fbe0ef36c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f3318d09af03d52017f648e9727180287dbcfce150242eaeb5f95be2fa6a1374\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5855e463a188d72cb8eac92bd97b9b2f91550a9a5437d7df83405bbe7a104f5d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5855e463a188d72cb8eac92bd97b9b2f91550a9a5437d7df83405bbe7a104f5d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4a16db7b744311d27339440bc46ebd09ba4264c5892ff205c6125af2a127e67e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4a16db7b744311d27339440bc46ebd09ba4264c5892ff205c6125af2a127e67e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fcbeee4d4e545150d725187049a89aa3d35ddd8f7767a6af0ea84a385454c316\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fcbeee4d4e545150d725187049a89aa3d35ddd8f7767a6af0ea84a385454c316\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3d60cbe3cc8e817975ed6843aefdbfe3233e8857a6458d77322a623ed858ca18\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3d60cbe3cc8e817975ed6843aefdbfe3233e8857a6458d77322a623ed858ca18\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0a0b3aad440bea346393ff35f0fa9492282c2a69b4a4c170f56ca7af63666077\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0a0b3aad440bea346393ff35f0fa9492282c2a69b4a4c170f56ca7af63666077\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3b72566e3e3a5f8eb12033cd2342bc3e9ad8f091f47c9d65309888d91a1ceaf4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3b72566e3e3a5f8eb12033cd2342bc3e9ad8f091f47c9d65309888d91a1ceaf4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:27Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-rwfxn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:24Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:24 crc kubenswrapper[4780]: I1210 10:46:24.242186 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:24Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:24 crc kubenswrapper[4780]: I1210 10:46:24.255164 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-2lx8w" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8a3251eb-408c-42f1-b74d-261cb45eab71\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://414f485a56b9fb2fbea2bbc9687a588aabda6239cad44f8116ec680597dbe667\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lgkg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:27Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-2lx8w\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:24Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:24 crc kubenswrapper[4780]: I1210 10:46:24.279434 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-fpl55" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cc22221d-0c02-4e8c-8314-c2e6d9290b5e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ef0cc518de07a141be124545f2e547c630329ff507b0e0e71e4dee9b4be5d89a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ce7c3983123be2f92944322c2cf554e67cf80a51981415d9872bfc2db9f1ed3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cafa00280d671b48ed3628917793412a20b9d3be93a80d9a7775003081aea1ab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name
\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dbc444116b8592021f7d6ba0387d473521cba7c09ca78384be81f688af54554f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://782bcb7eb8341b73ff1c32c5af00aaefa2a513e5d1672f9f7df13cd05467131c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c39c3d56584d0939bfa9be938be09db5c93ddf50e0a56cacaa6e30c7b79475a1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\
"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8bae117db052a26101b63906a5bc25cd1ad0813261c80f78c00ee5235644c788\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8bae117db052a26101b63906a5bc25cd1ad0813261c80f78c00ee5235644c788\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-10T10:46:05Z\\\",\\\"message\\\":\\\"af71e}]}}] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {c02bd945-d57b-49ff-9cd3-202ed3574b26}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:} {Op:update Table:NAT Row:map[external_ip:192.168.126.11 logical_ip:10.217.0.59 options:{GoMap:map[stateless:false]} type:snat] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {dce28c51-c9f1-478b-97c8-7e209d6e7cbe}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:} {Op:mutate Table:Logical_Router Row:map[] Rows:[] Columns:[] Mutations:[{Column:nat Mutator:insert Value:{GoSet:[{GoUUID:dce28c51-c9f1-478b-97c8-7e209d6e7cbe}]}}] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {e3c4661a-36a6-47f0-a6c0-a4ee741f2224}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1210 10:46:05.290278 6546 services_controller.go:451] Built service openshift-console/console cluster-wide LB for network=default: []services.LB{services.LB{Name:\\\\\\\"Service_openshift-console/console_TCP_cluster\\\\\\\", UUID:\\\\\\\"\\\\\\\", Protocol:\\\\\\\"TCP\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-console/console\\\\\\\"}, Opts:services.LBOpts{Reject:true, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{services.LBRule{Source:services.Addr{IP:\\\\\\\"10.217.5.194\\\\\\\", Port:443, Template:(*services.Template\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-10T10:46:04Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-fpl55_openshift-ovn-kubernetes(cc22221d-0c02-4e8c-8314-c2e6d9290b5e)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://df663a4e7ac68c8967db1167ea07b83862e9d52aa13b3f10e51934bb18e7397e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ada0c2d5e74e1805777a330c39a3c1c0fb677f876387207a83c79bd7ed06775a\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ada0c2d5e74e1805777a330c39a3c1c0fb677f876387207a83c79bd7ed06775a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:27Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-fpl55\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:24Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:24 crc kubenswrapper[4780]: I1210 10:46:24.295371 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-6m7tj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a78da08f-1ec1-4cc7-af55-d527da423778\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://920fb034d3d8ce6bf28b128e33093aa5daa724b15c51c7e208ed94ccc2f4840e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jvtxj
\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a4509159a8aa8b24876c86f827f346063fd6cd2603c15242f6507b0a4afaff3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jvtxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:38Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-6m7tj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:24Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:24 crc kubenswrapper[4780]: I1210 10:46:24.304247 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:24 crc kubenswrapper[4780]: I1210 10:46:24.304302 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:24 crc kubenswrapper[4780]: I1210 10:46:24.304315 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:24 crc kubenswrapper[4780]: I1210 10:46:24.304337 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:24 crc kubenswrapper[4780]: I1210 10:46:24.304410 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:24Z","lastTransitionTime":"2025-12-10T10:46:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:24 crc kubenswrapper[4780]: I1210 10:46:24.310625 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-46s5p" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"24187953-1dc5-48d7-b00c-1e5876604b6b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:40Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:40Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jg69p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jg69p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:40Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-46s5p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:24Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:24 crc kubenswrapper[4780]: I1210 10:46:24.330179 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"da4d38a3-53c0-417d-a86f-3496714bd352\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7f0f1f88e79f81b145ad50cc2b0a6e66de12f33c6819aaa356c8e6f7808384f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:44:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d6c4b099607f36efe13488a53c66715ba756493eaa11612cef18b8174b7b48ef\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:44:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a352e1e0c2a6fcb27ed2d53b1952586d56239ac801471f41d319d4b9390f7334\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:44:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://960f2d7c617f5d0b3281d431308985bb7419af77581a404554f849d22ffa1687\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f71a2483b5040f737133b92baa6ea162f365c492a0d860a50ba8eabd7ca23519\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-10T10:45:21Z\\\",\\\"message\\\":\\\"file observer\\\\nW1210 10:45:20.821547 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1210 10:45:20.821865 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1210 10:45:20.824662 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-536389068/tls.crt::/tmp/serving-cert-536389068/tls.key\\\\\\\"\\\\nI1210 10:45:21.107127 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1210 10:45:21.109726 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1210 10:45:21.109750 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1210 10:45:21.109791 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1210 10:45:21.109797 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1210 10:45:21.115580 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1210 10:45:21.115608 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1210 10:45:21.115614 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1210 10:45:21.115621 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1210 10:45:21.115664 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1210 10:45:21.115672 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1210 10:45:21.115676 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1210 10:45:21.117097 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1210 10:45:21.119004 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:14Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://531ca3c69106ee33211f11eba4a6fcbcf6cf5b8b6ee16193b5a118999537d2e0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:00Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6a7d241fa7fd864088c8bfa29a4457b38b3e78a6846872d6f52aeebb0de99c58\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6a7d241fa7fd864088c8bfa29a4457b38b3e78a6846872d6f52aeebb0de99c58\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:44:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:44:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:44:56Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:24Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:24 crc kubenswrapper[4780]: I1210 10:46:24.348619 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8ca741d2df061bc77e981eca8a555ba59082c6db0357093490908a1b27054340\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:24Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:24 crc kubenswrapper[4780]: I1210 10:46:24.363522 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6bf1dca1-b191-4796-b326-baac53e84045\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://439376b6f14a03790c4d49523ce4f21e3ee73ee0a81954f873409cf956048f53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sh92h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://57ec3f3a4da992b4288db7d6633da18e479fdc56b10c503a2a44368671882f0d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sh92h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:27Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-xhdr5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:24Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:24 crc kubenswrapper[4780]: I1210 10:46:24.376656 4780 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"056c06c7-eb0f-4fb7-9f86-2884fd0d1e60\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b9592145faf65060693a3f1e14db253e10cd09c642ea0aa1a7682f5b06872f14\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:44:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://eccc5070135705c193e1020872bffc1b8aeb70dbe82f1bb520f36f012ca00703\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:44:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://72f3bd530fbcc66ed779ee9f6ef1c442f2697ca027aad5bd2473f42101d55528\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:44:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":
[{\\\"containerID\\\":\\\"cri-o://3970fa6bd2aa3e93a4473d78218e9dd494aa5f7056efbcfb123160bb63162192\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3970fa6bd2aa3e93a4473d78218e9dd494aa5f7056efbcfb123160bb63162192\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:44:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:44:57Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:44:56Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:24Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:24 crc kubenswrapper[4780]: I1210 10:46:24.395561 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:24Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:24 crc kubenswrapper[4780]: I1210 10:46:24.406776 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:24 crc kubenswrapper[4780]: I1210 10:46:24.406813 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:24 crc kubenswrapper[4780]: I1210 10:46:24.406826 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:24 crc kubenswrapper[4780]: I1210 10:46:24.406844 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:24 crc kubenswrapper[4780]: I1210 10:46:24.406856 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:24Z","lastTransitionTime":"2025-12-10T10:46:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:24 crc kubenswrapper[4780]: I1210 10:46:24.413288 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://acd480054c5d19efdbcce679b2246ce2972593aa710b1074a2fa90006f351c66\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7c28d1629c1d97373d475f14e54585e81a67114d5a69e21be9d1e750040d6d5c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:24Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:24 crc kubenswrapper[4780]: I1210 10:46:24.428779 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-msm77" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c1046aa3-9bf1-4013-8e8d-5629f08ed5e2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6a59aa8cd7a70e094ff80b0ae626aaabb77fb7ffdcffa1320945909c32da0704\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2ttwk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:30Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-msm77\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:24Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:24 crc kubenswrapper[4780]: I1210 10:46:24.510545 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:24 crc kubenswrapper[4780]: I1210 10:46:24.510622 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:24 crc kubenswrapper[4780]: I1210 10:46:24.510637 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:24 crc kubenswrapper[4780]: I1210 10:46:24.510660 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:24 crc kubenswrapper[4780]: I1210 10:46:24.510676 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:24Z","lastTransitionTime":"2025-12-10T10:46:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:24 crc kubenswrapper[4780]: I1210 10:46:24.614728 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:24 crc kubenswrapper[4780]: I1210 10:46:24.614931 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:24 crc kubenswrapper[4780]: I1210 10:46:24.614949 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:24 crc kubenswrapper[4780]: I1210 10:46:24.614968 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:24 crc kubenswrapper[4780]: I1210 10:46:24.615006 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:24Z","lastTransitionTime":"2025-12-10T10:46:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:24 crc kubenswrapper[4780]: I1210 10:46:24.717912 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:24 crc kubenswrapper[4780]: I1210 10:46:24.717977 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:24 crc kubenswrapper[4780]: I1210 10:46:24.717987 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:24 crc kubenswrapper[4780]: I1210 10:46:24.718006 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:24 crc kubenswrapper[4780]: I1210 10:46:24.718017 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:24Z","lastTransitionTime":"2025-12-10T10:46:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:24 crc kubenswrapper[4780]: I1210 10:46:24.821114 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:24 crc kubenswrapper[4780]: I1210 10:46:24.821178 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:24 crc kubenswrapper[4780]: I1210 10:46:24.821199 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:24 crc kubenswrapper[4780]: I1210 10:46:24.821225 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:24 crc kubenswrapper[4780]: I1210 10:46:24.821244 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:24Z","lastTransitionTime":"2025-12-10T10:46:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:24 crc kubenswrapper[4780]: I1210 10:46:24.924835 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:24 crc kubenswrapper[4780]: I1210 10:46:24.924993 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:24 crc kubenswrapper[4780]: I1210 10:46:24.925013 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:24 crc kubenswrapper[4780]: I1210 10:46:24.925045 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:24 crc kubenswrapper[4780]: I1210 10:46:24.925062 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:24Z","lastTransitionTime":"2025-12-10T10:46:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:24 crc kubenswrapper[4780]: I1210 10:46:24.977168 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-controller-manager/kube-controller-manager-crc"] Dec 10 10:46:25 crc kubenswrapper[4780]: I1210 10:46:25.028708 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:25 crc kubenswrapper[4780]: I1210 10:46:25.028767 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:25 crc kubenswrapper[4780]: I1210 10:46:25.028781 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:25 crc kubenswrapper[4780]: I1210 10:46:25.028804 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:25 crc kubenswrapper[4780]: I1210 10:46:25.028818 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:25Z","lastTransitionTime":"2025-12-10T10:46:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:25 crc kubenswrapper[4780]: I1210 10:46:25.131778 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:25 crc kubenswrapper[4780]: I1210 10:46:25.131826 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:25 crc kubenswrapper[4780]: I1210 10:46:25.131843 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:25 crc kubenswrapper[4780]: I1210 10:46:25.131860 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:25 crc kubenswrapper[4780]: I1210 10:46:25.131871 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:25Z","lastTransitionTime":"2025-12-10T10:46:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:25 crc kubenswrapper[4780]: I1210 10:46:25.237307 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:25 crc kubenswrapper[4780]: I1210 10:46:25.237400 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:25 crc kubenswrapper[4780]: I1210 10:46:25.237419 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:25 crc kubenswrapper[4780]: I1210 10:46:25.237444 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:25 crc kubenswrapper[4780]: I1210 10:46:25.237459 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:25Z","lastTransitionTime":"2025-12-10T10:46:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:25 crc kubenswrapper[4780]: I1210 10:46:25.341118 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:25 crc kubenswrapper[4780]: I1210 10:46:25.341179 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:25 crc kubenswrapper[4780]: I1210 10:46:25.341212 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:25 crc kubenswrapper[4780]: I1210 10:46:25.341233 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:25 crc kubenswrapper[4780]: I1210 10:46:25.341246 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:25Z","lastTransitionTime":"2025-12-10T10:46:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:25 crc kubenswrapper[4780]: I1210 10:46:25.445272 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:25 crc kubenswrapper[4780]: I1210 10:46:25.445365 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:25 crc kubenswrapper[4780]: I1210 10:46:25.445386 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:25 crc kubenswrapper[4780]: I1210 10:46:25.445416 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:25 crc kubenswrapper[4780]: I1210 10:46:25.445473 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:25Z","lastTransitionTime":"2025-12-10T10:46:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:25 crc kubenswrapper[4780]: I1210 10:46:25.548830 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:25 crc kubenswrapper[4780]: I1210 10:46:25.548893 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:25 crc kubenswrapper[4780]: I1210 10:46:25.548909 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:25 crc kubenswrapper[4780]: I1210 10:46:25.548943 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:25 crc kubenswrapper[4780]: I1210 10:46:25.548953 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:25Z","lastTransitionTime":"2025-12-10T10:46:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:25 crc kubenswrapper[4780]: I1210 10:46:25.651350 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:25 crc kubenswrapper[4780]: I1210 10:46:25.651391 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:25 crc kubenswrapper[4780]: I1210 10:46:25.651400 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:25 crc kubenswrapper[4780]: I1210 10:46:25.651440 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:25 crc kubenswrapper[4780]: I1210 10:46:25.651452 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:25Z","lastTransitionTime":"2025-12-10T10:46:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:25 crc kubenswrapper[4780]: I1210 10:46:25.754716 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:25 crc kubenswrapper[4780]: I1210 10:46:25.754771 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:25 crc kubenswrapper[4780]: I1210 10:46:25.754793 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:25 crc kubenswrapper[4780]: I1210 10:46:25.754824 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:25 crc kubenswrapper[4780]: I1210 10:46:25.754844 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:25Z","lastTransitionTime":"2025-12-10T10:46:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:25 crc kubenswrapper[4780]: I1210 10:46:25.858800 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:25 crc kubenswrapper[4780]: I1210 10:46:25.858841 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:25 crc kubenswrapper[4780]: I1210 10:46:25.858850 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:25 crc kubenswrapper[4780]: I1210 10:46:25.858867 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:25 crc kubenswrapper[4780]: I1210 10:46:25.858876 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:25Z","lastTransitionTime":"2025-12-10T10:46:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:25 crc kubenswrapper[4780]: I1210 10:46:25.958195 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 10 10:46:25 crc kubenswrapper[4780]: I1210 10:46:25.958269 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 10 10:46:25 crc kubenswrapper[4780]: E1210 10:46:25.958408 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 10 10:46:25 crc kubenswrapper[4780]: I1210 10:46:25.958510 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 10 10:46:25 crc kubenswrapper[4780]: E1210 10:46:25.958523 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 10 10:46:25 crc kubenswrapper[4780]: E1210 10:46:25.958894 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 10 10:46:25 crc kubenswrapper[4780]: I1210 10:46:25.959043 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-46s5p" Dec 10 10:46:25 crc kubenswrapper[4780]: E1210 10:46:25.959120 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-46s5p" podUID="24187953-1dc5-48d7-b00c-1e5876604b6b" Dec 10 10:46:25 crc kubenswrapper[4780]: I1210 10:46:25.961887 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:25 crc kubenswrapper[4780]: I1210 10:46:25.962005 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:25 crc kubenswrapper[4780]: I1210 10:46:25.962026 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:25 crc kubenswrapper[4780]: I1210 10:46:25.962050 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:25 crc kubenswrapper[4780]: I1210 10:46:25.962066 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:25Z","lastTransitionTime":"2025-12-10T10:46:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:25 crc kubenswrapper[4780]: I1210 10:46:25.980452 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-rwfxn" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e522fb8-b104-4f14-a3a2-628fbe0ef36c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f3318d09af03d52017f648e9727180287dbcfce150242eaeb5f95be2fa6a1374\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5855e463a188d72cb8eac92bd97b9b2f91550a9a5437d7df83405bbe7a104f5d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5855e463a188d72cb8eac92bd97b9b2f91550a9a5437d7df83405bbe7a104f5d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4a16db7b744311d27339440bc46ebd09ba4264c5892ff205c6125af2a127e67e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4a16db7b744311d27339440bc46ebd09ba4264c5892ff205c6125af2a127e67e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fcbeee4d4e545150d725187049a89aa3d35ddd8f7767a6af0ea84a385454c316\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fcbeee4d4e545150d725187049a89aa3d35ddd8f7767a6af0ea84a385454c316\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3d60cbe3cc8e817975ed6843aefdbfe3233e8857a6458d77322a623ed858ca18\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3d60cbe3cc8e817975ed6843aefdbfe3233e8857a6458d77322a623ed858ca18\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0a0b3aad440bea346393ff35f0fa9492282c2a69b4a4c170f56ca7af63666077\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0a0b3aad440bea346393ff35f0fa9492282c2a69b4a4c170f56ca7af63666077\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3b72566e3e3a5f8eb12033cd2342bc3e9ad8f091f47c9d65309888d91a1ceaf4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3b72566e3e3a5f8eb12033cd2342bc3e9ad8f091f47c9d65309888d91a1ceaf4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:27Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-rwfxn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:25Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:26 crc kubenswrapper[4780]: I1210 10:46:26.001170 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"88e16d6d-ae53-465a-a66f-e1aa2abbfb8c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://55c85e62eda4733b5f1e264e6903e3f61bc4759bdf3f891c5b513a2195e0daab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:44:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8df4b9f29751cd2ebfbdf2b33f30fb6f519721f13d03173b45c5905e64524c88\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:44:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://325438d9694126427a6f6905508c0feb1ab3918532c7d6fd929e63b409574f5f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:44:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cf105ae693d68f4e8d5eaae47d0827146c24312f507b378cf60de03a5034bfb7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:44:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:44:56Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:25Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:26 crc kubenswrapper[4780]: I1210 10:46:26.018886 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://738eca4a4800d0b83e165aaad1b57939c0fca3335dbf043de168bcc240b2c32c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to 
verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:26Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:26 crc kubenswrapper[4780]: I1210 10:46:26.038811 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:26Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:26 crc kubenswrapper[4780]: I1210 10:46:26.060528 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-8cwb7" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"deadb49b-61b8-435f-8168-d7bd3c01b5ad\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://caa5c9ed8d7c77d4af7b9797ad6117d06def5e3ab3082d2fd61de5f1d0902a88\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b46e1864cd02c0ab49ad21329dedf09667d52948a3f59eac7f32302cc0b48bda\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-10T10:46:22Z\\\",\\\"message\\\":\\\"2025-12-10T10:45:36+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_86c2478e-d0b0-468d-aefe-1644c458013c\\\\n2025-12-10T10:45:36+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_86c2478e-d0b0-468d-aefe-1644c458013c to /host/opt/cni/bin/\\\\n2025-12-10T10:45:36Z [verbose] multus-daemon started\\\\n2025-12-10T10:45:36Z [verbose] Readiness Indicator file check\\\\n2025-12-10T10:46:21Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:46:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r67pg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:27Z\\\"}}\" for pod \"openshift-multus\"/\"multus-8cwb7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:26Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:26 crc kubenswrapper[4780]: I1210 10:46:26.065840 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:26 crc kubenswrapper[4780]: I1210 10:46:26.065885 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:26 crc kubenswrapper[4780]: I1210 10:46:26.065899 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:26 crc kubenswrapper[4780]: I1210 10:46:26.065938 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:26 crc kubenswrapper[4780]: I1210 10:46:26.065952 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:26Z","lastTransitionTime":"2025-12-10T10:46:26Z","reason":"KubeletNotReady","message":"container 
runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:26 crc kubenswrapper[4780]: I1210 10:46:26.081174 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-46s5p" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"24187953-1dc5-48d7-b00c-1e5876604b6b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:40Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:40Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jg69p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jg69p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:40Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-46s5p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:26Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:26 crc kubenswrapper[4780]: I1210 10:46:26.100077 4780 
status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:26Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:26 crc kubenswrapper[4780]: I1210 10:46:26.115787 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-2lx8w" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8a3251eb-408c-42f1-b74d-261cb45eab71\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://414f485a56b9fb2fbea2bbc9687a588aabda6239cad44f8116ec680597dbe667\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lgkg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:27Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-2lx8w\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:26Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:26 crc kubenswrapper[4780]: I1210 10:46:26.142624 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-fpl55" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cc22221d-0c02-4e8c-8314-c2e6d9290b5e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ef0cc518de07a141be124545f2e547c630329ff507b0e0e71e4dee9b4be5d89a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ce7c3983123be2f92944322c2cf554e67cf80a51981415d9872bfc2db9f1ed3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cafa00280d671b48ed3628917793412a20b9d3be93a80d9a7775003081aea1ab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name
\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dbc444116b8592021f7d6ba0387d473521cba7c09ca78384be81f688af54554f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://782bcb7eb8341b73ff1c32c5af00aaefa2a513e5d1672f9f7df13cd05467131c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c39c3d56584d0939bfa9be938be09db5c93ddf50e0a56cacaa6e30c7b79475a1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\
"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8bae117db052a26101b63906a5bc25cd1ad0813261c80f78c00ee5235644c788\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8bae117db052a26101b63906a5bc25cd1ad0813261c80f78c00ee5235644c788\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-10T10:46:05Z\\\",\\\"message\\\":\\\"af71e}]}}] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {c02bd945-d57b-49ff-9cd3-202ed3574b26}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:} {Op:update Table:NAT Row:map[external_ip:192.168.126.11 logical_ip:10.217.0.59 options:{GoMap:map[stateless:false]} type:snat] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {dce28c51-c9f1-478b-97c8-7e209d6e7cbe}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:} {Op:mutate Table:Logical_Router Row:map[] Rows:[] Columns:[] Mutations:[{Column:nat Mutator:insert Value:{GoSet:[{GoUUID:dce28c51-c9f1-478b-97c8-7e209d6e7cbe}]}}] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {e3c4661a-36a6-47f0-a6c0-a4ee741f2224}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1210 10:46:05.290278 6546 services_controller.go:451] Built service openshift-console/console cluster-wide LB for network=default: []services.LB{services.LB{Name:\\\\\\\"Service_openshift-console/console_TCP_cluster\\\\\\\", UUID:\\\\\\\"\\\\\\\", Protocol:\\\\\\\"TCP\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-console/console\\\\\\\"}, Opts:services.LBOpts{Reject:true, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{services.LBRule{Source:services.Addr{IP:\\\\\\\"10.217.5.194\\\\\\\", Port:443, Template:(*services.Template\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-10T10:46:04Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-fpl55_openshift-ovn-kubernetes(cc22221d-0c02-4e8c-8314-c2e6d9290b5e)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://df663a4e7ac68c8967db1167ea07b83862e9d52aa13b3f10e51934bb18e7397e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ada0c2d5e74e1805777a330c39a3c1c0fb677f876387207a83c79bd7ed06775a\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ada0c2d5e74e1805777a330c39a3c1c0fb677f876387207a83c79bd7ed06775a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:27Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-fpl55\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:26Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:26 crc kubenswrapper[4780]: I1210 10:46:26.156616 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-6m7tj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a78da08f-1ec1-4cc7-af55-d527da423778\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://920fb034d3d8ce6bf28b128e33093aa5daa724b15c51c7e208ed94ccc2f4840e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jvtxj
\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a4509159a8aa8b24876c86f827f346063fd6cd2603c15242f6507b0a4afaff3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jvtxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:38Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-6m7tj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:26Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:26 crc kubenswrapper[4780]: I1210 10:46:26.168876 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:26 crc kubenswrapper[4780]: I1210 10:46:26.168959 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:26 crc kubenswrapper[4780]: I1210 10:46:26.168979 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:26 crc kubenswrapper[4780]: I1210 10:46:26.168999 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:26 crc kubenswrapper[4780]: I1210 10:46:26.169016 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:26Z","lastTransitionTime":"2025-12-10T10:46:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:26 crc kubenswrapper[4780]: I1210 10:46:26.172702 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"da4d38a3-53c0-417d-a86f-3496714bd352\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7f0f1f88e79f81b145ad50cc2b0a6e66de12f33c6819aaa356c8e6f7808384f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:44:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d6c4b099607f36efe13488a53c66715ba756493eaa11612cef18b8174b7b48ef\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:44:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a352e1e0c2a6fcb27ed2d53b1952586d56239ac801471f41d319d4b9390f7334\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:44:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/ku
bernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://960f2d7c617f5d0b3281d431308985bb7419af77581a404554f849d22ffa1687\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f71a2483b5040f737133b92baa6ea162f365c492a0d860a50ba8eabd7ca23519\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-10T10:45:21Z\\\",\\\"message\\\":\\\"file observer\\\\nW1210 10:45:20.821547 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1210 10:45:20.821865 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1210 10:45:20.824662 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-536389068/tls.crt::/tmp/serving-cert-536389068/tls.key\\\\\\\"\\\\nI1210 10:45:21.107127 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1210 10:45:21.109726 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1210 10:45:21.109750 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1210 10:45:21.109791 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1210 10:45:21.109797 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1210 10:45:21.115580 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1210 10:45:21.115608 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1210 10:45:21.115614 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1210 10:45:21.115621 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1210 10:45:21.115664 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1210 10:45:21.115672 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1210 10:45:21.115676 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1210 10:45:21.117097 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1210 10:45:21.119004 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:14Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://531ca3c69106ee33211f11eba4a6fcbcf6cf5b8b6ee16193b5a118999537d2e0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:00Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6a7d241fa7fd864088c8bfa29a4457b38b3e78a6846872d6f52aeebb0de99c58\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6a7d241fa7fd864088c8bfa29a4457b38b3e78a6846872d6f52aeebb0de99c58\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:44:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:44:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:44:56Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:26Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:26 crc kubenswrapper[4780]: I1210 10:46:26.187848 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8ca741d2df061bc77e981eca8a555ba59082c6db0357093490908a1b27054340\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:26Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:26 crc kubenswrapper[4780]: I1210 10:46:26.203751 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6bf1dca1-b191-4796-b326-baac53e84045\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://439376b6f14a03790c4d49523ce4f21e3ee73ee0a81954f873409cf956048f53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sh92h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://57ec3f3a4da992b4288db7d6633da18e479fdc56b10c503a2a44368671882f0d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sh92h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:27Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-xhdr5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:26Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:26 crc kubenswrapper[4780]: I1210 10:46:26.225265 4780 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://acd480054c5d19efdbcce679b2246ce2972593aa710b1074a2fa90006f351c66\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7c28d1629c1d97373d475f14e54585e81a67114d5a69e21be9d1e750040d6d5c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:26Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:26 crc kubenswrapper[4780]: I1210 10:46:26.241714 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-msm77" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c1046aa3-9bf1-4013-8e8d-5629f08ed5e2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6a59aa8cd7a70e094ff80b0ae626aaabb77fb7ffdcffa1320945909c32da0704\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2ttwk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:30Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-msm77\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:26Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:26 crc kubenswrapper[4780]: I1210 10:46:26.257080 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"056c06c7-eb0f-4fb7-9f86-2884fd0d1e60\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b9592145faf65060693a3f1e14db253e10cd09c642ea0aa1a7682f5b06872f14\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:44:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://eccc5070135705c193e1020872bffc1b8aeb70dbe82f1bb520f36f012ca00703\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:44:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://72f3bd530fbcc66ed779ee9f6ef1c442f2697ca027aad5bd2473f42101d55528\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:44:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3970fa6bd2aa3e93a4473d78218e9dd494aa5f7056efbcfb123160bb63162192\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3970fa6bd2aa3e93a4473d78218e9dd494aa5f7056efbcfb123160bb63162192\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:44:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:44:57Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:44:56Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:26Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:26 crc kubenswrapper[4780]: I1210 10:46:26.272271 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:26 crc kubenswrapper[4780]: I1210 10:46:26.272349 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:26 crc kubenswrapper[4780]: I1210 10:46:26.272364 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:26 crc kubenswrapper[4780]: I1210 10:46:26.272393 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:26 crc kubenswrapper[4780]: I1210 10:46:26.272404 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:26Z","lastTransitionTime":"2025-12-10T10:46:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:26 crc kubenswrapper[4780]: I1210 10:46:26.272839 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:26Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:26 crc kubenswrapper[4780]: I1210 10:46:26.376047 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:26 crc kubenswrapper[4780]: I1210 10:46:26.376107 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:26 crc kubenswrapper[4780]: I1210 10:46:26.376124 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:26 crc kubenswrapper[4780]: I1210 10:46:26.376150 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:26 crc kubenswrapper[4780]: I1210 10:46:26.376170 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:26Z","lastTransitionTime":"2025-12-10T10:46:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration 
file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:26 crc kubenswrapper[4780]: I1210 10:46:26.479962 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:26 crc kubenswrapper[4780]: I1210 10:46:26.480009 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:26 crc kubenswrapper[4780]: I1210 10:46:26.480019 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:26 crc kubenswrapper[4780]: I1210 10:46:26.480043 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:26 crc kubenswrapper[4780]: I1210 10:46:26.480054 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:26Z","lastTransitionTime":"2025-12-10T10:46:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:26 crc kubenswrapper[4780]: I1210 10:46:26.582968 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:26 crc kubenswrapper[4780]: I1210 10:46:26.583072 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:26 crc kubenswrapper[4780]: I1210 10:46:26.583087 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:26 crc kubenswrapper[4780]: I1210 10:46:26.583110 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:26 crc kubenswrapper[4780]: I1210 10:46:26.583128 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:26Z","lastTransitionTime":"2025-12-10T10:46:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:26 crc kubenswrapper[4780]: I1210 10:46:26.686545 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:26 crc kubenswrapper[4780]: I1210 10:46:26.686625 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:26 crc kubenswrapper[4780]: I1210 10:46:26.686642 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:26 crc kubenswrapper[4780]: I1210 10:46:26.686659 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:26 crc kubenswrapper[4780]: I1210 10:46:26.686673 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:26Z","lastTransitionTime":"2025-12-10T10:46:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:26 crc kubenswrapper[4780]: I1210 10:46:26.789648 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:26 crc kubenswrapper[4780]: I1210 10:46:26.789710 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:26 crc kubenswrapper[4780]: I1210 10:46:26.789719 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:26 crc kubenswrapper[4780]: I1210 10:46:26.789737 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:26 crc kubenswrapper[4780]: I1210 10:46:26.789746 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:26Z","lastTransitionTime":"2025-12-10T10:46:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:26 crc kubenswrapper[4780]: I1210 10:46:26.893005 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:26 crc kubenswrapper[4780]: I1210 10:46:26.893086 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:26 crc kubenswrapper[4780]: I1210 10:46:26.893109 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:26 crc kubenswrapper[4780]: I1210 10:46:26.893132 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:26 crc kubenswrapper[4780]: I1210 10:46:26.893145 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:26Z","lastTransitionTime":"2025-12-10T10:46:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:26 crc kubenswrapper[4780]: I1210 10:46:26.996650 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:26 crc kubenswrapper[4780]: I1210 10:46:26.996707 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:26 crc kubenswrapper[4780]: I1210 10:46:26.996721 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:26 crc kubenswrapper[4780]: I1210 10:46:26.996736 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:26 crc kubenswrapper[4780]: I1210 10:46:26.996746 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:26Z","lastTransitionTime":"2025-12-10T10:46:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:27 crc kubenswrapper[4780]: I1210 10:46:27.099718 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:27 crc kubenswrapper[4780]: I1210 10:46:27.099782 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:27 crc kubenswrapper[4780]: I1210 10:46:27.099794 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:27 crc kubenswrapper[4780]: I1210 10:46:27.099814 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:27 crc kubenswrapper[4780]: I1210 10:46:27.099826 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:27Z","lastTransitionTime":"2025-12-10T10:46:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:27 crc kubenswrapper[4780]: I1210 10:46:27.202983 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:27 crc kubenswrapper[4780]: I1210 10:46:27.203034 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:27 crc kubenswrapper[4780]: I1210 10:46:27.203073 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:27 crc kubenswrapper[4780]: I1210 10:46:27.203092 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:27 crc kubenswrapper[4780]: I1210 10:46:27.203105 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:27Z","lastTransitionTime":"2025-12-10T10:46:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:27 crc kubenswrapper[4780]: I1210 10:46:27.306754 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:27 crc kubenswrapper[4780]: I1210 10:46:27.306839 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:27 crc kubenswrapper[4780]: I1210 10:46:27.306864 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:27 crc kubenswrapper[4780]: I1210 10:46:27.306895 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:27 crc kubenswrapper[4780]: I1210 10:46:27.306955 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:27Z","lastTransitionTime":"2025-12-10T10:46:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:27 crc kubenswrapper[4780]: I1210 10:46:27.412077 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:27 crc kubenswrapper[4780]: I1210 10:46:27.412156 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:27 crc kubenswrapper[4780]: I1210 10:46:27.412186 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:27 crc kubenswrapper[4780]: I1210 10:46:27.412212 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:27 crc kubenswrapper[4780]: I1210 10:46:27.412227 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:27Z","lastTransitionTime":"2025-12-10T10:46:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:27 crc kubenswrapper[4780]: I1210 10:46:27.515541 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:27 crc kubenswrapper[4780]: I1210 10:46:27.515598 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:27 crc kubenswrapper[4780]: I1210 10:46:27.515617 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:27 crc kubenswrapper[4780]: I1210 10:46:27.515640 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:27 crc kubenswrapper[4780]: I1210 10:46:27.515656 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:27Z","lastTransitionTime":"2025-12-10T10:46:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:27 crc kubenswrapper[4780]: I1210 10:46:27.618953 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:27 crc kubenswrapper[4780]: I1210 10:46:27.619030 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:27 crc kubenswrapper[4780]: I1210 10:46:27.619043 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:27 crc kubenswrapper[4780]: I1210 10:46:27.619064 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:27 crc kubenswrapper[4780]: I1210 10:46:27.619076 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:27Z","lastTransitionTime":"2025-12-10T10:46:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:27 crc kubenswrapper[4780]: I1210 10:46:27.722316 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:27 crc kubenswrapper[4780]: I1210 10:46:27.722377 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:27 crc kubenswrapper[4780]: I1210 10:46:27.722397 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:27 crc kubenswrapper[4780]: I1210 10:46:27.722420 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:27 crc kubenswrapper[4780]: I1210 10:46:27.722435 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:27Z","lastTransitionTime":"2025-12-10T10:46:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:27 crc kubenswrapper[4780]: I1210 10:46:27.825960 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:27 crc kubenswrapper[4780]: I1210 10:46:27.826026 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:27 crc kubenswrapper[4780]: I1210 10:46:27.826042 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:27 crc kubenswrapper[4780]: I1210 10:46:27.826066 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:27 crc kubenswrapper[4780]: I1210 10:46:27.826081 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:27Z","lastTransitionTime":"2025-12-10T10:46:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:27 crc kubenswrapper[4780]: I1210 10:46:27.930097 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:27 crc kubenswrapper[4780]: I1210 10:46:27.930218 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:27 crc kubenswrapper[4780]: I1210 10:46:27.930234 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:27 crc kubenswrapper[4780]: I1210 10:46:27.930257 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:27 crc kubenswrapper[4780]: I1210 10:46:27.930273 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:27Z","lastTransitionTime":"2025-12-10T10:46:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:27 crc kubenswrapper[4780]: I1210 10:46:27.958668 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 10 10:46:27 crc kubenswrapper[4780]: I1210 10:46:27.958669 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 10 10:46:27 crc kubenswrapper[4780]: I1210 10:46:27.958822 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 10 10:46:27 crc kubenswrapper[4780]: E1210 10:46:27.959557 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 10 10:46:27 crc kubenswrapper[4780]: E1210 10:46:27.959525 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 10 10:46:27 crc kubenswrapper[4780]: I1210 10:46:27.959035 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-46s5p" Dec 10 10:46:27 crc kubenswrapper[4780]: E1210 10:46:27.959991 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 10 10:46:27 crc kubenswrapper[4780]: E1210 10:46:27.960207 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-46s5p" podUID="24187953-1dc5-48d7-b00c-1e5876604b6b" Dec 10 10:46:28 crc kubenswrapper[4780]: I1210 10:46:28.033423 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:28 crc kubenswrapper[4780]: I1210 10:46:28.033497 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:28 crc kubenswrapper[4780]: I1210 10:46:28.033510 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:28 crc kubenswrapper[4780]: I1210 10:46:28.033531 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:28 crc kubenswrapper[4780]: I1210 10:46:28.033545 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:28Z","lastTransitionTime":"2025-12-10T10:46:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:28 crc kubenswrapper[4780]: I1210 10:46:28.137799 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:28 crc kubenswrapper[4780]: I1210 10:46:28.137865 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:28 crc kubenswrapper[4780]: I1210 10:46:28.137957 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:28 crc kubenswrapper[4780]: I1210 10:46:28.137990 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:28 crc kubenswrapper[4780]: I1210 10:46:28.138006 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:28Z","lastTransitionTime":"2025-12-10T10:46:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:28 crc kubenswrapper[4780]: I1210 10:46:28.241245 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:28 crc kubenswrapper[4780]: I1210 10:46:28.241316 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:28 crc kubenswrapper[4780]: I1210 10:46:28.241331 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:28 crc kubenswrapper[4780]: I1210 10:46:28.241352 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:28 crc kubenswrapper[4780]: I1210 10:46:28.241366 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:28Z","lastTransitionTime":"2025-12-10T10:46:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:28 crc kubenswrapper[4780]: I1210 10:46:28.344184 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:28 crc kubenswrapper[4780]: I1210 10:46:28.344233 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:28 crc kubenswrapper[4780]: I1210 10:46:28.344242 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:28 crc kubenswrapper[4780]: I1210 10:46:28.344258 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:28 crc kubenswrapper[4780]: I1210 10:46:28.344271 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:28Z","lastTransitionTime":"2025-12-10T10:46:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:28 crc kubenswrapper[4780]: I1210 10:46:28.446568 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:28 crc kubenswrapper[4780]: I1210 10:46:28.446629 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:28 crc kubenswrapper[4780]: I1210 10:46:28.446643 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:28 crc kubenswrapper[4780]: I1210 10:46:28.446661 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:28 crc kubenswrapper[4780]: I1210 10:46:28.446677 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:28Z","lastTransitionTime":"2025-12-10T10:46:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:28 crc kubenswrapper[4780]: I1210 10:46:28.549765 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:28 crc kubenswrapper[4780]: I1210 10:46:28.549852 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:28 crc kubenswrapper[4780]: I1210 10:46:28.549874 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:28 crc kubenswrapper[4780]: I1210 10:46:28.549902 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:28 crc kubenswrapper[4780]: I1210 10:46:28.549955 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:28Z","lastTransitionTime":"2025-12-10T10:46:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:28 crc kubenswrapper[4780]: I1210 10:46:28.652891 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:28 crc kubenswrapper[4780]: I1210 10:46:28.652984 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:28 crc kubenswrapper[4780]: I1210 10:46:28.653003 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:28 crc kubenswrapper[4780]: I1210 10:46:28.653024 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:28 crc kubenswrapper[4780]: I1210 10:46:28.653035 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:28Z","lastTransitionTime":"2025-12-10T10:46:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:28 crc kubenswrapper[4780]: I1210 10:46:28.756399 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:28 crc kubenswrapper[4780]: I1210 10:46:28.756475 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:28 crc kubenswrapper[4780]: I1210 10:46:28.756485 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:28 crc kubenswrapper[4780]: I1210 10:46:28.756503 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:28 crc kubenswrapper[4780]: I1210 10:46:28.756517 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:28Z","lastTransitionTime":"2025-12-10T10:46:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:28 crc kubenswrapper[4780]: I1210 10:46:28.859915 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:28 crc kubenswrapper[4780]: I1210 10:46:28.860013 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:28 crc kubenswrapper[4780]: I1210 10:46:28.860027 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:28 crc kubenswrapper[4780]: I1210 10:46:28.860055 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:28 crc kubenswrapper[4780]: I1210 10:46:28.860070 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:28Z","lastTransitionTime":"2025-12-10T10:46:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:28 crc kubenswrapper[4780]: I1210 10:46:28.963505 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:28 crc kubenswrapper[4780]: I1210 10:46:28.963545 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:28 crc kubenswrapper[4780]: I1210 10:46:28.963557 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:28 crc kubenswrapper[4780]: I1210 10:46:28.963573 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:28 crc kubenswrapper[4780]: I1210 10:46:28.963584 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:28Z","lastTransitionTime":"2025-12-10T10:46:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:28 crc kubenswrapper[4780]: I1210 10:46:28.974487 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/kube-rbac-proxy-crio-crc"] Dec 10 10:46:29 crc kubenswrapper[4780]: I1210 10:46:29.066463 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:29 crc kubenswrapper[4780]: I1210 10:46:29.066498 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:29 crc kubenswrapper[4780]: I1210 10:46:29.066508 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:29 crc kubenswrapper[4780]: I1210 10:46:29.066524 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:29 crc kubenswrapper[4780]: I1210 10:46:29.066534 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:29Z","lastTransitionTime":"2025-12-10T10:46:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:29 crc kubenswrapper[4780]: I1210 10:46:29.168978 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:29 crc kubenswrapper[4780]: I1210 10:46:29.169202 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:29 crc kubenswrapper[4780]: I1210 10:46:29.169235 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:29 crc kubenswrapper[4780]: I1210 10:46:29.169271 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:29 crc kubenswrapper[4780]: I1210 10:46:29.169295 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:29Z","lastTransitionTime":"2025-12-10T10:46:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:29 crc kubenswrapper[4780]: I1210 10:46:29.272545 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:29 crc kubenswrapper[4780]: I1210 10:46:29.272598 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:29 crc kubenswrapper[4780]: I1210 10:46:29.272608 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:29 crc kubenswrapper[4780]: I1210 10:46:29.272624 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:29 crc kubenswrapper[4780]: I1210 10:46:29.272634 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:29Z","lastTransitionTime":"2025-12-10T10:46:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:29 crc kubenswrapper[4780]: I1210 10:46:29.376462 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:29 crc kubenswrapper[4780]: I1210 10:46:29.377093 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:29 crc kubenswrapper[4780]: I1210 10:46:29.377134 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:29 crc kubenswrapper[4780]: I1210 10:46:29.377156 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:29 crc kubenswrapper[4780]: I1210 10:46:29.377166 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:29Z","lastTransitionTime":"2025-12-10T10:46:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:29 crc kubenswrapper[4780]: I1210 10:46:29.480059 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:29 crc kubenswrapper[4780]: I1210 10:46:29.480153 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:29 crc kubenswrapper[4780]: I1210 10:46:29.480173 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:29 crc kubenswrapper[4780]: I1210 10:46:29.480192 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:29 crc kubenswrapper[4780]: I1210 10:46:29.480204 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:29Z","lastTransitionTime":"2025-12-10T10:46:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:29 crc kubenswrapper[4780]: I1210 10:46:29.583419 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:29 crc kubenswrapper[4780]: I1210 10:46:29.583464 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:29 crc kubenswrapper[4780]: I1210 10:46:29.583477 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:29 crc kubenswrapper[4780]: I1210 10:46:29.583496 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:29 crc kubenswrapper[4780]: I1210 10:46:29.583512 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:29Z","lastTransitionTime":"2025-12-10T10:46:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:29 crc kubenswrapper[4780]: I1210 10:46:29.686062 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:29 crc kubenswrapper[4780]: I1210 10:46:29.686158 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:29 crc kubenswrapper[4780]: I1210 10:46:29.686188 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:29 crc kubenswrapper[4780]: I1210 10:46:29.686226 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:29 crc kubenswrapper[4780]: I1210 10:46:29.686253 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:29Z","lastTransitionTime":"2025-12-10T10:46:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:29 crc kubenswrapper[4780]: I1210 10:46:29.789067 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:29 crc kubenswrapper[4780]: I1210 10:46:29.789125 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:29 crc kubenswrapper[4780]: I1210 10:46:29.789141 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:29 crc kubenswrapper[4780]: I1210 10:46:29.789162 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:29 crc kubenswrapper[4780]: I1210 10:46:29.789178 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:29Z","lastTransitionTime":"2025-12-10T10:46:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:29 crc kubenswrapper[4780]: I1210 10:46:29.893365 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:29 crc kubenswrapper[4780]: I1210 10:46:29.893461 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:29 crc kubenswrapper[4780]: I1210 10:46:29.893477 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:29 crc kubenswrapper[4780]: I1210 10:46:29.893497 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:29 crc kubenswrapper[4780]: I1210 10:46:29.893509 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:29Z","lastTransitionTime":"2025-12-10T10:46:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:29 crc kubenswrapper[4780]: I1210 10:46:29.958427 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 10 10:46:29 crc kubenswrapper[4780]: I1210 10:46:29.958519 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-46s5p" Dec 10 10:46:29 crc kubenswrapper[4780]: I1210 10:46:29.958457 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 10 10:46:29 crc kubenswrapper[4780]: I1210 10:46:29.958477 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 10 10:46:29 crc kubenswrapper[4780]: E1210 10:46:29.958656 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 10 10:46:29 crc kubenswrapper[4780]: E1210 10:46:29.958764 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 10 10:46:29 crc kubenswrapper[4780]: E1210 10:46:29.958818 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-46s5p" podUID="24187953-1dc5-48d7-b00c-1e5876604b6b" Dec 10 10:46:29 crc kubenswrapper[4780]: E1210 10:46:29.959030 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 10 10:46:29 crc kubenswrapper[4780]: I1210 10:46:29.996876 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:29 crc kubenswrapper[4780]: I1210 10:46:29.996935 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:29 crc kubenswrapper[4780]: I1210 10:46:29.996946 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:29 crc kubenswrapper[4780]: I1210 10:46:29.996968 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:29 crc kubenswrapper[4780]: I1210 10:46:29.996978 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:29Z","lastTransitionTime":"2025-12-10T10:46:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:30 crc kubenswrapper[4780]: I1210 10:46:30.100246 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:30 crc kubenswrapper[4780]: I1210 10:46:30.100305 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:30 crc kubenswrapper[4780]: I1210 10:46:30.100319 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:30 crc kubenswrapper[4780]: I1210 10:46:30.100346 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:30 crc kubenswrapper[4780]: I1210 10:46:30.100372 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:30Z","lastTransitionTime":"2025-12-10T10:46:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:30 crc kubenswrapper[4780]: I1210 10:46:30.204667 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:30 crc kubenswrapper[4780]: I1210 10:46:30.204751 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:30 crc kubenswrapper[4780]: I1210 10:46:30.204771 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:30 crc kubenswrapper[4780]: I1210 10:46:30.204797 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:30 crc kubenswrapper[4780]: I1210 10:46:30.204815 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:30Z","lastTransitionTime":"2025-12-10T10:46:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:30 crc kubenswrapper[4780]: I1210 10:46:30.308380 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:30 crc kubenswrapper[4780]: I1210 10:46:30.308443 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:30 crc kubenswrapper[4780]: I1210 10:46:30.308456 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:30 crc kubenswrapper[4780]: I1210 10:46:30.308486 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:30 crc kubenswrapper[4780]: I1210 10:46:30.308511 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:30Z","lastTransitionTime":"2025-12-10T10:46:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:30 crc kubenswrapper[4780]: I1210 10:46:30.412435 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:30 crc kubenswrapper[4780]: I1210 10:46:30.412491 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:30 crc kubenswrapper[4780]: I1210 10:46:30.412511 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:30 crc kubenswrapper[4780]: I1210 10:46:30.412533 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:30 crc kubenswrapper[4780]: I1210 10:46:30.412549 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:30Z","lastTransitionTime":"2025-12-10T10:46:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:30 crc kubenswrapper[4780]: I1210 10:46:30.492600 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:30 crc kubenswrapper[4780]: I1210 10:46:30.492706 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:30 crc kubenswrapper[4780]: I1210 10:46:30.492735 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:30 crc kubenswrapper[4780]: I1210 10:46:30.492771 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:30 crc kubenswrapper[4780]: I1210 10:46:30.492799 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:30Z","lastTransitionTime":"2025-12-10T10:46:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:30 crc kubenswrapper[4780]: E1210 10:46:30.512684 4780 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:46:30Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:30Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:46:30Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:30Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:46:30Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:30Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:46:30Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:30Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"90085e8a-5ea9-4564-85e4-5635b00d094d\\\",\\\"systemUUID\\\":\\\"0182e509-70c5-4f26-9ad3-610230bb601e\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:30Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:30 crc kubenswrapper[4780]: I1210 10:46:30.519797 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:30 crc kubenswrapper[4780]: I1210 10:46:30.519864 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Dec 10 10:46:30 crc kubenswrapper[4780]: I1210 10:46:30.519883 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:30 crc kubenswrapper[4780]: I1210 10:46:30.519907 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:30 crc kubenswrapper[4780]: I1210 10:46:30.519945 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:30Z","lastTransitionTime":"2025-12-10T10:46:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:30 crc kubenswrapper[4780]: E1210 10:46:30.546673 4780 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:46:30Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:30Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:46:30Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:30Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:46:30Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:30Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:46:30Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:30Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"90085e8a-5ea9-4564-85e4-5635b00d094d\\\",\\\"systemUUID\\\":\\\"0182e509-70c5-4f26-9ad3-610230bb601e\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:30Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:30 crc kubenswrapper[4780]: I1210 10:46:30.551560 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:30 crc kubenswrapper[4780]: I1210 10:46:30.551604 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Dec 10 10:46:30 crc kubenswrapper[4780]: I1210 10:46:30.551615 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:30 crc kubenswrapper[4780]: I1210 10:46:30.551633 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:30 crc kubenswrapper[4780]: I1210 10:46:30.551644 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:30Z","lastTransitionTime":"2025-12-10T10:46:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:30 crc kubenswrapper[4780]: E1210 10:46:30.568544 4780 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:46:30Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:30Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:46:30Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:30Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:46:30Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:30Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:46:30Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:30Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"90085e8a-5ea9-4564-85e4-5635b00d094d\\\",\\\"systemUUID\\\":\\\"0182e509-70c5-4f26-9ad3-610230bb601e\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:30Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:30 crc kubenswrapper[4780]: I1210 10:46:30.575917 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:30 crc kubenswrapper[4780]: I1210 10:46:30.576001 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Dec 10 10:46:30 crc kubenswrapper[4780]: I1210 10:46:30.576013 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:30 crc kubenswrapper[4780]: I1210 10:46:30.576036 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:30 crc kubenswrapper[4780]: I1210 10:46:30.576048 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:30Z","lastTransitionTime":"2025-12-10T10:46:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:30 crc kubenswrapper[4780]: E1210 10:46:30.590239 4780 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:46:30Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:30Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:46:30Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:30Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:46:30Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:30Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:46:30Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:30Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"90085e8a-5ea9-4564-85e4-5635b00d094d\\\",\\\"systemUUID\\\":\\\"0182e509-70c5-4f26-9ad3-610230bb601e\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:30Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:30 crc kubenswrapper[4780]: I1210 10:46:30.595586 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:30 crc kubenswrapper[4780]: I1210 10:46:30.595637 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Dec 10 10:46:30 crc kubenswrapper[4780]: I1210 10:46:30.595651 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:30 crc kubenswrapper[4780]: I1210 10:46:30.595670 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:30 crc kubenswrapper[4780]: I1210 10:46:30.595683 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:30Z","lastTransitionTime":"2025-12-10T10:46:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:30 crc kubenswrapper[4780]: E1210 10:46:30.610176 4780 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:46:30Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:30Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:46:30Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:30Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:46:30Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:30Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:46:30Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:30Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"90085e8a-5ea9-4564-85e4-5635b00d094d\\\",\\\"systemUUID\\\":\\\"0182e509-70c5-4f26-9ad3-610230bb601e\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:30Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:30 crc kubenswrapper[4780]: E1210 10:46:30.610377 4780 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Dec 10 10:46:30 crc kubenswrapper[4780]: I1210 10:46:30.612946 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Dec 10 10:46:30 crc kubenswrapper[4780]: I1210 10:46:30.612991 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:30 crc kubenswrapper[4780]: I1210 10:46:30.613007 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:30 crc kubenswrapper[4780]: I1210 10:46:30.613027 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:30 crc kubenswrapper[4780]: I1210 10:46:30.613039 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:30Z","lastTransitionTime":"2025-12-10T10:46:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:30 crc kubenswrapper[4780]: I1210 10:46:30.716559 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:30 crc kubenswrapper[4780]: I1210 10:46:30.716622 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:30 crc kubenswrapper[4780]: I1210 10:46:30.716633 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:30 crc kubenswrapper[4780]: I1210 10:46:30.716650 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:30 crc kubenswrapper[4780]: I1210 10:46:30.716662 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:30Z","lastTransitionTime":"2025-12-10T10:46:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:30 crc kubenswrapper[4780]: I1210 10:46:30.820346 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:30 crc kubenswrapper[4780]: I1210 10:46:30.820408 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:30 crc kubenswrapper[4780]: I1210 10:46:30.820420 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:30 crc kubenswrapper[4780]: I1210 10:46:30.820442 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:30 crc kubenswrapper[4780]: I1210 10:46:30.820456 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:30Z","lastTransitionTime":"2025-12-10T10:46:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:30 crc kubenswrapper[4780]: I1210 10:46:30.923755 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:30 crc kubenswrapper[4780]: I1210 10:46:30.923813 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:30 crc kubenswrapper[4780]: I1210 10:46:30.923826 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:30 crc kubenswrapper[4780]: I1210 10:46:30.923850 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:30 crc kubenswrapper[4780]: I1210 10:46:30.923864 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:30Z","lastTransitionTime":"2025-12-10T10:46:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:31 crc kubenswrapper[4780]: I1210 10:46:31.027221 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:31 crc kubenswrapper[4780]: I1210 10:46:31.027279 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:31 crc kubenswrapper[4780]: I1210 10:46:31.027292 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:31 crc kubenswrapper[4780]: I1210 10:46:31.027313 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:31 crc kubenswrapper[4780]: I1210 10:46:31.027327 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:31Z","lastTransitionTime":"2025-12-10T10:46:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:31 crc kubenswrapper[4780]: I1210 10:46:31.130612 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:31 crc kubenswrapper[4780]: I1210 10:46:31.130658 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:31 crc kubenswrapper[4780]: I1210 10:46:31.130672 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:31 crc kubenswrapper[4780]: I1210 10:46:31.130690 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:31 crc kubenswrapper[4780]: I1210 10:46:31.130703 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:31Z","lastTransitionTime":"2025-12-10T10:46:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:31 crc kubenswrapper[4780]: I1210 10:46:31.233533 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:31 crc kubenswrapper[4780]: I1210 10:46:31.233608 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:31 crc kubenswrapper[4780]: I1210 10:46:31.233624 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:31 crc kubenswrapper[4780]: I1210 10:46:31.233657 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:31 crc kubenswrapper[4780]: I1210 10:46:31.233674 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:31Z","lastTransitionTime":"2025-12-10T10:46:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:31 crc kubenswrapper[4780]: I1210 10:46:31.336330 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:31 crc kubenswrapper[4780]: I1210 10:46:31.336373 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:31 crc kubenswrapper[4780]: I1210 10:46:31.336384 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:31 crc kubenswrapper[4780]: I1210 10:46:31.336398 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:31 crc kubenswrapper[4780]: I1210 10:46:31.336409 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:31Z","lastTransitionTime":"2025-12-10T10:46:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:31 crc kubenswrapper[4780]: I1210 10:46:31.440659 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:31 crc kubenswrapper[4780]: I1210 10:46:31.440739 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:31 crc kubenswrapper[4780]: I1210 10:46:31.440751 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:31 crc kubenswrapper[4780]: I1210 10:46:31.440778 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:31 crc kubenswrapper[4780]: I1210 10:46:31.440797 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:31Z","lastTransitionTime":"2025-12-10T10:46:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:31 crc kubenswrapper[4780]: I1210 10:46:31.525765 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 10 10:46:31 crc kubenswrapper[4780]: I1210 10:46:31.525998 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 10 10:46:31 crc kubenswrapper[4780]: I1210 10:46:31.526086 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 10 10:46:31 crc kubenswrapper[4780]: E1210 10:46:31.526252 4780 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Dec 10 10:46:31 crc kubenswrapper[4780]: E1210 10:46:31.526272 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-10 10:47:35.52622327 +0000 UTC m=+160.379616823 (durationBeforeRetry 1m4s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:46:31 crc kubenswrapper[4780]: E1210 10:46:31.526325 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-12-10 10:47:35.526311462 +0000 UTC m=+160.379705065 (durationBeforeRetry 1m4s). 
Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Dec 10 10:46:31 crc kubenswrapper[4780]: I1210 10:46:31.526399 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 10 10:46:31 crc kubenswrapper[4780]: I1210 10:46:31.526445 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 10 10:46:31 crc kubenswrapper[4780]: E1210 10:46:31.526510 4780 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Dec 10 10:46:31 crc kubenswrapper[4780]: E1210 10:46:31.526564 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-12-10 10:47:35.526537628 +0000 UTC m=+160.379931071 (durationBeforeRetry 1m4s). 
Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Dec 10 10:46:31 crc kubenswrapper[4780]: E1210 10:46:31.526782 4780 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Dec 10 10:46:31 crc kubenswrapper[4780]: E1210 10:46:31.526795 4780 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Dec 10 10:46:31 crc kubenswrapper[4780]: E1210 10:46:31.526817 4780 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Dec 10 10:46:31 crc kubenswrapper[4780]: E1210 10:46:31.526821 4780 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Dec 10 10:46:31 crc kubenswrapper[4780]: E1210 10:46:31.526846 4780 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Dec 10 10:46:31 crc kubenswrapper[4780]: E1210 10:46:31.526852 4780 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Dec 10 10:46:31 crc kubenswrapper[4780]: E1210 10:46:31.526887 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-12-10 10:47:35.526872347 +0000 UTC m=+160.380265790 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Dec 10 10:46:31 crc kubenswrapper[4780]: E1210 10:46:31.526963 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-12-10 10:47:35.526941768 +0000 UTC m=+160.380335361 (durationBeforeRetry 1m4s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Dec 10 10:46:31 crc kubenswrapper[4780]: I1210 10:46:31.543958 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:31 crc kubenswrapper[4780]: I1210 10:46:31.544034 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:31 crc kubenswrapper[4780]: I1210 10:46:31.544080 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:31 crc kubenswrapper[4780]: I1210 10:46:31.544110 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:31 crc kubenswrapper[4780]: I1210 10:46:31.544129 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:31Z","lastTransitionTime":"2025-12-10T10:46:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:31 crc kubenswrapper[4780]: I1210 10:46:31.647391 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:31 crc kubenswrapper[4780]: I1210 10:46:31.647426 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:31 crc kubenswrapper[4780]: I1210 10:46:31.647444 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:31 crc kubenswrapper[4780]: I1210 10:46:31.647464 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:31 crc kubenswrapper[4780]: I1210 10:46:31.648791 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:31Z","lastTransitionTime":"2025-12-10T10:46:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:31 crc kubenswrapper[4780]: I1210 10:46:31.752658 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:31 crc kubenswrapper[4780]: I1210 10:46:31.752724 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:31 crc kubenswrapper[4780]: I1210 10:46:31.752742 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:31 crc kubenswrapper[4780]: I1210 10:46:31.752762 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:31 crc kubenswrapper[4780]: I1210 10:46:31.752775 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:31Z","lastTransitionTime":"2025-12-10T10:46:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:31 crc kubenswrapper[4780]: I1210 10:46:31.855838 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:31 crc kubenswrapper[4780]: I1210 10:46:31.855887 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:31 crc kubenswrapper[4780]: I1210 10:46:31.855899 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:31 crc kubenswrapper[4780]: I1210 10:46:31.855937 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:31 crc kubenswrapper[4780]: I1210 10:46:31.855952 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:31Z","lastTransitionTime":"2025-12-10T10:46:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:31 crc kubenswrapper[4780]: I1210 10:46:31.958063 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 10 10:46:31 crc kubenswrapper[4780]: I1210 10:46:31.958125 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 10 10:46:31 crc kubenswrapper[4780]: I1210 10:46:31.958157 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-46s5p" Dec 10 10:46:31 crc kubenswrapper[4780]: E1210 10:46:31.958284 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 10 10:46:31 crc kubenswrapper[4780]: I1210 10:46:31.958337 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 10 10:46:31 crc kubenswrapper[4780]: E1210 10:46:31.958396 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 10 10:46:31 crc kubenswrapper[4780]: I1210 10:46:31.958423 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:31 crc kubenswrapper[4780]: I1210 10:46:31.958450 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:31 crc kubenswrapper[4780]: I1210 10:46:31.958464 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:31 crc kubenswrapper[4780]: I1210 10:46:31.958482 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:31 crc kubenswrapper[4780]: E1210 10:46:31.958458 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 10 10:46:31 crc kubenswrapper[4780]: E1210 10:46:31.958950 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-46s5p" podUID="24187953-1dc5-48d7-b00c-1e5876604b6b" Dec 10 10:46:31 crc kubenswrapper[4780]: I1210 10:46:31.958888 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:31Z","lastTransitionTime":"2025-12-10T10:46:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:32 crc kubenswrapper[4780]: I1210 10:46:32.062671 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:32 crc kubenswrapper[4780]: I1210 10:46:32.062708 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:32 crc kubenswrapper[4780]: I1210 10:46:32.062722 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:32 crc kubenswrapper[4780]: I1210 10:46:32.062957 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:32 crc kubenswrapper[4780]: I1210 10:46:32.062979 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:32Z","lastTransitionTime":"2025-12-10T10:46:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:32 crc kubenswrapper[4780]: I1210 10:46:32.289869 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:32 crc kubenswrapper[4780]: I1210 10:46:32.289958 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:32 crc kubenswrapper[4780]: I1210 10:46:32.289972 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:32 crc kubenswrapper[4780]: I1210 10:46:32.289991 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:32 crc kubenswrapper[4780]: I1210 10:46:32.290003 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:32Z","lastTransitionTime":"2025-12-10T10:46:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:32 crc kubenswrapper[4780]: I1210 10:46:32.392991 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:32 crc kubenswrapper[4780]: I1210 10:46:32.393319 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:32 crc kubenswrapper[4780]: I1210 10:46:32.393329 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:32 crc kubenswrapper[4780]: I1210 10:46:32.393344 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:32 crc kubenswrapper[4780]: I1210 10:46:32.393354 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:32Z","lastTransitionTime":"2025-12-10T10:46:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:32 crc kubenswrapper[4780]: I1210 10:46:32.496352 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:32 crc kubenswrapper[4780]: I1210 10:46:32.496423 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:32 crc kubenswrapper[4780]: I1210 10:46:32.496441 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:32 crc kubenswrapper[4780]: I1210 10:46:32.496484 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:32 crc kubenswrapper[4780]: I1210 10:46:32.496502 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:32Z","lastTransitionTime":"2025-12-10T10:46:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:32 crc kubenswrapper[4780]: I1210 10:46:32.598995 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:32 crc kubenswrapper[4780]: I1210 10:46:32.599052 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:32 crc kubenswrapper[4780]: I1210 10:46:32.599076 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:32 crc kubenswrapper[4780]: I1210 10:46:32.599107 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:32 crc kubenswrapper[4780]: I1210 10:46:32.599126 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:32Z","lastTransitionTime":"2025-12-10T10:46:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:32 crc kubenswrapper[4780]: I1210 10:46:32.701902 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:32 crc kubenswrapper[4780]: I1210 10:46:32.701969 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:32 crc kubenswrapper[4780]: I1210 10:46:32.701979 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:32 crc kubenswrapper[4780]: I1210 10:46:32.701995 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:32 crc kubenswrapper[4780]: I1210 10:46:32.702006 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:32Z","lastTransitionTime":"2025-12-10T10:46:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:32 crc kubenswrapper[4780]: I1210 10:46:32.804800 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:32 crc kubenswrapper[4780]: I1210 10:46:32.804863 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:32 crc kubenswrapper[4780]: I1210 10:46:32.804876 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:32 crc kubenswrapper[4780]: I1210 10:46:32.804896 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:32 crc kubenswrapper[4780]: I1210 10:46:32.804910 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:32Z","lastTransitionTime":"2025-12-10T10:46:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:32 crc kubenswrapper[4780]: I1210 10:46:32.907580 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:32 crc kubenswrapper[4780]: I1210 10:46:32.907628 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:32 crc kubenswrapper[4780]: I1210 10:46:32.907646 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:32 crc kubenswrapper[4780]: I1210 10:46:32.907669 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:32 crc kubenswrapper[4780]: I1210 10:46:32.907685 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:32Z","lastTransitionTime":"2025-12-10T10:46:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:32 crc kubenswrapper[4780]: I1210 10:46:32.959094 4780 scope.go:117] "RemoveContainer" containerID="8bae117db052a26101b63906a5bc25cd1ad0813261c80f78c00ee5235644c788" Dec 10 10:46:33 crc kubenswrapper[4780]: I1210 10:46:33.010867 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:33 crc kubenswrapper[4780]: I1210 10:46:33.010936 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:33 crc kubenswrapper[4780]: I1210 10:46:33.010951 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:33 crc kubenswrapper[4780]: I1210 10:46:33.010976 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:33 crc kubenswrapper[4780]: I1210 10:46:33.011005 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:33Z","lastTransitionTime":"2025-12-10T10:46:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:33 crc kubenswrapper[4780]: I1210 10:46:33.114687 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:33 crc kubenswrapper[4780]: I1210 10:46:33.115243 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:33 crc kubenswrapper[4780]: I1210 10:46:33.115568 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:33 crc kubenswrapper[4780]: I1210 10:46:33.115913 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:33 crc kubenswrapper[4780]: I1210 10:46:33.116295 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:33Z","lastTransitionTime":"2025-12-10T10:46:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:33 crc kubenswrapper[4780]: I1210 10:46:33.222262 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:33 crc kubenswrapper[4780]: I1210 10:46:33.222328 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:33 crc kubenswrapper[4780]: I1210 10:46:33.222342 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:33 crc kubenswrapper[4780]: I1210 10:46:33.222360 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:33 crc kubenswrapper[4780]: I1210 10:46:33.222374 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:33Z","lastTransitionTime":"2025-12-10T10:46:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:33 crc kubenswrapper[4780]: I1210 10:46:33.325386 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:33 crc kubenswrapper[4780]: I1210 10:46:33.325435 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:33 crc kubenswrapper[4780]: I1210 10:46:33.325445 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:33 crc kubenswrapper[4780]: I1210 10:46:33.325461 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:33 crc kubenswrapper[4780]: I1210 10:46:33.325471 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:33Z","lastTransitionTime":"2025-12-10T10:46:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:33 crc kubenswrapper[4780]: I1210 10:46:33.428736 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:33 crc kubenswrapper[4780]: I1210 10:46:33.428798 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:33 crc kubenswrapper[4780]: I1210 10:46:33.428808 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:33 crc kubenswrapper[4780]: I1210 10:46:33.428824 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:33 crc kubenswrapper[4780]: I1210 10:46:33.428834 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:33Z","lastTransitionTime":"2025-12-10T10:46:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:33 crc kubenswrapper[4780]: I1210 10:46:33.531753 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:33 crc kubenswrapper[4780]: I1210 10:46:33.531823 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:33 crc kubenswrapper[4780]: I1210 10:46:33.531838 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:33 crc kubenswrapper[4780]: I1210 10:46:33.531855 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:33 crc kubenswrapper[4780]: I1210 10:46:33.531868 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:33Z","lastTransitionTime":"2025-12-10T10:46:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:33 crc kubenswrapper[4780]: I1210 10:46:33.634656 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:33 crc kubenswrapper[4780]: I1210 10:46:33.634714 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:33 crc kubenswrapper[4780]: I1210 10:46:33.634724 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:33 crc kubenswrapper[4780]: I1210 10:46:33.634740 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:33 crc kubenswrapper[4780]: I1210 10:46:33.634750 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:33Z","lastTransitionTime":"2025-12-10T10:46:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:33 crc kubenswrapper[4780]: I1210 10:46:33.737760 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:33 crc kubenswrapper[4780]: I1210 10:46:33.737804 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:33 crc kubenswrapper[4780]: I1210 10:46:33.737816 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:33 crc kubenswrapper[4780]: I1210 10:46:33.737833 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:33 crc kubenswrapper[4780]: I1210 10:46:33.737843 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:33Z","lastTransitionTime":"2025-12-10T10:46:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:33 crc kubenswrapper[4780]: I1210 10:46:33.841204 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:33 crc kubenswrapper[4780]: I1210 10:46:33.841254 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:33 crc kubenswrapper[4780]: I1210 10:46:33.841267 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:33 crc kubenswrapper[4780]: I1210 10:46:33.841285 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:33 crc kubenswrapper[4780]: I1210 10:46:33.841297 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:33Z","lastTransitionTime":"2025-12-10T10:46:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:33 crc kubenswrapper[4780]: I1210 10:46:33.943517 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:33 crc kubenswrapper[4780]: I1210 10:46:33.943590 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:33 crc kubenswrapper[4780]: I1210 10:46:33.943605 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:33 crc kubenswrapper[4780]: I1210 10:46:33.943626 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:33 crc kubenswrapper[4780]: I1210 10:46:33.943638 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:33Z","lastTransitionTime":"2025-12-10T10:46:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:33 crc kubenswrapper[4780]: I1210 10:46:33.957985 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 10 10:46:33 crc kubenswrapper[4780]: I1210 10:46:33.958081 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-46s5p" Dec 10 10:46:33 crc kubenswrapper[4780]: E1210 10:46:33.958151 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 10 10:46:33 crc kubenswrapper[4780]: I1210 10:46:33.957994 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 10 10:46:33 crc kubenswrapper[4780]: I1210 10:46:33.958026 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 10 10:46:33 crc kubenswrapper[4780]: E1210 10:46:33.958280 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-46s5p" podUID="24187953-1dc5-48d7-b00c-1e5876604b6b" Dec 10 10:46:33 crc kubenswrapper[4780]: E1210 10:46:33.958375 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 10 10:46:33 crc kubenswrapper[4780]: E1210 10:46:33.958521 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 10 10:46:34 crc kubenswrapper[4780]: I1210 10:46:34.046056 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:34 crc kubenswrapper[4780]: I1210 10:46:34.046142 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:34 crc kubenswrapper[4780]: I1210 10:46:34.046166 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:34 crc kubenswrapper[4780]: I1210 10:46:34.046183 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:34 crc kubenswrapper[4780]: I1210 10:46:34.046230 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:34Z","lastTransitionTime":"2025-12-10T10:46:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:34 crc kubenswrapper[4780]: I1210 10:46:34.148694 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:34 crc kubenswrapper[4780]: I1210 10:46:34.148734 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:34 crc kubenswrapper[4780]: I1210 10:46:34.148744 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:34 crc kubenswrapper[4780]: I1210 10:46:34.148758 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:34 crc kubenswrapper[4780]: I1210 10:46:34.148793 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:34Z","lastTransitionTime":"2025-12-10T10:46:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:34 crc kubenswrapper[4780]: I1210 10:46:34.267291 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:34 crc kubenswrapper[4780]: I1210 10:46:34.267333 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:34 crc kubenswrapper[4780]: I1210 10:46:34.267343 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:34 crc kubenswrapper[4780]: I1210 10:46:34.267358 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:34 crc kubenswrapper[4780]: I1210 10:46:34.267371 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:34Z","lastTransitionTime":"2025-12-10T10:46:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:34 crc kubenswrapper[4780]: I1210 10:46:34.304738 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-fpl55_cc22221d-0c02-4e8c-8314-c2e6d9290b5e/ovnkube-controller/2.log" Dec 10 10:46:34 crc kubenswrapper[4780]: I1210 10:46:34.309245 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-fpl55" event={"ID":"cc22221d-0c02-4e8c-8314-c2e6d9290b5e","Type":"ContainerStarted","Data":"ff51c6ad85356e07cc2059fe4b05b1061ae2d8eb800f63ba392c8a4f36112ca5"} Dec 10 10:46:34 crc kubenswrapper[4780]: I1210 10:46:34.310354 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-fpl55" Dec 10 10:46:34 crc kubenswrapper[4780]: I1210 10:46:34.333779 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8ca741d2df061bc77e981eca8a555ba59082c6db0357093490908a1b27054340\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:34Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:34 crc kubenswrapper[4780]: I1210 10:46:34.352281 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6bf1dca1-b191-4796-b326-baac53e84045\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://439376b6f14a03790c4d49523ce4f21e3ee73ee0a81954f873409cf956048f53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sh92h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://57ec3f3a4da992b4288db7d6633da18e479fdc56b10c503a2a44368671882f0d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sh92h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:27Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-xhdr5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:34Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:34 crc kubenswrapper[4780]: I1210 10:46:34.370040 4780 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"da4d38a3-53c0-417d-a86f-3496714bd352\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7f0f1f88e79f81b145ad50cc2b0a6e66de12f33c6819aaa356c8e6f7808384f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:44:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d6c4b099607f36efe13488a53c66715ba756493eaa11612cef18b8174b7b48ef\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:44:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a352e1e0c2a6fcb27ed2d53b1952586d56239ac801471f41d319d4b9390f7334\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:44:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://960f2d7c617f5d0b3281d431308985bb7419af77581
a404554f849d22ffa1687\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f71a2483b5040f737133b92baa6ea162f365c492a0d860a50ba8eabd7ca23519\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-10T10:45:21Z\\\",\\\"message\\\":\\\"file observer\\\\nW1210 10:45:20.821547 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1210 10:45:20.821865 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1210 10:45:20.824662 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-536389068/tls.crt::/tmp/serving-cert-536389068/tls.key\\\\\\\"\\\\nI1210 10:45:21.107127 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1210 10:45:21.109726 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1210 10:45:21.109750 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1210 10:45:21.109791 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1210 10:45:21.109797 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1210 10:45:21.115580 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1210 10:45:21.115608 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1210 10:45:21.115614 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1210 10:45:21.115621 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1210 10:45:21.115664 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1210 10:45:21.115672 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1210 10:45:21.115676 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1210 10:45:21.117097 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1210 10:45:21.119004 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:14Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://531ca3c69106ee33211f11eba4a6fcbcf6cf5b8b6ee16193b5a118999537d2e0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:00Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6a7d241fa7fd864088c8bfa29a4457b38b3e78a6846872d6f52aeebb0de99c58\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6a7d241fa7fd864088c8bfa29a4457b38b3e78a6846872d6f52aeebb0de99c58\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:44:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:44:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:44:56Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:34Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:34 crc kubenswrapper[4780]: I1210 10:46:34.371676 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:34 crc kubenswrapper[4780]: I1210 10:46:34.371716 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:34 crc kubenswrapper[4780]: I1210 10:46:34.371724 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:34 crc kubenswrapper[4780]: I1210 10:46:34.371740 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:34 crc kubenswrapper[4780]: I1210 10:46:34.371751 4780 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:34Z","lastTransitionTime":"2025-12-10T10:46:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:34 crc kubenswrapper[4780]: I1210 10:46:34.386019 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"056c06c7-eb0f-4fb7-9f86-2884fd0d1e60\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b9592145faf65060693a3f1e14db253e10cd09c642ea0aa1a7682f5b06872f14\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:44:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://eccc5070135705c193e1020872bffc1b8aeb70dbe82f1bb520f36f012ca00703\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:44:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://72f3bd530fbcc66ed779ee9f6ef1c442f2697ca027aad5bd2473f42101d55528\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controlle
r\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:44:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3970fa6bd2aa3e93a4473d78218e9dd494aa5f7056efbcfb123160bb63162192\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3970fa6bd2aa3e93a4473d78218e9dd494aa5f7056efbcfb123160bb63162192\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:44:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:44:57Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:44:56Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:34Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:34 crc kubenswrapper[4780]: I1210 10:46:34.401682 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:34Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:34 crc kubenswrapper[4780]: I1210 10:46:34.419543 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://acd480054c5d19efdbcce679b2246ce2972593aa710b1074a2fa90006f351c66\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7c28d1629c1d97373d475f14e54585e81a67114d5a69e21be9d1e750040d6d5c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"m
ountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:34Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:34 crc kubenswrapper[4780]: I1210 10:46:34.432557 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-msm77" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c1046aa3-9bf1-4013-8e8d-5629f08ed5e2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6a59aa8cd7a70e094ff80b0ae626aaabb77fb7ffdcffa1320945909c32da0704\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2ttwk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:30Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-msm77\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:34Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:34 crc kubenswrapper[4780]: I1210 10:46:34.450753 4780 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6baf1baf-f093-40f2-b258-117118f89050\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f9311d33a46721ffe228b91722ad2c6f36efb49b5e31cb703de657cc4a40dcbe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:44:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6136af77a31fde9c46edcf705caf249a029c3136befb7c0d84a26eab36d29a20\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6136af77a31fde9c46edcf705caf249a029c3136befb7c0d84a26eab36d29a20\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:44:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:44:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:44:56Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:34Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:34 crc kubenswrapper[4780]: I1210 10:46:34.470394 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://738eca4a4800d0b83e165aaad1b57939c0fca3335dbf043de168bcc240b2c32c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:34Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:34 crc kubenswrapper[4780]: I1210 10:46:34.475626 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:34 crc kubenswrapper[4780]: I1210 10:46:34.475679 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:34 crc kubenswrapper[4780]: I1210 10:46:34.475705 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:34 crc kubenswrapper[4780]: I1210 10:46:34.475733 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:34 crc kubenswrapper[4780]: I1210 10:46:34.475763 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:34Z","lastTransitionTime":"2025-12-10T10:46:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:34 crc kubenswrapper[4780]: I1210 10:46:34.496619 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:34Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:34 crc kubenswrapper[4780]: I1210 10:46:34.512457 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-8cwb7" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"deadb49b-61b8-435f-8168-d7bd3c01b5ad\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://caa5c9ed8d7c77d4af7b9797ad6117d06def5e3ab3082d2fd61de5f1d0902a88\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b46e1864cd02c0ab49ad21329dedf09667d52948a3f59eac7f32302cc0b48bda\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-10T10:46:22Z\\\",\\\"message\\\":\\\"2025-12-10T10:45:36+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_86c2478e-d0b0-468d-aefe-1644c458013c\\\\n2025-12-10T10:45:36+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_86c2478e-d0b0-468d-aefe-1644c458013c to /host/opt/cni/bin/\\\\n2025-12-10T10:45:36Z [verbose] multus-daemon started\\\\n2025-12-10T10:45:36Z [verbose] Readiness Indicator file check\\\\n2025-12-10T10:46:21Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:46:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r67pg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:27Z\\\"}}\" for pod \"openshift-multus\"/\"multus-8cwb7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:34Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:34 crc kubenswrapper[4780]: I1210 10:46:34.531981 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-rwfxn" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e522fb8-b104-4f14-a3a2-628fbe0ef36c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f3318d09af03d52017f648e9727180287dbcfce150242eaeb5f95be2fa6a1374\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5855e463a188d72cb8eac92bd97b9b2f91550a9a5437d7df83405bbe7a104f5d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5855e463a188d72cb8eac92bd97b9b2f91550a9a5437d7df83405bbe7a104f5d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4a16db7b744311d27339440bc46ebd09ba4264c5892ff205c6125af2a127e67e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4a16db7b744311d27339440bc46ebd09ba4264c5892ff205c6125af2a127e67e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fcbeee4d4e545150d725187049a89aa3d35ddd8f7767a6af0ea84a385454c316\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fcbeee4d4e545150d725187049a89aa3d35ddd8f7767a6af0ea84a385454c316\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3d60cbe3cc8e817975ed6843aefdbfe3233e8857a6458d77322a623ed858ca18\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3d60cbe3cc8e817975ed6843aefdbfe3233e8857a6458d77322a623ed858ca18\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0a0b3aad440bea346393ff35f0fa9492282c2a69b4a4c170f56ca7af63666077\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0a0b3aad440bea346393ff35f0fa9492282c2a69b4a4c170f56ca7af63666077\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3b72566e3e3a5f8eb12033cd2342bc3e9ad8f091f47c9d65309888d91a1ceaf4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3b72566e3e3a5f8eb12033cd2342bc3e9ad8f091f47c9d65309888d91a1ceaf4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:27Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-rwfxn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:34Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:34 crc kubenswrapper[4780]: I1210 10:46:34.550011 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"88e16d6d-ae53-465a-a66f-e1aa2abbfb8c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://55c85e62eda4733b5f1e264e6903e3f61bc4759bdf3f891c5b513a2195e0daab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:44:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8df4b9f29751cd2ebfbdf2b33f30fb6f519721f13d03173b45c5905e64524c88\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:44:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://325438d9694126427a6f6905508c0feb1ab3918532c7d6fd929e63b409574f5f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:44:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cf105ae693d68f4e8d5eaae47d0827146c24312f507b378cf60de03a5034bfb7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:44:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:44:56Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:34Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:34 crc kubenswrapper[4780]: I1210 10:46:34.563324 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-2lx8w" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8a3251eb-408c-42f1-b74d-261cb45eab71\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://414f485a56b9fb2fbea2bbc9687a588aabda6239cad44f8116ec680597dbe667\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lgkg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\"
:[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:27Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-2lx8w\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:34Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:34 crc kubenswrapper[4780]: I1210 10:46:34.578803 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:34 crc kubenswrapper[4780]: I1210 10:46:34.578859 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:34 crc kubenswrapper[4780]: I1210 10:46:34.578871 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:34 crc kubenswrapper[4780]: I1210 10:46:34.578896 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:34 crc kubenswrapper[4780]: I1210 10:46:34.578912 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:34Z","lastTransitionTime":"2025-12-10T10:46:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:34 crc kubenswrapper[4780]: I1210 10:46:34.593442 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-fpl55" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cc22221d-0c02-4e8c-8314-c2e6d9290b5e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ef0cc518de07a141be124545f2e547c630329ff507b0e0e71e4dee9b4be5d89a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ce7c3983123be2f92944322c2cf554e67cf80a51981415d9872bfc2db9f1ed3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cafa00280d671b48ed3628917793412a20b9d3be93a80d9a7775003081aea1ab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dbc444116b8592021f7d6ba0387d473521cba7c09ca78384be81f688af54554f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://782bcb7eb8341b73ff1c32c5af00aaefa2a513e5d1672f9f7df13cd05467131c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c39c3d56584d0939bfa9be938be09db5c93ddf50e0a56cacaa6e30c7b79475a1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ff51c6ad85356e07cc2059fe4b05b1061ae2d8eb
800f63ba392c8a4f36112ca5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8bae117db052a26101b63906a5bc25cd1ad0813261c80f78c00ee5235644c788\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-10T10:46:05Z\\\",\\\"message\\\":\\\"af71e}]}}] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {c02bd945-d57b-49ff-9cd3-202ed3574b26}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:} {Op:update Table:NAT Row:map[external_ip:192.168.126.11 logical_ip:10.217.0.59 options:{GoMap:map[stateless:false]} type:snat] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {dce28c51-c9f1-478b-97c8-7e209d6e7cbe}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:} {Op:mutate Table:Logical_Router Row:map[] Rows:[] Columns:[] Mutations:[{Column:nat Mutator:insert Value:{GoSet:[{GoUUID:dce28c51-c9f1-478b-97c8-7e209d6e7cbe}]}}] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {e3c4661a-36a6-47f0-a6c0-a4ee741f2224}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1210 10:46:05.290278 6546 services_controller.go:451] Built service openshift-console/console cluster-wide LB for network=default: []services.LB{services.LB{Name:\\\\\\\"Service_openshift-console/console_TCP_cluster\\\\\\\", UUID:\\\\\\\"\\\\\\\", Protocol:\\\\\\\"TCP\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-console/console\\\\\\\"}, Opts:services.LBOpts{Reject:true, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{services.LBRule{Source:services.Addr{IP:\\\\\\\"10.217.5.194\\\\\\\", Port:443, 
Template:(*services.Template\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-10T10:46:04Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:46:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://df663a4e7ac68c8967db1167ea07b83862e9d52aa13b3f10e51934bb18e7397e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initConta
inerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ada0c2d5e74e1805777a330c39a3c1c0fb677f876387207a83c79bd7ed06775a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ada0c2d5e74e1805777a330c39a3c1c0fb677f876387207a83c79bd7ed06775a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:27Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-fpl55\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:34Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:34 crc kubenswrapper[4780]: I1210 10:46:34.609581 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-6m7tj" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a78da08f-1ec1-4cc7-af55-d527da423778\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://920fb034d3d8ce6bf28b128e33093aa5daa724b15c51c7e208ed94ccc2f4840e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jvtxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a4509159a8aa8b24876c86f827f346063fd6cd2603c15242f6507b0a4afaff3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jvtxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:38Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-6m7tj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:34Z is after 2025-08-24T17:21:41Z" Dec 10 
10:46:34 crc kubenswrapper[4780]: I1210 10:46:34.624121 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-46s5p" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"24187953-1dc5-48d7-b00c-1e5876604b6b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:40Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:40Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jg69p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jg69p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:40Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-46s5p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:34Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:34 crc kubenswrapper[4780]: I1210 10:46:34.642605 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:34Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:34 crc kubenswrapper[4780]: I1210 10:46:34.683097 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:34 crc kubenswrapper[4780]: I1210 10:46:34.683147 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:34 crc kubenswrapper[4780]: I1210 10:46:34.683161 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:34 crc kubenswrapper[4780]: I1210 10:46:34.683180 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:34 crc kubenswrapper[4780]: I1210 10:46:34.683195 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:34Z","lastTransitionTime":"2025-12-10T10:46:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:34 crc kubenswrapper[4780]: I1210 10:46:34.787085 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:34 crc kubenswrapper[4780]: I1210 10:46:34.787138 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:34 crc kubenswrapper[4780]: I1210 10:46:34.787149 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:34 crc kubenswrapper[4780]: I1210 10:46:34.787170 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:34 crc kubenswrapper[4780]: I1210 10:46:34.787186 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:34Z","lastTransitionTime":"2025-12-10T10:46:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:34 crc kubenswrapper[4780]: I1210 10:46:34.890563 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:34 crc kubenswrapper[4780]: I1210 10:46:34.890618 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:34 crc kubenswrapper[4780]: I1210 10:46:34.890636 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:34 crc kubenswrapper[4780]: I1210 10:46:34.890659 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:34 crc kubenswrapper[4780]: I1210 10:46:34.890685 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:34Z","lastTransitionTime":"2025-12-10T10:46:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:34 crc kubenswrapper[4780]: I1210 10:46:34.994422 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:34 crc kubenswrapper[4780]: I1210 10:46:34.994485 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:34 crc kubenswrapper[4780]: I1210 10:46:34.994497 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:34 crc kubenswrapper[4780]: I1210 10:46:34.994516 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:34 crc kubenswrapper[4780]: I1210 10:46:34.994527 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:34Z","lastTransitionTime":"2025-12-10T10:46:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:35 crc kubenswrapper[4780]: I1210 10:46:35.097378 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:35 crc kubenswrapper[4780]: I1210 10:46:35.097420 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:35 crc kubenswrapper[4780]: I1210 10:46:35.097430 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:35 crc kubenswrapper[4780]: I1210 10:46:35.097449 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:35 crc kubenswrapper[4780]: I1210 10:46:35.097460 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:35Z","lastTransitionTime":"2025-12-10T10:46:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:35 crc kubenswrapper[4780]: I1210 10:46:35.201398 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:35 crc kubenswrapper[4780]: I1210 10:46:35.201446 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:35 crc kubenswrapper[4780]: I1210 10:46:35.201460 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:35 crc kubenswrapper[4780]: I1210 10:46:35.201483 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:35 crc kubenswrapper[4780]: I1210 10:46:35.201495 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:35Z","lastTransitionTime":"2025-12-10T10:46:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:35 crc kubenswrapper[4780]: I1210 10:46:35.304971 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:35 crc kubenswrapper[4780]: I1210 10:46:35.305043 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:35 crc kubenswrapper[4780]: I1210 10:46:35.305059 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:35 crc kubenswrapper[4780]: I1210 10:46:35.305084 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:35 crc kubenswrapper[4780]: I1210 10:46:35.305099 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:35Z","lastTransitionTime":"2025-12-10T10:46:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:35 crc kubenswrapper[4780]: I1210 10:46:35.409378 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:35 crc kubenswrapper[4780]: I1210 10:46:35.409436 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:35 crc kubenswrapper[4780]: I1210 10:46:35.409449 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:35 crc kubenswrapper[4780]: I1210 10:46:35.409471 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:35 crc kubenswrapper[4780]: I1210 10:46:35.409484 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:35Z","lastTransitionTime":"2025-12-10T10:46:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:35 crc kubenswrapper[4780]: I1210 10:46:35.512463 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:35 crc kubenswrapper[4780]: I1210 10:46:35.512541 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:35 crc kubenswrapper[4780]: I1210 10:46:35.512559 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:35 crc kubenswrapper[4780]: I1210 10:46:35.512587 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:35 crc kubenswrapper[4780]: I1210 10:46:35.512609 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:35Z","lastTransitionTime":"2025-12-10T10:46:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:35 crc kubenswrapper[4780]: I1210 10:46:35.616852 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:35 crc kubenswrapper[4780]: I1210 10:46:35.616944 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:35 crc kubenswrapper[4780]: I1210 10:46:35.616957 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:35 crc kubenswrapper[4780]: I1210 10:46:35.616978 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:35 crc kubenswrapper[4780]: I1210 10:46:35.617390 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:35Z","lastTransitionTime":"2025-12-10T10:46:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:35 crc kubenswrapper[4780]: I1210 10:46:35.720515 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:35 crc kubenswrapper[4780]: I1210 10:46:35.720556 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:35 crc kubenswrapper[4780]: I1210 10:46:35.720564 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:35 crc kubenswrapper[4780]: I1210 10:46:35.720582 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:35 crc kubenswrapper[4780]: I1210 10:46:35.720592 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:35Z","lastTransitionTime":"2025-12-10T10:46:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:35 crc kubenswrapper[4780]: I1210 10:46:35.824880 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:35 crc kubenswrapper[4780]: I1210 10:46:35.824972 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:35 crc kubenswrapper[4780]: I1210 10:46:35.824987 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:35 crc kubenswrapper[4780]: I1210 10:46:35.825006 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:35 crc kubenswrapper[4780]: I1210 10:46:35.825016 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:35Z","lastTransitionTime":"2025-12-10T10:46:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:35 crc kubenswrapper[4780]: I1210 10:46:35.928267 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:35 crc kubenswrapper[4780]: I1210 10:46:35.928322 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:35 crc kubenswrapper[4780]: I1210 10:46:35.928333 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:35 crc kubenswrapper[4780]: I1210 10:46:35.928355 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:35 crc kubenswrapper[4780]: I1210 10:46:35.928365 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:35Z","lastTransitionTime":"2025-12-10T10:46:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:35 crc kubenswrapper[4780]: I1210 10:46:35.958683 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 10 10:46:35 crc kubenswrapper[4780]: I1210 10:46:35.958813 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-46s5p" Dec 10 10:46:35 crc kubenswrapper[4780]: I1210 10:46:35.958862 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 10 10:46:35 crc kubenswrapper[4780]: E1210 10:46:35.958962 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 10 10:46:35 crc kubenswrapper[4780]: I1210 10:46:35.959083 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 10 10:46:35 crc kubenswrapper[4780]: E1210 10:46:35.959212 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-46s5p" podUID="24187953-1dc5-48d7-b00c-1e5876604b6b" Dec 10 10:46:35 crc kubenswrapper[4780]: E1210 10:46:35.959486 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 10 10:46:35 crc kubenswrapper[4780]: E1210 10:46:35.959445 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 10 10:46:35 crc kubenswrapper[4780]: I1210 10:46:35.979965 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-8cwb7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"deadb49b-61b8-435f-8168-d7bd3c01b5ad\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://caa5c9ed8d7c77d4af7b9797ad6117d06def5e3ab3082d2fd61de5f1d0902a88\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b46e1864cd02c0ab49ad21329dedf09667d52948a3f59eac7f32302cc0b48bda\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-10T10:46:22Z\\\",\\\"message\\\":\\\"2025-12-10T10:45:36+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_86c2478e-d0b0-468d-aefe-1644c458013c\\\\n2025-12-10T10:45:36+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_86c2478e-d0b0-468d-aefe-1644c458013c to /host/opt/cni/bin/\\\\n2025-12-10T10:45:36Z [verbose] multus-daemon started\\\\n2025-12-10T10:45:36Z [verbose] Readiness Indicator file check\\\\n2025-12-10T10:46:21Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:46:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r67pg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:27Z\\\"}}\" for pod \"openshift-multus\"/\"multus-8cwb7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:35Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:36 crc kubenswrapper[4780]: I1210 10:46:36.004215 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-rwfxn" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e522fb8-b104-4f14-a3a2-628fbe0ef36c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f3318d09af03d52017f648e9727180287dbcfce150242eaeb5f95be2fa6a1374\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5855e463a188d72cb8eac92bd97b9b2f91550a9a5437d7df83405bbe7a104f5d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5855e463a188d72cb8eac92bd97b9b2f91550a9a5437d7df83405bbe7a104f5d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4a16db7b744311d27339440bc46ebd09ba4264c5892ff205c6125af2a127e67e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4a16db7b744311d27339440bc46ebd09ba4264c5892ff205c6125af2a127e67e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fcbeee4d4e545150d725187049a89aa3d35ddd8f7767a6af0ea84a385454c316\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fcbeee4d4e545150d725187049a89aa3d35ddd8f7767a6af0ea84a385454c316\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3d60cbe3cc8e817975ed6843aefdbfe3233e8857a6458d77322a623ed858ca18\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3d60cbe3cc8e817975ed6843aefdbfe3233e8857a6458d77322a623ed858ca18\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0a0b3aad440bea346393ff35f0fa9492282c2a69b4a4c170f56ca7af63666077\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0a0b3aad440bea346393ff35f0fa9492282c2a69b4a4c170f56ca7af63666077\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3b72566e3e3a5f8eb12033cd2342bc3e9ad8f091f47c9d65309888d91a1ceaf4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3b72566e3e3a5f8eb12033cd2342bc3e9ad8f091f47c9d65309888d91a1ceaf4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:27Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-rwfxn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:36Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:36 crc kubenswrapper[4780]: I1210 10:46:36.031290 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"88e16d6d-ae53-465a-a66f-e1aa2abbfb8c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://55c85e62eda4733b5f1e264e6903e3f61bc4759bdf3f891c5b513a2195e0daab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:44:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8df4b9f29751cd2ebfbdf2b33f30fb6f519721f13d03173b45c5905e64524c88\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:44:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://325438d9694126427a6f6905508c0feb1ab3918532c7d6fd929e63b409574f5f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:44:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cf105ae693d68f4e8d5eaae47d0827146c24312f507b378cf60de03a5034bfb7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:44:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:44:56Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:36Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:36 crc kubenswrapper[4780]: I1210 10:46:36.032225 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:36 crc kubenswrapper[4780]: I1210 10:46:36.032277 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:36 crc kubenswrapper[4780]: I1210 10:46:36.032290 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:36 crc kubenswrapper[4780]: I1210 10:46:36.032311 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:36 crc kubenswrapper[4780]: I1210 10:46:36.032326 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:36Z","lastTransitionTime":"2025-12-10T10:46:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:36 crc kubenswrapper[4780]: I1210 10:46:36.053712 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://738eca4a4800d0b83e165aaad1b57939c0fca3335dbf043de168bcc240b2c32c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:36Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:36 crc kubenswrapper[4780]: I1210 10:46:36.074783 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:36Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:36 crc kubenswrapper[4780]: I1210 10:46:36.093240 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-6m7tj" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a78da08f-1ec1-4cc7-af55-d527da423778\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://920fb034d3d8ce6bf28b128e33093aa5daa724b15c51c7e208ed94ccc2f4840e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jvtxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a4509159a8aa8b24876c86f827f346063fd6cd2603c15242f6507b0a4afaff3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jvtxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:38Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-6m7tj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:36Z is after 2025-08-24T17:21:41Z" Dec 10 
10:46:36 crc kubenswrapper[4780]: I1210 10:46:36.109493 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-46s5p" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"24187953-1dc5-48d7-b00c-1e5876604b6b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:40Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:40Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jg69p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jg69p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:40Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-46s5p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:36Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:36 crc kubenswrapper[4780]: I1210 10:46:36.131317 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:36Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:36 crc kubenswrapper[4780]: I1210 10:46:36.135566 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:36 crc kubenswrapper[4780]: I1210 10:46:36.135634 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:36 crc kubenswrapper[4780]: I1210 10:46:36.135647 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:36 crc kubenswrapper[4780]: I1210 10:46:36.135671 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:36 crc kubenswrapper[4780]: I1210 10:46:36.135683 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:36Z","lastTransitionTime":"2025-12-10T10:46:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:36 crc kubenswrapper[4780]: I1210 10:46:36.146432 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-2lx8w" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8a3251eb-408c-42f1-b74d-261cb45eab71\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://414f485a56b9fb2fbea2bbc9687a588aabda6239cad44f8116ec680597dbe667\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lgkg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:27Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-2lx8w\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:36Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:36 crc kubenswrapper[4780]: I1210 10:46:36.171425 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-fpl55" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"cc22221d-0c02-4e8c-8314-c2e6d9290b5e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ef0cc518de07a141be124545f2e547c630329ff507b0e0e71e4dee9b4be5d89a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ce7c3983123be2f92944322c2cf554e67cf80a51981415d9872bfc2db9f1ed3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cafa00280d671b48ed3628917793412a20b9d3be93a80d9a7775003081aea1ab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dbc444116b8592021f7d6ba0387d473521cba7c09ca78384be81f688af54554f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://782bcb7eb8341b73ff1c32c5af00aaefa2a513e5d1672f9f7df13cd05467131c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c39c3d56584d0939bfa9be938be09db5c93ddf50e0a56cacaa6e30c7b79475a1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ff51c6ad85356e07cc2059fe4b05b1061ae2d8eb800f63ba392c8a4f36112ca5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8bae117db052a26101b63906a5bc25cd1ad0813261c80f78c00ee5235644c788\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-10T10:46:05Z\\\",\\\"message\\\":\\\"af71e}]}}] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {c02bd945-d57b-49ff-9cd3-202ed3574b26}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:} {Op:update Table:NAT Row:map[external_ip:192.168.126.11 logical_ip:10.217.0.59 options:{GoMap:map[stateless:false]} type:snat] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {dce28c51-c9f1-478b-97c8-7e209d6e7cbe}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:} {Op:mutate Table:Logical_Router Row:map[] Rows:[] Columns:[] Mutations:[{Column:nat Mutator:insert Value:{GoSet:[{GoUUID:dce28c51-c9f1-478b-97c8-7e209d6e7cbe}]}}] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {e3c4661a-36a6-47f0-a6c0-a4ee741f2224}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1210 10:46:05.290278 6546 services_controller.go:451] Built service openshift-console/console cluster-wide LB for network=default: []services.LB{services.LB{Name:\\\\\\\"Service_openshift-console/console_TCP_cluster\\\\\\\", UUID:\\\\\\\"\\\\\\\", Protocol:\\\\\\\"TCP\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-console/console\\\\\\\"}, Opts:services.LBOpts{Reject:true, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{services.LBRule{Source:services.Addr{IP:\\\\\\\"10.217.5.194\\\\\\\", Port:443, 
Template:(*services.Template\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-10T10:46:04Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:46:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://df663a4e7ac68c8967db1167ea07b83862e9d52aa13b3f10e51934bb18e7397e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initConta
inerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ada0c2d5e74e1805777a330c39a3c1c0fb677f876387207a83c79bd7ed06775a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ada0c2d5e74e1805777a330c39a3c1c0fb677f876387207a83c79bd7ed06775a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:27Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-fpl55\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:36Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:36 crc kubenswrapper[4780]: I1210 10:46:36.191755 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"da4d38a3-53c0-417d-a86f-3496714bd352\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7f0f1f88e79f81b145ad50cc2b0a6e66de12f33c6819aaa356c8e6f7808384f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:44:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d6c4b099607f36efe13488a53c66715ba756493eaa11612cef18b8174b7b48ef\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:44:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a352e1e0c2a6fcb27ed2d53b1952586d56239ac801471f41d319d4b9390f7334\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:44:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://960f2d7c617f5d0b3281d431308985bb7419af77581a404554f849d22ffa1687\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f71a2483b5040f737133b92baa6ea162f365c492a0d860a50ba8eabd7ca23519\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-10T10:45:21Z\\\",\\\"message\\\":\\\"file observer\\\\nW1210 10:45:20.821547 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1210 10:45:20.821865 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1210 10:45:20.824662 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-536389068/tls.crt::/tmp/serving-cert-536389068/tls.key\\\\\\\"\\\\nI1210 10:45:21.107127 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1210 10:45:21.109726 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1210 10:45:21.109750 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1210 10:45:21.109791 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1210 10:45:21.109797 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1210 10:45:21.115580 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1210 10:45:21.115608 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1210 10:45:21.115614 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1210 10:45:21.115621 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1210 10:45:21.115664 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1210 10:45:21.115672 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1210 10:45:21.115676 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1210 10:45:21.117097 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1210 10:45:21.119004 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:14Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://531ca3c69106ee33211f11eba4a6fcbcf6cf5b8b6ee16193b5a118999537d2e0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:00Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6a7d241fa7fd864088c8bfa29a4457b38b3e78a6846872d6f52aeebb0de99c58\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6a7d241fa7fd864088c8bfa29a4457b38b3e78a6846872d6f52aeebb0de99c58\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:44:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:44:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:44:56Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:36Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:36 crc kubenswrapper[4780]: I1210 10:46:36.208793 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8ca741d2df061bc77e981eca8a555ba59082c6db0357093490908a1b27054340\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:36Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:36 crc kubenswrapper[4780]: I1210 10:46:36.225523 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6bf1dca1-b191-4796-b326-baac53e84045\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://439376b6f14a03790c4d49523ce4f21e3ee73ee0a81954f873409cf956048f53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sh92h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://57ec3f3a4da992b4288db7d6633da18e479fdc56b10c503a2a44368671882f0d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sh92h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:27Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-xhdr5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:36Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:36 crc kubenswrapper[4780]: I1210 10:46:36.239287 4780 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:36 crc kubenswrapper[4780]: I1210 10:46:36.239344 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:36 crc kubenswrapper[4780]: I1210 10:46:36.239358 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:36 crc kubenswrapper[4780]: I1210 10:46:36.239379 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:36 crc kubenswrapper[4780]: I1210 10:46:36.239397 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:36Z","lastTransitionTime":"2025-12-10T10:46:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:36 crc kubenswrapper[4780]: I1210 10:46:36.243973 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:36Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:36 crc kubenswrapper[4780]: I1210 10:46:36.261097 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://acd480054c5d19efdbcce679b2246ce2972593aa710b1074a2fa90006f351c66\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7c28d1629c1d97373d475f14e54585e81a67114d5a69e21be9d1e750040d6d5c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"m
ountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:36Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:36 crc kubenswrapper[4780]: I1210 10:46:36.275201 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-msm77" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c1046aa3-9bf1-4013-8e8d-5629f08ed5e2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6a59aa8cd7a70e094ff80b0ae626aaabb77fb7ffdcffa1320945909c32da0704\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2ttwk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:30Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-msm77\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:36Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:36 crc kubenswrapper[4780]: I1210 10:46:36.291139 4780 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6baf1baf-f093-40f2-b258-117118f89050\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f9311d33a46721ffe228b91722ad2c6f36efb49b5e31cb703de657cc4a40dcbe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:44:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6136af77a31fde9c46edcf705caf249a029c3136befb7c0d84a26eab36d29a20\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6136af77a31fde9c46edcf705caf249a029c3136befb7c0d84a26eab36d29a20\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:44:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:44:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:44:56Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:36Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:36 crc kubenswrapper[4780]: I1210 10:46:36.310080 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"056c06c7-eb0f-4fb7-9f86-2884fd0d1e60\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b9592145faf65060693a3f1e14db253e10cd09c642ea0aa1a7682f5b06872f14\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:44:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://eccc5070135705c193e1020872bffc1b8aeb70dbe82f1bb520f36f012ca00703\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:44:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://72f3bd530fbcc66ed779ee9f6ef1c442f2697ca027aad5bd2473f42101d55528\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:44:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3970fa6bd2aa3e93a4473d78218e9dd494aa5f7056efbcfb123160bb63162192\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3970fa6bd2aa3e93a4473d78218e9dd494aa5f7056efbcfb123160bb63162192\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:44:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:44:57Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:44:56Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:36Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:36 crc kubenswrapper[4780]: I1210 10:46:36.343157 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:36 crc kubenswrapper[4780]: I1210 10:46:36.343218 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:36 crc kubenswrapper[4780]: I1210 10:46:36.343228 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:36 crc kubenswrapper[4780]: I1210 10:46:36.343249 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:36 crc kubenswrapper[4780]: I1210 10:46:36.343267 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:36Z","lastTransitionTime":"2025-12-10T10:46:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:36 crc kubenswrapper[4780]: I1210 10:46:36.447468 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:36 crc kubenswrapper[4780]: I1210 10:46:36.447524 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:36 crc kubenswrapper[4780]: I1210 10:46:36.447539 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:36 crc kubenswrapper[4780]: I1210 10:46:36.447561 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:36 crc kubenswrapper[4780]: I1210 10:46:36.447580 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:36Z","lastTransitionTime":"2025-12-10T10:46:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:36 crc kubenswrapper[4780]: I1210 10:46:36.551425 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:36 crc kubenswrapper[4780]: I1210 10:46:36.551510 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:36 crc kubenswrapper[4780]: I1210 10:46:36.551528 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:36 crc kubenswrapper[4780]: I1210 10:46:36.551559 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:36 crc kubenswrapper[4780]: I1210 10:46:36.551577 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:36Z","lastTransitionTime":"2025-12-10T10:46:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:36 crc kubenswrapper[4780]: I1210 10:46:36.654128 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:36 crc kubenswrapper[4780]: I1210 10:46:36.654192 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:36 crc kubenswrapper[4780]: I1210 10:46:36.654204 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:36 crc kubenswrapper[4780]: I1210 10:46:36.654226 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:36 crc kubenswrapper[4780]: I1210 10:46:36.654255 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:36Z","lastTransitionTime":"2025-12-10T10:46:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:36 crc kubenswrapper[4780]: I1210 10:46:36.758167 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:36 crc kubenswrapper[4780]: I1210 10:46:36.758223 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:36 crc kubenswrapper[4780]: I1210 10:46:36.758237 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:36 crc kubenswrapper[4780]: I1210 10:46:36.758262 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:36 crc kubenswrapper[4780]: I1210 10:46:36.758276 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:36Z","lastTransitionTime":"2025-12-10T10:46:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:36 crc kubenswrapper[4780]: I1210 10:46:36.861665 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:36 crc kubenswrapper[4780]: I1210 10:46:36.861732 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:36 crc kubenswrapper[4780]: I1210 10:46:36.861750 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:36 crc kubenswrapper[4780]: I1210 10:46:36.861776 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:36 crc kubenswrapper[4780]: I1210 10:46:36.861794 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:36Z","lastTransitionTime":"2025-12-10T10:46:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:36 crc kubenswrapper[4780]: I1210 10:46:36.965219 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:36 crc kubenswrapper[4780]: I1210 10:46:36.965265 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:36 crc kubenswrapper[4780]: I1210 10:46:36.965276 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:36 crc kubenswrapper[4780]: I1210 10:46:36.965297 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:36 crc kubenswrapper[4780]: I1210 10:46:36.965316 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:36Z","lastTransitionTime":"2025-12-10T10:46:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:36 crc kubenswrapper[4780]: I1210 10:46:36.982823 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-etcd/etcd-crc"] Dec 10 10:46:37 crc kubenswrapper[4780]: I1210 10:46:37.067950 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:37 crc kubenswrapper[4780]: I1210 10:46:37.068000 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:37 crc kubenswrapper[4780]: I1210 10:46:37.068011 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:37 crc kubenswrapper[4780]: I1210 10:46:37.068030 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:37 crc kubenswrapper[4780]: I1210 10:46:37.068043 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:37Z","lastTransitionTime":"2025-12-10T10:46:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:37 crc kubenswrapper[4780]: I1210 10:46:37.172314 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:37 crc kubenswrapper[4780]: I1210 10:46:37.172691 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:37 crc kubenswrapper[4780]: I1210 10:46:37.172703 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:37 crc kubenswrapper[4780]: I1210 10:46:37.172720 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:37 crc kubenswrapper[4780]: I1210 10:46:37.172734 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:37Z","lastTransitionTime":"2025-12-10T10:46:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:37 crc kubenswrapper[4780]: I1210 10:46:37.275979 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:37 crc kubenswrapper[4780]: I1210 10:46:37.276027 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:37 crc kubenswrapper[4780]: I1210 10:46:37.276038 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:37 crc kubenswrapper[4780]: I1210 10:46:37.276059 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:37 crc kubenswrapper[4780]: I1210 10:46:37.276072 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:37Z","lastTransitionTime":"2025-12-10T10:46:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:37 crc kubenswrapper[4780]: I1210 10:46:37.352384 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-fpl55_cc22221d-0c02-4e8c-8314-c2e6d9290b5e/ovnkube-controller/3.log" Dec 10 10:46:37 crc kubenswrapper[4780]: I1210 10:46:37.353580 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-fpl55_cc22221d-0c02-4e8c-8314-c2e6d9290b5e/ovnkube-controller/2.log" Dec 10 10:46:37 crc kubenswrapper[4780]: I1210 10:46:37.357759 4780 generic.go:334] "Generic (PLEG): container finished" podID="cc22221d-0c02-4e8c-8314-c2e6d9290b5e" containerID="ff51c6ad85356e07cc2059fe4b05b1061ae2d8eb800f63ba392c8a4f36112ca5" exitCode=1 Dec 10 10:46:37 crc kubenswrapper[4780]: I1210 10:46:37.357847 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-fpl55" event={"ID":"cc22221d-0c02-4e8c-8314-c2e6d9290b5e","Type":"ContainerDied","Data":"ff51c6ad85356e07cc2059fe4b05b1061ae2d8eb800f63ba392c8a4f36112ca5"} Dec 10 10:46:37 crc kubenswrapper[4780]: I1210 10:46:37.357938 4780 scope.go:117] "RemoveContainer" containerID="8bae117db052a26101b63906a5bc25cd1ad0813261c80f78c00ee5235644c788" Dec 10 10:46:37 crc kubenswrapper[4780]: I1210 10:46:37.358978 4780 scope.go:117] "RemoveContainer" containerID="ff51c6ad85356e07cc2059fe4b05b1061ae2d8eb800f63ba392c8a4f36112ca5" Dec 10 10:46:37 crc kubenswrapper[4780]: E1210 10:46:37.359176 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-fpl55_openshift-ovn-kubernetes(cc22221d-0c02-4e8c-8314-c2e6d9290b5e)\"" pod="openshift-ovn-kubernetes/ovnkube-node-fpl55" podUID="cc22221d-0c02-4e8c-8314-c2e6d9290b5e" Dec 10 10:46:37 crc kubenswrapper[4780]: I1210 10:46:37.381429 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:37 crc kubenswrapper[4780]: I1210 10:46:37.381519 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:37 crc kubenswrapper[4780]: I1210 10:46:37.381532 4780 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Dec 10 10:46:37 crc kubenswrapper[4780]: I1210 10:46:37.381551 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:37 crc kubenswrapper[4780]: I1210 10:46:37.381505 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-8cwb7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"deadb49b-61b8-435f-8168-d7bd3c01b5ad\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://caa5c9ed8d7c77d4af7b9797ad6117d06def5e3ab3082d2fd61de5f1d0902a88\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b46e1864cd02c0ab49ad21329dedf09667d52948a3f59eac7f32302cc0b48bda\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-10T10:46:22Z\\\",\\\"message\\\":\\\"2025-12-10T10:45:36+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_86c2478e-d0b0-468d-aefe-1644c458013c\\\\n2025-12-10T10:45:36+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_86c2478e-d0b0-468d-aefe-1644c458013c to /host/opt/cni/bin/\\\\n2025-12-10T10:45:36Z [verbose] multus-daemon started\\\\n2025-12-10T10:45:36Z [verbose] Readiness Indicator file check\\\\n2025-12-10T10:46:21Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:46:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r67pg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:27Z\\\"}}\" for pod \"openshift-multus\"/\"multus-8cwb7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:37Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:37 crc kubenswrapper[4780]: I1210 10:46:37.381568 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:37Z","lastTransitionTime":"2025-12-10T10:46:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:37 crc kubenswrapper[4780]: I1210 10:46:37.403900 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-rwfxn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e522fb8-b104-4f14-a3a2-628fbe0ef36c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f3318d09af03d52017f648e9727180287dbcfce150242eaeb5f95be2fa6a1374\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5855e463a188d72cb8eac92bd97b9b2f91550a9a5437d7df83405bbe7a104f5d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5855e463a188d72cb8eac92bd97b9b2f91550a9a5437d7df83405bbe7a104f5d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4a16db7b744311d27339440bc46ebd09ba4264c5892ff205c6125af2a127e67e\
\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4a16db7b744311d27339440bc46ebd09ba4264c5892ff205c6125af2a127e67e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fcbeee4d4e545150d725187049a89aa3d35ddd8f7767a6af0ea84a385454c316\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fcbeee4d4e545150d725187049a89aa3d35ddd8f7767a6af0ea84a385454c316\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3d60cbe3cc8e817975ed6843aefdbfe3233e8857a6458d77322a623ed858ca18\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3d60cbe3cc8e817975ed6843aefdbfe3233e8857a6458d77322a623ed858ca18\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"
mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0a0b3aad440bea346393ff35f0fa9492282c2a69b4a4c170f56ca7af63666077\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0a0b3aad440bea346393ff35f0fa9492282c2a69b4a4c170f56ca7af63666077\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3b72566e3e3a5f8eb12033cd2342bc3e9ad8f091f47c9d65309888d91a1ceaf4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3b72566e3e3a5f8eb12033cd2342bc3e9ad8f091f47c9d65309888d91a1ceaf4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:27Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-rwfxn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:37Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:37 crc kubenswrapper[4780]: I1210 10:46:37.420023 4780 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"88e16d6d-ae53-465a-a66f-e1aa2abbfb8c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://55c85e62eda4733b5f1e264e6903e3f61bc4759bdf3f891c5b513a2195e0daab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:44:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8df4b9f29751cd2ebfbdf2b33f30fb6f519721f13d03173b45c5905e64524c88\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:44:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://325438d9694126427a6f6905508c0feb1ab3918532c7d6fd929e63b409574f5f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:44:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cf105ae693d68f4e8d5eaae47d0827146c24312f507b378cf6
0de03a5034bfb7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:44:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:44:56Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:37Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:37 crc kubenswrapper[4780]: I1210 10:46:37.437999 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://738eca4a4800d0b83e165aaad1b57939c0fca3335dbf043de168bcc240b2c32c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:37Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:37 crc kubenswrapper[4780]: I1210 10:46:37.457862 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:37Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:37 crc kubenswrapper[4780]: I1210 10:46:37.474523 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-6m7tj" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a78da08f-1ec1-4cc7-af55-d527da423778\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://920fb034d3d8ce6bf28b128e33093aa5daa724b15c51c7e208ed94ccc2f4840e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jvtxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a4509159a8aa8b24876c86f827f346063fd6cd2603c15242f6507b0a4afaff3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jvtxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:38Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-6m7tj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:37Z is after 2025-08-24T17:21:41Z" Dec 10 
10:46:37 crc kubenswrapper[4780]: I1210 10:46:37.486861 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:37 crc kubenswrapper[4780]: I1210 10:46:37.486976 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:37 crc kubenswrapper[4780]: I1210 10:46:37.486997 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:37 crc kubenswrapper[4780]: I1210 10:46:37.487052 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:37 crc kubenswrapper[4780]: I1210 10:46:37.487069 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:37Z","lastTransitionTime":"2025-12-10T10:46:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:37 crc kubenswrapper[4780]: I1210 10:46:37.490571 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-46s5p" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"24187953-1dc5-48d7-b00c-1e5876604b6b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:40Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:40Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jg69p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jg69p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:40Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-46s5p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:37Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:37 crc kubenswrapper[4780]: I1210 10:46:37.511155 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:37Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:37 crc kubenswrapper[4780]: I1210 10:46:37.529542 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-2lx8w" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8a3251eb-408c-42f1-b74d-261cb45eab71\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://414f485a56b9fb2fbea2bbc9687a588aabda6239cad44f8116ec680597dbe667\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lgkg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:27Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-2lx8w\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2025-12-10T10:46:37Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:37 crc kubenswrapper[4780]: I1210 10:46:37.557511 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-fpl55" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cc22221d-0c02-4e8c-8314-c2e6d9290b5e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ef0cc518de07a141be124545f2e547c630329ff507b0e0e71e4dee9b4be5d89a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ce7c3983123be2f92944322c2cf554e67cf80a51981415d9872bfc2db9f1ed3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\
"containerID\\\":\\\"cri-o://cafa00280d671b48ed3628917793412a20b9d3be93a80d9a7775003081aea1ab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dbc444116b8592021f7d6ba0387d473521cba7c09ca78384be81f688af54554f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://782bcb7eb8341b73ff1c32c5af00aaefa2a513e5d1672f9f7df13cd05467131c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c39c3d56584d0939bfa9be938be09db5c93ddf50e0a56cacaa6e30c7b79475a1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID
\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ff51c6ad85356e07cc2059fe4b05b1061ae2d8eb800f63ba392c8a4f36112ca5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8bae117db052a26101b63906a5bc25cd1ad0813261c80f78c00ee5235644c788\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-10T10:46:05Z\\\",\\\"message\\\":\\\"af71e}]}}] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {c02bd945-d57b-49ff-9cd3-202ed3574b26}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:} {Op:update Table:NAT Row:map[external_ip:192.168.126.11 logical_ip:10.217.0.59 options:{GoMap:map[stateless:false]} type:snat] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {dce28c51-c9f1-478b-97c8-7e209d6e7cbe}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:} {Op:mutate Table:Logical_Router Row:map[] Rows:[] Columns:[] Mutations:[{Column:nat Mutator:insert Value:{GoSet:[{GoUUID:dce28c51-c9f1-478b-97c8-7e209d6e7cbe}]}}] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {e3c4661a-36a6-47f0-a6c0-a4ee741f2224}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1210 10:46:05.290278 6546 services_controller.go:451] Built service openshift-console/console cluster-wide LB for network=default: []services.LB{services.LB{Name:\\\\\\\"Service_openshift-console/console_TCP_cluster\\\\\\\", UUID:\\\\\\\"\\\\\\\", Protocol:\\\\\\\"TCP\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-console/console\\\\\\\"}, Opts:services.LBOpts{Reject:true, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{services.LBRule{Source:services.Addr{IP:\\\\\\\"10.217.5.194\\\\\\\", Port:443, 
Template:(*services.Template\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-10T10:46:04Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ff51c6ad85356e07cc2059fe4b05b1061ae2d8eb800f63ba392c8a4f36112ca5\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-10T10:46:37Z\\\",\\\"message\\\":\\\"ector *v1alpha1.BaselineAdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1210 10:46:35.399534 6896 reflector.go:311] Stopping reflector *v1.EgressIP (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressip/v1/apis/informers/externalversions/factory.go:140\\\\nI1210 10:46:35.399722 6896 reflector.go:311] Stopping reflector *v1.Pod (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1210 10:46:35.400003 6896 reflector.go:311] Stopping reflector *v1.Namespace (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1210 10:46:35.400137 6896 reflector.go:311] Stopping reflector *v1.Service (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1210 10:46:35.400506 6896 reflector.go:311] Stopping reflector *v1alpha1.AdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1210 10:46:35.436709 6896 shared_informer.go:320] Caches are synced for node-tracker-controller\\\\nI1210 10:46:35.436755 6896 services_controller.go:204] Setting up event handlers for services for network=default\\\\nI1210 10:46:35.436865 6896 ovnkube.go:599] Stopped ovnkube\\\\nI1210 10:46:35.436895 6896 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1210 10:46:35.437103 6896 ovnkube.go:137] failed to run 
ov\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-10T10:46:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://df663a4e7ac68c8967db1167ea07b83862e9d52aa13b3f10e51934bb18e7397e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ada0c2d5e74e1805777a330c39a3c1c0fb677f876387207a83c79bd7ed06775a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d20
99482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ada0c2d5e74e1805777a330c39a3c1c0fb677f876387207a83c79bd7ed06775a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:27Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-fpl55\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:37Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:37 crc kubenswrapper[4780]: I1210 10:46:37.580419 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"da4d38a3-53c0-417d-a86f-3496714bd352\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7f0f1f88e79f81b145ad50cc2b0a6e66de12f33c6819aaa356c8e6f7808384f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:44:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d6c4b099607f36efe13488a53c66715ba756493eaa11612cef18b8174b7b48ef\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-clust
er-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:44:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a352e1e0c2a6fcb27ed2d53b1952586d56239ac801471f41d319d4b9390f7334\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:44:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://960f2d7c617f5d0b3281d431308985bb7419af77581a404554f849d22ffa1687\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f71a2483b5040f737133b92baa6ea162f365c492a0d860a50ba8eabd7ca23519\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-10T10:45:21Z\\\",\\\"message\\\":\\\"file observer\\\\nW1210 10:45:20.821547 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1210 10:45:20.821865 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1210 10:45:20.824662 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-536389068/tls.crt::/tmp/serving-cert-536389068/tls.key\\\\\\\"\\\\nI1210 10:45:21.107127 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1210 10:45:21.109726 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1210 10:45:21.109750 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1210 10:45:21.109791 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1210 10:45:21.109797 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1210 10:45:21.115580 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1210 10:45:21.115608 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1210 10:45:21.115614 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1210 10:45:21.115621 1 
secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1210 10:45:21.115664 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1210 10:45:21.115672 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1210 10:45:21.115676 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1210 10:45:21.117097 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1210 10:45:21.119004 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:14Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://531ca3c69106ee33211f11eba4a6fcbcf6cf5b8b6ee16193b5a118999537d2e0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:00Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6a7d241fa7fd864088c8bfa29a4457b38b3e78a6846872d6f52aeebb0de99c58\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6a7d241fa7fd864088c8bfa29a4457b38b3e78a6846872d6f52aeebb0de99c58\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:44:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:44:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:44:56Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:37Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:37 crc kubenswrapper[4780]: I1210 10:46:37.590406 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:37 crc 
kubenswrapper[4780]: I1210 10:46:37.590479 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:37 crc kubenswrapper[4780]: I1210 10:46:37.590493 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:37 crc kubenswrapper[4780]: I1210 10:46:37.590510 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:37 crc kubenswrapper[4780]: I1210 10:46:37.590522 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:37Z","lastTransitionTime":"2025-12-10T10:46:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:37 crc kubenswrapper[4780]: I1210 10:46:37.599659 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8ca741d2df061bc77e981eca8a555ba59082c6db0357093490908a1b27054340\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:37Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:37 crc kubenswrapper[4780]: I1210 10:46:37.614896 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6bf1dca1-b191-4796-b326-baac53e84045\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://439376b6f14a03790c4d49523ce4f21e3ee73ee0a81954f873409cf956048f53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sh92h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://57ec3f3a4da992b4288db7d6633da18e479fdc56b10c503a2a44368671882f0d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sh92h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:27Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-xhdr5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:37Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:37 crc kubenswrapper[4780]: I1210 10:46:37.634284 4780 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:37Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:37 crc kubenswrapper[4780]: I1210 10:46:37.652329 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://acd480054c5d19efdbcce679b2246ce2972593aa710b1074a2fa90006f351c66\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7c28d1629c1d97373d475f14e54585e81a67114d5a69e21be9d1e750040d6d5c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:37Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:37 crc kubenswrapper[4780]: I1210 10:46:37.667226 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-msm77" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c1046aa3-9bf1-4013-8e8d-5629f08ed5e2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6a59aa8cd7a70e094ff80b0ae626aaabb77fb7ffdcffa1320945909c32da0704\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2ttwk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:30Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-msm77\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:37Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:37 crc kubenswrapper[4780]: I1210 10:46:37.682777 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6baf1baf-f093-40f2-b258-117118f89050\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f9311d33a46721ffe228b91722ad2c6f36efb49b5e31cb703de657cc4a40dcbe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:44:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6136af77a31fde9c46edcf705caf249a029c3136befb7c0d84a26eab36d29a20\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6136af77a31fde9c46edcf705caf249a029c3136befb7c0d84a26eab36d29a20\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:44:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:44:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:44:56Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:37Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:37 crc kubenswrapper[4780]: I1210 10:46:37.693730 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:37 crc kubenswrapper[4780]: I1210 10:46:37.693776 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Dec 10 10:46:37 crc kubenswrapper[4780]: I1210 10:46:37.693786 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:37 crc kubenswrapper[4780]: I1210 10:46:37.693807 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:37 crc kubenswrapper[4780]: I1210 10:46:37.693822 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:37Z","lastTransitionTime":"2025-12-10T10:46:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:37 crc kubenswrapper[4780]: I1210 10:46:37.712871 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9618efa9-9ecd-45a2-9e0e-e7a6f5d2566c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b36eaabc1d3267a1ecc43c1b45a77bb14b87f6b9d376062e48ceb3daca3729b3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ea1c2a440f85963adf3d061a880e537029c5c24a2291a46939e4ec9d939793ab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resourc
es\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://41450938b04291094dea7c8bf9bf7a8a105fa00797e51a7589d4c6e8caf5eba5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b332743ab49123e4673694feef994f5bb845661ca73dc8c0ac3be56dd6aa7e03\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e07cb0ca88a1b7a32f7c8ba948b0d5f90a08cb9f10c95285d6985d2daef5529b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0fa41953f04ce503bed17439bbaf1f2e80eb1aa886cda19a5a27039f3015c5b7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0fa41953f04ce503bed17439bbaf1f2e80eb1aa886cda19a5a27039f3015c5b7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:44:57Z\\\",\\\"reason\\\":\\\"Complet
ed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:44:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f16bee2af08ca98dad1fbbc5864b36f90851e2bcac54b37870c560109e1206f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9f16bee2af08ca98dad1fbbc5864b36f90851e2bcac54b37870c560109e1206f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:44:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:44:58Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://e2c2882ae9c017aadcf220f621d126e4b993c91d6fc35cc71ea7a37eadc9879a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e2c2882ae9c017aadcf220f621d126e4b993c91d6fc35cc71ea7a37eadc9879a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:44:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:44:56Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:37Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:37 crc kubenswrapper[4780]: I1210 10:46:37.733884 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"056c06c7-eb0f-4fb7-9f86-2884fd0d1e60\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b9592145faf65060693a3f1e14db253e10cd09c642ea0aa1a7682f5b06872f14\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:44:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://eccc5070135705c193e1020872bffc1b8aeb70dbe82f1bb520f36f012ca00703\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:44:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://72f3bd530fbcc66ed779ee9f6ef1c442f2697ca027aad5bd2473f42101d55528\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:44:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3970fa6bd2aa3e93a4473d78218e9dd494aa5f7056efbcfb123160bb63162192\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3970fa6bd2aa3e93a4473d78218e9dd494aa5f7056efbcfb123160bb63162192\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:44:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:44:57Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:44:56Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:37Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:37 crc kubenswrapper[4780]: I1210 10:46:37.797176 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:37 crc kubenswrapper[4780]: I1210 10:46:37.797227 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:37 crc kubenswrapper[4780]: I1210 10:46:37.797239 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:37 crc kubenswrapper[4780]: I1210 10:46:37.797256 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:37 crc kubenswrapper[4780]: I1210 10:46:37.797268 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:37Z","lastTransitionTime":"2025-12-10T10:46:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:37 crc kubenswrapper[4780]: I1210 10:46:37.901010 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:37 crc kubenswrapper[4780]: I1210 10:46:37.901081 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:37 crc kubenswrapper[4780]: I1210 10:46:37.901094 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:37 crc kubenswrapper[4780]: I1210 10:46:37.901115 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:37 crc kubenswrapper[4780]: I1210 10:46:37.901130 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:37Z","lastTransitionTime":"2025-12-10T10:46:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:37 crc kubenswrapper[4780]: I1210 10:46:37.958371 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 10 10:46:37 crc kubenswrapper[4780]: I1210 10:46:37.958371 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 10 10:46:37 crc kubenswrapper[4780]: I1210 10:46:37.958492 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 10 10:46:37 crc kubenswrapper[4780]: I1210 10:46:37.958811 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-46s5p" Dec 10 10:46:37 crc kubenswrapper[4780]: E1210 10:46:37.958880 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 10 10:46:37 crc kubenswrapper[4780]: E1210 10:46:37.959060 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 10 10:46:37 crc kubenswrapper[4780]: E1210 10:46:37.959156 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-46s5p" podUID="24187953-1dc5-48d7-b00c-1e5876604b6b" Dec 10 10:46:37 crc kubenswrapper[4780]: E1210 10:46:37.959360 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 10 10:46:38 crc kubenswrapper[4780]: I1210 10:46:38.005524 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:38 crc kubenswrapper[4780]: I1210 10:46:38.005599 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:38 crc kubenswrapper[4780]: I1210 10:46:38.005612 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:38 crc kubenswrapper[4780]: I1210 10:46:38.005632 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:38 crc kubenswrapper[4780]: I1210 10:46:38.005644 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:38Z","lastTransitionTime":"2025-12-10T10:46:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:38 crc kubenswrapper[4780]: I1210 10:46:38.107936 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:38 crc kubenswrapper[4780]: I1210 10:46:38.107993 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:38 crc kubenswrapper[4780]: I1210 10:46:38.108007 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:38 crc kubenswrapper[4780]: I1210 10:46:38.108026 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:38 crc kubenswrapper[4780]: I1210 10:46:38.108038 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:38Z","lastTransitionTime":"2025-12-10T10:46:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:38 crc kubenswrapper[4780]: I1210 10:46:38.212087 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:38 crc kubenswrapper[4780]: I1210 10:46:38.212175 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:38 crc kubenswrapper[4780]: I1210 10:46:38.212197 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:38 crc kubenswrapper[4780]: I1210 10:46:38.212235 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:38 crc kubenswrapper[4780]: I1210 10:46:38.212256 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:38Z","lastTransitionTime":"2025-12-10T10:46:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:38 crc kubenswrapper[4780]: I1210 10:46:38.314849 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:38 crc kubenswrapper[4780]: I1210 10:46:38.314942 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:38 crc kubenswrapper[4780]: I1210 10:46:38.314954 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:38 crc kubenswrapper[4780]: I1210 10:46:38.314977 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:38 crc kubenswrapper[4780]: I1210 10:46:38.314992 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:38Z","lastTransitionTime":"2025-12-10T10:46:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:38 crc kubenswrapper[4780]: I1210 10:46:38.366079 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-fpl55_cc22221d-0c02-4e8c-8314-c2e6d9290b5e/ovnkube-controller/3.log" Dec 10 10:46:38 crc kubenswrapper[4780]: I1210 10:46:38.419222 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:38 crc kubenswrapper[4780]: I1210 10:46:38.419344 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:38 crc kubenswrapper[4780]: I1210 10:46:38.419393 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:38 crc kubenswrapper[4780]: I1210 10:46:38.419419 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:38 crc kubenswrapper[4780]: I1210 10:46:38.419432 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:38Z","lastTransitionTime":"2025-12-10T10:46:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:38 crc kubenswrapper[4780]: I1210 10:46:38.523271 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:38 crc kubenswrapper[4780]: I1210 10:46:38.523325 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:38 crc kubenswrapper[4780]: I1210 10:46:38.523339 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:38 crc kubenswrapper[4780]: I1210 10:46:38.523358 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:38 crc kubenswrapper[4780]: I1210 10:46:38.523372 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:38Z","lastTransitionTime":"2025-12-10T10:46:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:38 crc kubenswrapper[4780]: I1210 10:46:38.626865 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:38 crc kubenswrapper[4780]: I1210 10:46:38.626969 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:38 crc kubenswrapper[4780]: I1210 10:46:38.626996 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:38 crc kubenswrapper[4780]: I1210 10:46:38.627023 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:38 crc kubenswrapper[4780]: I1210 10:46:38.627039 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:38Z","lastTransitionTime":"2025-12-10T10:46:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:38 crc kubenswrapper[4780]: I1210 10:46:38.734853 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:38 crc kubenswrapper[4780]: I1210 10:46:38.734910 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:38 crc kubenswrapper[4780]: I1210 10:46:38.734948 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:38 crc kubenswrapper[4780]: I1210 10:46:38.734976 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:38 crc kubenswrapper[4780]: I1210 10:46:38.734988 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:38Z","lastTransitionTime":"2025-12-10T10:46:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:38 crc kubenswrapper[4780]: I1210 10:46:38.839052 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:38 crc kubenswrapper[4780]: I1210 10:46:38.839156 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:38 crc kubenswrapper[4780]: I1210 10:46:38.839174 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:38 crc kubenswrapper[4780]: I1210 10:46:38.839208 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:38 crc kubenswrapper[4780]: I1210 10:46:38.839233 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:38Z","lastTransitionTime":"2025-12-10T10:46:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:38 crc kubenswrapper[4780]: I1210 10:46:38.943052 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:38 crc kubenswrapper[4780]: I1210 10:46:38.943115 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:38 crc kubenswrapper[4780]: I1210 10:46:38.943131 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:38 crc kubenswrapper[4780]: I1210 10:46:38.943151 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:38 crc kubenswrapper[4780]: I1210 10:46:38.943164 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:38Z","lastTransitionTime":"2025-12-10T10:46:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:39 crc kubenswrapper[4780]: I1210 10:46:39.046562 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:39 crc kubenswrapper[4780]: I1210 10:46:39.046613 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:39 crc kubenswrapper[4780]: I1210 10:46:39.046625 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:39 crc kubenswrapper[4780]: I1210 10:46:39.046645 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:39 crc kubenswrapper[4780]: I1210 10:46:39.046654 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:39Z","lastTransitionTime":"2025-12-10T10:46:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:39 crc kubenswrapper[4780]: I1210 10:46:39.149732 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:39 crc kubenswrapper[4780]: I1210 10:46:39.149808 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:39 crc kubenswrapper[4780]: I1210 10:46:39.149822 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:39 crc kubenswrapper[4780]: I1210 10:46:39.149843 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:39 crc kubenswrapper[4780]: I1210 10:46:39.149856 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:39Z","lastTransitionTime":"2025-12-10T10:46:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:39 crc kubenswrapper[4780]: I1210 10:46:39.252241 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:39 crc kubenswrapper[4780]: I1210 10:46:39.252308 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:39 crc kubenswrapper[4780]: I1210 10:46:39.252323 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:39 crc kubenswrapper[4780]: I1210 10:46:39.252344 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:39 crc kubenswrapper[4780]: I1210 10:46:39.252357 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:39Z","lastTransitionTime":"2025-12-10T10:46:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:39 crc kubenswrapper[4780]: I1210 10:46:39.355434 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:39 crc kubenswrapper[4780]: I1210 10:46:39.355534 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:39 crc kubenswrapper[4780]: I1210 10:46:39.355548 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:39 crc kubenswrapper[4780]: I1210 10:46:39.355567 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:39 crc kubenswrapper[4780]: I1210 10:46:39.355582 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:39Z","lastTransitionTime":"2025-12-10T10:46:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:39 crc kubenswrapper[4780]: I1210 10:46:39.460022 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:39 crc kubenswrapper[4780]: I1210 10:46:39.460101 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:39 crc kubenswrapper[4780]: I1210 10:46:39.460121 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:39 crc kubenswrapper[4780]: I1210 10:46:39.460146 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:39 crc kubenswrapper[4780]: I1210 10:46:39.460164 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:39Z","lastTransitionTime":"2025-12-10T10:46:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:39 crc kubenswrapper[4780]: I1210 10:46:39.563320 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:39 crc kubenswrapper[4780]: I1210 10:46:39.563406 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:39 crc kubenswrapper[4780]: I1210 10:46:39.563431 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:39 crc kubenswrapper[4780]: I1210 10:46:39.563462 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:39 crc kubenswrapper[4780]: I1210 10:46:39.563481 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:39Z","lastTransitionTime":"2025-12-10T10:46:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:39 crc kubenswrapper[4780]: I1210 10:46:39.666249 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:39 crc kubenswrapper[4780]: I1210 10:46:39.666339 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:39 crc kubenswrapper[4780]: I1210 10:46:39.666359 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:39 crc kubenswrapper[4780]: I1210 10:46:39.666388 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:39 crc kubenswrapper[4780]: I1210 10:46:39.666403 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:39Z","lastTransitionTime":"2025-12-10T10:46:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:39 crc kubenswrapper[4780]: I1210 10:46:39.769411 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:39 crc kubenswrapper[4780]: I1210 10:46:39.769475 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:39 crc kubenswrapper[4780]: I1210 10:46:39.769493 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:39 crc kubenswrapper[4780]: I1210 10:46:39.769520 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:39 crc kubenswrapper[4780]: I1210 10:46:39.769613 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:39Z","lastTransitionTime":"2025-12-10T10:46:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:39 crc kubenswrapper[4780]: I1210 10:46:39.872774 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:39 crc kubenswrapper[4780]: I1210 10:46:39.872860 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:39 crc kubenswrapper[4780]: I1210 10:46:39.872881 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:39 crc kubenswrapper[4780]: I1210 10:46:39.872980 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:39 crc kubenswrapper[4780]: I1210 10:46:39.873014 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:39Z","lastTransitionTime":"2025-12-10T10:46:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:39 crc kubenswrapper[4780]: I1210 10:46:39.958129 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 10 10:46:39 crc kubenswrapper[4780]: I1210 10:46:39.958255 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 10 10:46:39 crc kubenswrapper[4780]: I1210 10:46:39.958344 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 10 10:46:39 crc kubenswrapper[4780]: I1210 10:46:39.958416 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-46s5p" Dec 10 10:46:39 crc kubenswrapper[4780]: E1210 10:46:39.958511 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 10 10:46:39 crc kubenswrapper[4780]: E1210 10:46:39.958663 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-46s5p" podUID="24187953-1dc5-48d7-b00c-1e5876604b6b" Dec 10 10:46:39 crc kubenswrapper[4780]: E1210 10:46:39.958983 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 10 10:46:39 crc kubenswrapper[4780]: E1210 10:46:39.959015 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 10 10:46:39 crc kubenswrapper[4780]: I1210 10:46:39.976202 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:39 crc kubenswrapper[4780]: I1210 10:46:39.976286 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:39 crc kubenswrapper[4780]: I1210 10:46:39.976301 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:39 crc kubenswrapper[4780]: I1210 10:46:39.976332 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:39 crc kubenswrapper[4780]: I1210 10:46:39.976345 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:39Z","lastTransitionTime":"2025-12-10T10:46:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:40 crc kubenswrapper[4780]: I1210 10:46:40.080782 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:40 crc kubenswrapper[4780]: I1210 10:46:40.080852 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:40 crc kubenswrapper[4780]: I1210 10:46:40.080870 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:40 crc kubenswrapper[4780]: I1210 10:46:40.080899 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:40 crc kubenswrapper[4780]: I1210 10:46:40.080952 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:40Z","lastTransitionTime":"2025-12-10T10:46:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:40 crc kubenswrapper[4780]: I1210 10:46:40.184592 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:40 crc kubenswrapper[4780]: I1210 10:46:40.184674 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:40 crc kubenswrapper[4780]: I1210 10:46:40.184723 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:40 crc kubenswrapper[4780]: I1210 10:46:40.184762 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:40 crc kubenswrapper[4780]: I1210 10:46:40.184790 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:40Z","lastTransitionTime":"2025-12-10T10:46:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:40 crc kubenswrapper[4780]: I1210 10:46:40.288278 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:40 crc kubenswrapper[4780]: I1210 10:46:40.288349 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:40 crc kubenswrapper[4780]: I1210 10:46:40.288363 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:40 crc kubenswrapper[4780]: I1210 10:46:40.288381 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:40 crc kubenswrapper[4780]: I1210 10:46:40.288392 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:40Z","lastTransitionTime":"2025-12-10T10:46:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:40 crc kubenswrapper[4780]: I1210 10:46:40.390598 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:40 crc kubenswrapper[4780]: I1210 10:46:40.390651 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:40 crc kubenswrapper[4780]: I1210 10:46:40.390663 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:40 crc kubenswrapper[4780]: I1210 10:46:40.390681 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:40 crc kubenswrapper[4780]: I1210 10:46:40.390691 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:40Z","lastTransitionTime":"2025-12-10T10:46:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:40 crc kubenswrapper[4780]: I1210 10:46:40.494141 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:40 crc kubenswrapper[4780]: I1210 10:46:40.494218 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:40 crc kubenswrapper[4780]: I1210 10:46:40.494233 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:40 crc kubenswrapper[4780]: I1210 10:46:40.494260 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:40 crc kubenswrapper[4780]: I1210 10:46:40.494274 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:40Z","lastTransitionTime":"2025-12-10T10:46:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:40 crc kubenswrapper[4780]: I1210 10:46:40.597485 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:40 crc kubenswrapper[4780]: I1210 10:46:40.597579 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:40 crc kubenswrapper[4780]: I1210 10:46:40.597601 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:40 crc kubenswrapper[4780]: I1210 10:46:40.597630 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:40 crc kubenswrapper[4780]: I1210 10:46:40.597651 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:40Z","lastTransitionTime":"2025-12-10T10:46:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:40 crc kubenswrapper[4780]: I1210 10:46:40.701741 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:40 crc kubenswrapper[4780]: I1210 10:46:40.701811 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:40 crc kubenswrapper[4780]: I1210 10:46:40.701825 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:40 crc kubenswrapper[4780]: I1210 10:46:40.701848 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:40 crc kubenswrapper[4780]: I1210 10:46:40.701866 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:40Z","lastTransitionTime":"2025-12-10T10:46:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:40 crc kubenswrapper[4780]: I1210 10:46:40.805039 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:40 crc kubenswrapper[4780]: I1210 10:46:40.805133 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:40 crc kubenswrapper[4780]: I1210 10:46:40.805152 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:40 crc kubenswrapper[4780]: I1210 10:46:40.805180 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:40 crc kubenswrapper[4780]: I1210 10:46:40.805200 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:40Z","lastTransitionTime":"2025-12-10T10:46:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:40 crc kubenswrapper[4780]: I1210 10:46:40.908974 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:40 crc kubenswrapper[4780]: I1210 10:46:40.909048 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:40 crc kubenswrapper[4780]: I1210 10:46:40.909058 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:40 crc kubenswrapper[4780]: I1210 10:46:40.909076 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:40 crc kubenswrapper[4780]: I1210 10:46:40.909086 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:40Z","lastTransitionTime":"2025-12-10T10:46:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:41 crc kubenswrapper[4780]: I1210 10:46:41.004793 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:41 crc kubenswrapper[4780]: I1210 10:46:41.004837 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:41 crc kubenswrapper[4780]: I1210 10:46:41.004847 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:41 crc kubenswrapper[4780]: I1210 10:46:41.004864 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:41 crc kubenswrapper[4780]: I1210 10:46:41.004878 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:41Z","lastTransitionTime":"2025-12-10T10:46:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:41 crc kubenswrapper[4780]: E1210 10:46:41.021766 4780 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:46:41Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:41Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:46:41Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:41Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:46:41Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:41Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:46:41Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:41Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"90085e8a-5ea9-4564-85e4-5635b00d094d\\\",\\\"systemUUID\\\":\\\"0182e509-70c5-4f26-9ad3-610230bb601e\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:41Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:41 crc kubenswrapper[4780]: I1210 10:46:41.027280 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:41 crc kubenswrapper[4780]: I1210 10:46:41.027346 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Dec 10 10:46:41 crc kubenswrapper[4780]: I1210 10:46:41.027361 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:41 crc kubenswrapper[4780]: I1210 10:46:41.027382 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:41 crc kubenswrapper[4780]: I1210 10:46:41.027398 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:41Z","lastTransitionTime":"2025-12-10T10:46:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:41 crc kubenswrapper[4780]: E1210 10:46:41.043365 4780 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:46:41Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:41Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:46:41Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:41Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:46:41Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:41Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:46:41Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:41Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"90085e8a-5ea9-4564-85e4-5635b00d094d\\\",\\\"systemUUID\\\":\\\"0182e509-70c5-4f26-9ad3-610230bb601e\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:41Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:41 crc kubenswrapper[4780]: I1210 10:46:41.048591 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:41 crc kubenswrapper[4780]: I1210 10:46:41.048662 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Dec 10 10:46:41 crc kubenswrapper[4780]: I1210 10:46:41.048674 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:41 crc kubenswrapper[4780]: I1210 10:46:41.048693 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:41 crc kubenswrapper[4780]: I1210 10:46:41.048706 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:41Z","lastTransitionTime":"2025-12-10T10:46:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:41 crc kubenswrapper[4780]: E1210 10:46:41.064812 4780 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:46:41Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:41Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:46:41Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:41Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:46:41Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:41Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:46:41Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:41Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"90085e8a-5ea9-4564-85e4-5635b00d094d\\\",\\\"systemUUID\\\":\\\"0182e509-70c5-4f26-9ad3-610230bb601e\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:41Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:41 crc kubenswrapper[4780]: I1210 10:46:41.070630 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:41 crc kubenswrapper[4780]: I1210 10:46:41.070693 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Dec 10 10:46:41 crc kubenswrapper[4780]: I1210 10:46:41.070718 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:41 crc kubenswrapper[4780]: I1210 10:46:41.070746 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:41 crc kubenswrapper[4780]: I1210 10:46:41.070766 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:41Z","lastTransitionTime":"2025-12-10T10:46:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:41 crc kubenswrapper[4780]: E1210 10:46:41.085609 4780 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:46:41Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:41Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:46:41Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:41Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:46:41Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:41Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:46:41Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:41Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"90085e8a-5ea9-4564-85e4-5635b00d094d\\\",\\\"systemUUID\\\":\\\"0182e509-70c5-4f26-9ad3-610230bb601e\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:41Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:41 crc kubenswrapper[4780]: I1210 10:46:41.090534 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:41 crc kubenswrapper[4780]: I1210 10:46:41.090618 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Dec 10 10:46:41 crc kubenswrapper[4780]: I1210 10:46:41.090634 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:41 crc kubenswrapper[4780]: I1210 10:46:41.090679 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:41 crc kubenswrapper[4780]: I1210 10:46:41.090705 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:41Z","lastTransitionTime":"2025-12-10T10:46:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:41 crc kubenswrapper[4780]: E1210 10:46:41.106964 4780 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:46:41Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:41Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:46:41Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:41Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:46:41Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:41Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:46:41Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:41Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"90085e8a-5ea9-4564-85e4-5635b00d094d\\\",\\\"systemUUID\\\":\\\"0182e509-70c5-4f26-9ad3-610230bb601e\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:41Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:41 crc kubenswrapper[4780]: E1210 10:46:41.107139 4780 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Dec 10 10:46:41 crc kubenswrapper[4780]: I1210 10:46:41.108979 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Dec 10 10:46:41 crc kubenswrapper[4780]: I1210 10:46:41.109032 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:41 crc kubenswrapper[4780]: I1210 10:46:41.109044 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:41 crc kubenswrapper[4780]: I1210 10:46:41.109062 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:41 crc kubenswrapper[4780]: I1210 10:46:41.109076 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:41Z","lastTransitionTime":"2025-12-10T10:46:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:41 crc kubenswrapper[4780]: I1210 10:46:41.213139 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:41 crc kubenswrapper[4780]: I1210 10:46:41.213239 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:41 crc kubenswrapper[4780]: I1210 10:46:41.213266 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:41 crc kubenswrapper[4780]: I1210 10:46:41.213296 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:41 crc kubenswrapper[4780]: I1210 10:46:41.213335 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:41Z","lastTransitionTime":"2025-12-10T10:46:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:41 crc kubenswrapper[4780]: I1210 10:46:41.316520 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:41 crc kubenswrapper[4780]: I1210 10:46:41.316570 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:41 crc kubenswrapper[4780]: I1210 10:46:41.316582 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:41 crc kubenswrapper[4780]: I1210 10:46:41.316600 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:41 crc kubenswrapper[4780]: I1210 10:46:41.316614 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:41Z","lastTransitionTime":"2025-12-10T10:46:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:41 crc kubenswrapper[4780]: I1210 10:46:41.421082 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:41 crc kubenswrapper[4780]: I1210 10:46:41.421142 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:41 crc kubenswrapper[4780]: I1210 10:46:41.421155 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:41 crc kubenswrapper[4780]: I1210 10:46:41.421173 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:41 crc kubenswrapper[4780]: I1210 10:46:41.421187 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:41Z","lastTransitionTime":"2025-12-10T10:46:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:41 crc kubenswrapper[4780]: I1210 10:46:41.524443 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:41 crc kubenswrapper[4780]: I1210 10:46:41.524502 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:41 crc kubenswrapper[4780]: I1210 10:46:41.524517 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:41 crc kubenswrapper[4780]: I1210 10:46:41.524534 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:41 crc kubenswrapper[4780]: I1210 10:46:41.524547 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:41Z","lastTransitionTime":"2025-12-10T10:46:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:41 crc kubenswrapper[4780]: I1210 10:46:41.627901 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:41 crc kubenswrapper[4780]: I1210 10:46:41.627963 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:41 crc kubenswrapper[4780]: I1210 10:46:41.627975 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:41 crc kubenswrapper[4780]: I1210 10:46:41.627989 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:41 crc kubenswrapper[4780]: I1210 10:46:41.627998 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:41Z","lastTransitionTime":"2025-12-10T10:46:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:41 crc kubenswrapper[4780]: I1210 10:46:41.730415 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:41 crc kubenswrapper[4780]: I1210 10:46:41.730508 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:41 crc kubenswrapper[4780]: I1210 10:46:41.730529 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:41 crc kubenswrapper[4780]: I1210 10:46:41.730558 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:41 crc kubenswrapper[4780]: I1210 10:46:41.730576 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:41Z","lastTransitionTime":"2025-12-10T10:46:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:41 crc kubenswrapper[4780]: I1210 10:46:41.833336 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:41 crc kubenswrapper[4780]: I1210 10:46:41.833405 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:41 crc kubenswrapper[4780]: I1210 10:46:41.833420 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:41 crc kubenswrapper[4780]: I1210 10:46:41.833438 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:41 crc kubenswrapper[4780]: I1210 10:46:41.833449 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:41Z","lastTransitionTime":"2025-12-10T10:46:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:41 crc kubenswrapper[4780]: I1210 10:46:41.936729 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:41 crc kubenswrapper[4780]: I1210 10:46:41.936781 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:41 crc kubenswrapper[4780]: I1210 10:46:41.936793 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:41 crc kubenswrapper[4780]: I1210 10:46:41.936812 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:41 crc kubenswrapper[4780]: I1210 10:46:41.936824 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:41Z","lastTransitionTime":"2025-12-10T10:46:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:41 crc kubenswrapper[4780]: I1210 10:46:41.958813 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 10 10:46:41 crc kubenswrapper[4780]: I1210 10:46:41.958846 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 10 10:46:41 crc kubenswrapper[4780]: I1210 10:46:41.958849 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-46s5p" Dec 10 10:46:41 crc kubenswrapper[4780]: I1210 10:46:41.958887 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 10 10:46:41 crc kubenswrapper[4780]: E1210 10:46:41.959099 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 10 10:46:41 crc kubenswrapper[4780]: E1210 10:46:41.959246 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-46s5p" podUID="24187953-1dc5-48d7-b00c-1e5876604b6b" Dec 10 10:46:41 crc kubenswrapper[4780]: E1210 10:46:41.959357 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 10 10:46:41 crc kubenswrapper[4780]: E1210 10:46:41.959461 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 10 10:46:42 crc kubenswrapper[4780]: I1210 10:46:42.039717 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:42 crc kubenswrapper[4780]: I1210 10:46:42.039772 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:42 crc kubenswrapper[4780]: I1210 10:46:42.039790 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:42 crc kubenswrapper[4780]: I1210 10:46:42.039809 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:42 crc kubenswrapper[4780]: I1210 10:46:42.039822 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:42Z","lastTransitionTime":"2025-12-10T10:46:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:42 crc kubenswrapper[4780]: I1210 10:46:42.422244 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:42 crc kubenswrapper[4780]: I1210 10:46:42.422352 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:42 crc kubenswrapper[4780]: I1210 10:46:42.422366 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:42 crc kubenswrapper[4780]: I1210 10:46:42.422384 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:42 crc kubenswrapper[4780]: I1210 10:46:42.422399 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:42Z","lastTransitionTime":"2025-12-10T10:46:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:42 crc kubenswrapper[4780]: I1210 10:46:42.524971 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:42 crc kubenswrapper[4780]: I1210 10:46:42.525077 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:42 crc kubenswrapper[4780]: I1210 10:46:42.525301 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:42 crc kubenswrapper[4780]: I1210 10:46:42.525322 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:42 crc kubenswrapper[4780]: I1210 10:46:42.525334 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:42Z","lastTransitionTime":"2025-12-10T10:46:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:42 crc kubenswrapper[4780]: I1210 10:46:42.627968 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:42 crc kubenswrapper[4780]: I1210 10:46:42.628038 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:42 crc kubenswrapper[4780]: I1210 10:46:42.628050 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:42 crc kubenswrapper[4780]: I1210 10:46:42.628073 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:42 crc kubenswrapper[4780]: I1210 10:46:42.628085 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:42Z","lastTransitionTime":"2025-12-10T10:46:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:42 crc kubenswrapper[4780]: I1210 10:46:42.731336 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:42 crc kubenswrapper[4780]: I1210 10:46:42.731385 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:42 crc kubenswrapper[4780]: I1210 10:46:42.731394 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:42 crc kubenswrapper[4780]: I1210 10:46:42.731413 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:42 crc kubenswrapper[4780]: I1210 10:46:42.731426 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:42Z","lastTransitionTime":"2025-12-10T10:46:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:42 crc kubenswrapper[4780]: I1210 10:46:42.834291 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:42 crc kubenswrapper[4780]: I1210 10:46:42.834349 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:42 crc kubenswrapper[4780]: I1210 10:46:42.834360 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:42 crc kubenswrapper[4780]: I1210 10:46:42.834379 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:42 crc kubenswrapper[4780]: I1210 10:46:42.834408 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:42Z","lastTransitionTime":"2025-12-10T10:46:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:42 crc kubenswrapper[4780]: I1210 10:46:42.937005 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:42 crc kubenswrapper[4780]: I1210 10:46:42.937077 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:42 crc kubenswrapper[4780]: I1210 10:46:42.937090 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:42 crc kubenswrapper[4780]: I1210 10:46:42.937104 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:42 crc kubenswrapper[4780]: I1210 10:46:42.937114 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:42Z","lastTransitionTime":"2025-12-10T10:46:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:43 crc kubenswrapper[4780]: I1210 10:46:43.040665 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:43 crc kubenswrapper[4780]: I1210 10:46:43.040806 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:43 crc kubenswrapper[4780]: I1210 10:46:43.040832 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:43 crc kubenswrapper[4780]: I1210 10:46:43.040865 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:43 crc kubenswrapper[4780]: I1210 10:46:43.040890 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:43Z","lastTransitionTime":"2025-12-10T10:46:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:43 crc kubenswrapper[4780]: I1210 10:46:43.143504 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:43 crc kubenswrapper[4780]: I1210 10:46:43.143601 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:43 crc kubenswrapper[4780]: I1210 10:46:43.143620 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:43 crc kubenswrapper[4780]: I1210 10:46:43.143639 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:43 crc kubenswrapper[4780]: I1210 10:46:43.143650 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:43Z","lastTransitionTime":"2025-12-10T10:46:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:43 crc kubenswrapper[4780]: I1210 10:46:43.247589 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:43 crc kubenswrapper[4780]: I1210 10:46:43.247678 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:43 crc kubenswrapper[4780]: I1210 10:46:43.247691 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:43 crc kubenswrapper[4780]: I1210 10:46:43.247712 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:43 crc kubenswrapper[4780]: I1210 10:46:43.247725 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:43Z","lastTransitionTime":"2025-12-10T10:46:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:43 crc kubenswrapper[4780]: I1210 10:46:43.351481 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:43 crc kubenswrapper[4780]: I1210 10:46:43.351565 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:43 crc kubenswrapper[4780]: I1210 10:46:43.351579 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:43 crc kubenswrapper[4780]: I1210 10:46:43.351600 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:43 crc kubenswrapper[4780]: I1210 10:46:43.351610 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:43Z","lastTransitionTime":"2025-12-10T10:46:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:43 crc kubenswrapper[4780]: I1210 10:46:43.454982 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:43 crc kubenswrapper[4780]: I1210 10:46:43.455034 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:43 crc kubenswrapper[4780]: I1210 10:46:43.455044 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:43 crc kubenswrapper[4780]: I1210 10:46:43.455062 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:43 crc kubenswrapper[4780]: I1210 10:46:43.455072 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:43Z","lastTransitionTime":"2025-12-10T10:46:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:43 crc kubenswrapper[4780]: I1210 10:46:43.557802 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:43 crc kubenswrapper[4780]: I1210 10:46:43.557850 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:43 crc kubenswrapper[4780]: I1210 10:46:43.557863 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:43 crc kubenswrapper[4780]: I1210 10:46:43.557880 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:43 crc kubenswrapper[4780]: I1210 10:46:43.557891 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:43Z","lastTransitionTime":"2025-12-10T10:46:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:43 crc kubenswrapper[4780]: I1210 10:46:43.661286 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:43 crc kubenswrapper[4780]: I1210 10:46:43.661360 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:43 crc kubenswrapper[4780]: I1210 10:46:43.661379 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:43 crc kubenswrapper[4780]: I1210 10:46:43.661408 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:43 crc kubenswrapper[4780]: I1210 10:46:43.661427 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:43Z","lastTransitionTime":"2025-12-10T10:46:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:43 crc kubenswrapper[4780]: I1210 10:46:43.764692 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:43 crc kubenswrapper[4780]: I1210 10:46:43.764769 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:43 crc kubenswrapper[4780]: I1210 10:46:43.764792 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:43 crc kubenswrapper[4780]: I1210 10:46:43.764819 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:43 crc kubenswrapper[4780]: I1210 10:46:43.764838 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:43Z","lastTransitionTime":"2025-12-10T10:46:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:43 crc kubenswrapper[4780]: I1210 10:46:43.868814 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:43 crc kubenswrapper[4780]: I1210 10:46:43.868871 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:43 crc kubenswrapper[4780]: I1210 10:46:43.868884 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:43 crc kubenswrapper[4780]: I1210 10:46:43.868902 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:43 crc kubenswrapper[4780]: I1210 10:46:43.868913 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:43Z","lastTransitionTime":"2025-12-10T10:46:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:43 crc kubenswrapper[4780]: I1210 10:46:43.958766 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 10 10:46:43 crc kubenswrapper[4780]: I1210 10:46:43.958795 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 10 10:46:43 crc kubenswrapper[4780]: I1210 10:46:43.958817 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 10 10:46:43 crc kubenswrapper[4780]: E1210 10:46:43.959203 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 10 10:46:43 crc kubenswrapper[4780]: E1210 10:46:43.959234 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 10 10:46:43 crc kubenswrapper[4780]: I1210 10:46:43.959327 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-46s5p" Dec 10 10:46:43 crc kubenswrapper[4780]: E1210 10:46:43.959376 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 10 10:46:43 crc kubenswrapper[4780]: E1210 10:46:43.959432 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-46s5p" podUID="24187953-1dc5-48d7-b00c-1e5876604b6b" Dec 10 10:46:43 crc kubenswrapper[4780]: I1210 10:46:43.972470 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:43 crc kubenswrapper[4780]: I1210 10:46:43.972538 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:43 crc kubenswrapper[4780]: I1210 10:46:43.972577 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:43 crc kubenswrapper[4780]: I1210 10:46:43.972619 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:43 crc kubenswrapper[4780]: I1210 10:46:43.972638 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:43Z","lastTransitionTime":"2025-12-10T10:46:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:44 crc kubenswrapper[4780]: I1210 10:46:44.075815 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:44 crc kubenswrapper[4780]: I1210 10:46:44.075892 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:44 crc kubenswrapper[4780]: I1210 10:46:44.075907 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:44 crc kubenswrapper[4780]: I1210 10:46:44.075957 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:44 crc kubenswrapper[4780]: I1210 10:46:44.075976 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:44Z","lastTransitionTime":"2025-12-10T10:46:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:44 crc kubenswrapper[4780]: I1210 10:46:44.179980 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:44 crc kubenswrapper[4780]: I1210 10:46:44.180056 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:44 crc kubenswrapper[4780]: I1210 10:46:44.180072 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:44 crc kubenswrapper[4780]: I1210 10:46:44.180093 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:44 crc kubenswrapper[4780]: I1210 10:46:44.180105 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:44Z","lastTransitionTime":"2025-12-10T10:46:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:44 crc kubenswrapper[4780]: I1210 10:46:44.283409 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:44 crc kubenswrapper[4780]: I1210 10:46:44.283480 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:44 crc kubenswrapper[4780]: I1210 10:46:44.283495 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:44 crc kubenswrapper[4780]: I1210 10:46:44.283516 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:44 crc kubenswrapper[4780]: I1210 10:46:44.283530 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:44Z","lastTransitionTime":"2025-12-10T10:46:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:44 crc kubenswrapper[4780]: I1210 10:46:44.341856 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/24187953-1dc5-48d7-b00c-1e5876604b6b-metrics-certs\") pod \"network-metrics-daemon-46s5p\" (UID: \"24187953-1dc5-48d7-b00c-1e5876604b6b\") " pod="openshift-multus/network-metrics-daemon-46s5p" Dec 10 10:46:44 crc kubenswrapper[4780]: E1210 10:46:44.342232 4780 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Dec 10 10:46:44 crc kubenswrapper[4780]: E1210 10:46:44.342422 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/24187953-1dc5-48d7-b00c-1e5876604b6b-metrics-certs podName:24187953-1dc5-48d7-b00c-1e5876604b6b nodeName:}" failed. No retries permitted until 2025-12-10 10:47:48.34237447 +0000 UTC m=+173.195767943 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/24187953-1dc5-48d7-b00c-1e5876604b6b-metrics-certs") pod "network-metrics-daemon-46s5p" (UID: "24187953-1dc5-48d7-b00c-1e5876604b6b") : object "openshift-multus"/"metrics-daemon-secret" not registered Dec 10 10:46:44 crc kubenswrapper[4780]: I1210 10:46:44.387746 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:44 crc kubenswrapper[4780]: I1210 10:46:44.387809 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:44 crc kubenswrapper[4780]: I1210 10:46:44.387821 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:44 crc kubenswrapper[4780]: I1210 10:46:44.387838 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:44 crc kubenswrapper[4780]: I1210 10:46:44.387850 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:44Z","lastTransitionTime":"2025-12-10T10:46:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:44 crc kubenswrapper[4780]: I1210 10:46:44.490797 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:44 crc kubenswrapper[4780]: I1210 10:46:44.490846 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:44 crc kubenswrapper[4780]: I1210 10:46:44.490857 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:44 crc kubenswrapper[4780]: I1210 10:46:44.490872 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:44 crc kubenswrapper[4780]: I1210 10:46:44.490882 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:44Z","lastTransitionTime":"2025-12-10T10:46:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:44 crc kubenswrapper[4780]: I1210 10:46:44.593555 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:44 crc kubenswrapper[4780]: I1210 10:46:44.593628 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:44 crc kubenswrapper[4780]: I1210 10:46:44.593646 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:44 crc kubenswrapper[4780]: I1210 10:46:44.593668 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:44 crc kubenswrapper[4780]: I1210 10:46:44.593683 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:44Z","lastTransitionTime":"2025-12-10T10:46:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:44 crc kubenswrapper[4780]: I1210 10:46:44.698173 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:44 crc kubenswrapper[4780]: I1210 10:46:44.698215 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:44 crc kubenswrapper[4780]: I1210 10:46:44.698225 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:44 crc kubenswrapper[4780]: I1210 10:46:44.698240 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:44 crc kubenswrapper[4780]: I1210 10:46:44.698252 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:44Z","lastTransitionTime":"2025-12-10T10:46:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:44 crc kubenswrapper[4780]: I1210 10:46:44.801854 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:44 crc kubenswrapper[4780]: I1210 10:46:44.801907 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:44 crc kubenswrapper[4780]: I1210 10:46:44.801950 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:44 crc kubenswrapper[4780]: I1210 10:46:44.801965 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:44 crc kubenswrapper[4780]: I1210 10:46:44.801974 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:44Z","lastTransitionTime":"2025-12-10T10:46:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:44 crc kubenswrapper[4780]: I1210 10:46:44.905363 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:44 crc kubenswrapper[4780]: I1210 10:46:44.905437 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:44 crc kubenswrapper[4780]: I1210 10:46:44.905451 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:44 crc kubenswrapper[4780]: I1210 10:46:44.905473 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:44 crc kubenswrapper[4780]: I1210 10:46:44.905491 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:44Z","lastTransitionTime":"2025-12-10T10:46:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:45 crc kubenswrapper[4780]: I1210 10:46:45.008962 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:45 crc kubenswrapper[4780]: I1210 10:46:45.009060 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:45 crc kubenswrapper[4780]: I1210 10:46:45.009070 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:45 crc kubenswrapper[4780]: I1210 10:46:45.009092 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:45 crc kubenswrapper[4780]: I1210 10:46:45.009104 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:45Z","lastTransitionTime":"2025-12-10T10:46:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:45 crc kubenswrapper[4780]: I1210 10:46:45.112358 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:45 crc kubenswrapper[4780]: I1210 10:46:45.112469 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:45 crc kubenswrapper[4780]: I1210 10:46:45.112491 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:45 crc kubenswrapper[4780]: I1210 10:46:45.112519 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:45 crc kubenswrapper[4780]: I1210 10:46:45.112539 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:45Z","lastTransitionTime":"2025-12-10T10:46:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:45 crc kubenswrapper[4780]: I1210 10:46:45.215342 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:45 crc kubenswrapper[4780]: I1210 10:46:45.215393 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:45 crc kubenswrapper[4780]: I1210 10:46:45.215406 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:45 crc kubenswrapper[4780]: I1210 10:46:45.215423 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:45 crc kubenswrapper[4780]: I1210 10:46:45.215436 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:45Z","lastTransitionTime":"2025-12-10T10:46:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:45 crc kubenswrapper[4780]: I1210 10:46:45.319879 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:45 crc kubenswrapper[4780]: I1210 10:46:45.319996 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:45 crc kubenswrapper[4780]: I1210 10:46:45.320015 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:45 crc kubenswrapper[4780]: I1210 10:46:45.320080 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:45 crc kubenswrapper[4780]: I1210 10:46:45.320101 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:45Z","lastTransitionTime":"2025-12-10T10:46:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:45 crc kubenswrapper[4780]: I1210 10:46:45.424439 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:45 crc kubenswrapper[4780]: I1210 10:46:45.424508 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:45 crc kubenswrapper[4780]: I1210 10:46:45.424529 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:45 crc kubenswrapper[4780]: I1210 10:46:45.424561 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:45 crc kubenswrapper[4780]: I1210 10:46:45.424662 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:45Z","lastTransitionTime":"2025-12-10T10:46:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:45 crc kubenswrapper[4780]: I1210 10:46:45.528402 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:45 crc kubenswrapper[4780]: I1210 10:46:45.528462 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:45 crc kubenswrapper[4780]: I1210 10:46:45.528474 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:45 crc kubenswrapper[4780]: I1210 10:46:45.528494 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:45 crc kubenswrapper[4780]: I1210 10:46:45.528505 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:45Z","lastTransitionTime":"2025-12-10T10:46:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:45 crc kubenswrapper[4780]: I1210 10:46:45.632333 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:45 crc kubenswrapper[4780]: I1210 10:46:45.632392 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:45 crc kubenswrapper[4780]: I1210 10:46:45.632404 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:45 crc kubenswrapper[4780]: I1210 10:46:45.632424 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:45 crc kubenswrapper[4780]: I1210 10:46:45.632438 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:45Z","lastTransitionTime":"2025-12-10T10:46:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:45 crc kubenswrapper[4780]: I1210 10:46:45.735347 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:45 crc kubenswrapper[4780]: I1210 10:46:45.735429 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:45 crc kubenswrapper[4780]: I1210 10:46:45.735439 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:45 crc kubenswrapper[4780]: I1210 10:46:45.735456 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:45 crc kubenswrapper[4780]: I1210 10:46:45.735466 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:45Z","lastTransitionTime":"2025-12-10T10:46:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:45 crc kubenswrapper[4780]: I1210 10:46:45.914191 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:45 crc kubenswrapper[4780]: I1210 10:46:45.915085 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:45 crc kubenswrapper[4780]: I1210 10:46:45.915145 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:45 crc kubenswrapper[4780]: I1210 10:46:45.915173 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:45 crc kubenswrapper[4780]: I1210 10:46:45.915185 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:45Z","lastTransitionTime":"2025-12-10T10:46:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:45 crc kubenswrapper[4780]: I1210 10:46:45.958712 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 10 10:46:45 crc kubenswrapper[4780]: I1210 10:46:45.958802 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 10 10:46:45 crc kubenswrapper[4780]: I1210 10:46:45.958839 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 10 10:46:45 crc kubenswrapper[4780]: E1210 10:46:45.958887 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 10 10:46:45 crc kubenswrapper[4780]: I1210 10:46:45.958970 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-46s5p" Dec 10 10:46:45 crc kubenswrapper[4780]: E1210 10:46:45.959073 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 10 10:46:45 crc kubenswrapper[4780]: E1210 10:46:45.959184 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 10 10:46:45 crc kubenswrapper[4780]: E1210 10:46:45.959274 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-46s5p" podUID="24187953-1dc5-48d7-b00c-1e5876604b6b" Dec 10 10:46:45 crc kubenswrapper[4780]: I1210 10:46:45.977333 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"da4d38a3-53c0-417d-a86f-3496714bd352\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7f0f1f88e79f81b145ad50cc2b0a6e66de12f33c6819aaa356c8e6f7808384f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:44:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d6c4b099607f36efe13488a53c66715ba756493eaa11612cef18b8174b7b48ef\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:44:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a352e1e0c2a6fcb27ed2d53b1952586d56239ac801471f41d319d4b9390f7334\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:44:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://960f2d7c617f5d0b3281d431308985bb7419af77581a404554f849d22ffa1687\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f71a2483b5040f737133b92baa6ea162f365c492a0d860a50ba8eabd7ca23519\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-10T10:45:21Z\\\",\\\"message\\\":\\\"file observer\\\\nW1210 10:45:20.821547 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1210 10:45:20.821865 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1210 10:45:20.824662 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-536389068/tls.crt::/tmp/serving-cert-536389068/tls.key\\\\\\\"\\\\nI1210 10:45:21.107127 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1210 10:45:21.109726 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1210 10:45:21.109750 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1210 10:45:21.109791 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1210 10:45:21.109797 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1210 10:45:21.115580 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1210 10:45:21.115608 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1210 10:45:21.115614 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1210 10:45:21.115621 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1210 10:45:21.115664 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1210 10:45:21.115672 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1210 10:45:21.115676 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1210 10:45:21.117097 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1210 10:45:21.119004 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:14Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://531ca3c69106ee33211f11eba4a6fcbcf6cf5b8b6ee16193b5a118999537d2e0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:00Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6a7d241fa7fd864088c8bfa29a4457b38b3e78a6846872d6f52aeebb0de99c58\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6a7d241fa7fd864088c8bfa29a4457b38b3e78a6846872d6f52aeebb0de99c58\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:44:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:44:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:44:56Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:45Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:45 crc kubenswrapper[4780]: I1210 10:46:45.990326 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8ca741d2df061bc77e981eca8a555ba59082c6db0357093490908a1b27054340\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:45Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:46 crc kubenswrapper[4780]: I1210 10:46:46.003819 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6bf1dca1-b191-4796-b326-baac53e84045\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://439376b6f14a03790c4d49523ce4f21e3ee73ee0a81954f873409cf956048f53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sh92h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://57ec3f3a4da992b4288db7d6633da18e479fdc56b10c503a2a44368671882f0d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sh92h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:27Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-xhdr5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:46Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:46 crc kubenswrapper[4780]: I1210 10:46:46.020123 4780 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://acd480054c5d19efdbcce679b2246ce2972593aa710b1074a2fa90006f351c66\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7c28d1629c1d97373d475f14e54585e81a67114d5a69e21be9d1e750040d6d5c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:46Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:46 crc kubenswrapper[4780]: I1210 10:46:46.024103 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:46 crc kubenswrapper[4780]: I1210 10:46:46.024180 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:46 
crc kubenswrapper[4780]: I1210 10:46:46.024201 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:46 crc kubenswrapper[4780]: I1210 10:46:46.024220 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:46 crc kubenswrapper[4780]: I1210 10:46:46.024238 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:46Z","lastTransitionTime":"2025-12-10T10:46:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:46 crc kubenswrapper[4780]: I1210 10:46:46.034351 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-msm77" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c1046aa3-9bf1-4013-8e8d-5629f08ed5e2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6a59aa8cd7a70e094ff80b0ae626aaabb77fb7ffdcffa1320945909c32da0704\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2ttwk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:30Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-msm77\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:46Z is after 2025-08-24T17:21:41Z" 
Dec 10 10:46:46 crc kubenswrapper[4780]: I1210 10:46:46.046300 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6baf1baf-f093-40f2-b258-117118f89050\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f9311d33a46721ffe228b91722ad2c6f36efb49b5e31cb703de657cc4a40dcbe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:44:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6136af77a31fde9c46edcf705caf249a029c3136befb7c0d84a26eab36d29a20\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6136af77a31fde9c46edcf705caf249a029c3136befb7c0d84a26eab36d29a20\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:44:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:44:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:44:56Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:46Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:46 crc kubenswrapper[4780]: I1210 10:46:46.066559 4780 status_manager.go:875] "Failed to update 
status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9618efa9-9ecd-45a2-9e0e-e7a6f5d2566c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b36eaabc1d3267a1ecc43c1b45a77bb14b87f6b9d376062e48ceb3daca3729b3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ea1c2a440f85963adf3d061a880e537029c5c24a2291a46939e4ec9d939793ab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://41450938b04291094dea7c8bf9bf7a8a105fa00797e51a7589d4c6e8caf5eba5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]
},{\\\"containerID\\\":\\\"cri-o://b332743ab49123e4673694feef994f5bb845661ca73dc8c0ac3be56dd6aa7e03\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e07cb0ca88a1b7a32f7c8ba948b0d5f90a08cb9f10c95285d6985d2daef5529b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0fa41953f04ce503bed17439bbaf1f2e80eb1aa886cda19a5a27039f3015c5b7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0fa41953f04ce503bed17439bbaf1f2e80eb1aa886cda19a5a27039f3015c5b7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:44:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:44:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f16bee2af08ca98dad1fbbc5864b36f90851e2bcac54b37870c560109e1206f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9f16bee2af08ca98dad1fbbc5864b36f90851e2bcac54b37870c560109e1206f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:44:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:44:58Z\\\"}}},{\\\"containerID\\\":\\\"cr
i-o://e2c2882ae9c017aadcf220f621d126e4b993c91d6fc35cc71ea7a37eadc9879a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e2c2882ae9c017aadcf220f621d126e4b993c91d6fc35cc71ea7a37eadc9879a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:44:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:44:56Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:46Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:46 crc kubenswrapper[4780]: I1210 10:46:46.080185 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"056c06c7-eb0f-4fb7-9f86-2884fd0d1e60\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b9592145faf65060693a3f1e14db253e10cd09c642ea0aa1a7682f5b06872f14\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:44:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://eccc5070135705c193e1020872bffc1b8aeb70dbe82f1bb520f36f012ca00703\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:44:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://72f3bd530fbcc66ed779ee9f6ef1c442f2697ca027aad5bd2473f42101d55528\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:44:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3970fa6bd2aa3e93a4473d78218e9dd494aa5f7056efbcfb123160bb63162192\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3970fa6bd2aa3e93a4473d78218e9dd494aa5f7056efbcfb123160bb63162192\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:44:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:44:57Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:44:56Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:46Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:46 crc kubenswrapper[4780]: I1210 10:46:46.099207 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:46Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:46 crc kubenswrapper[4780]: I1210 10:46:46.115784 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-rwfxn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e522fb8-b104-4f14-a3a2-628fbe0ef36c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f3318d09af03d52017f648e9727180287dbcfce150242eaeb5f95be2fa6a1374\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5855e463a188d72cb8eac92bd97b9b2f91550a9a5437d7df83405bbe7a104f5d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"s
tarted\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5855e463a188d72cb8eac92bd97b9b2f91550a9a5437d7df83405bbe7a104f5d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4a16db7b744311d27339440bc46ebd09ba4264c5892ff205c6125af2a127e67e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4a16db7b744311d27339440bc46ebd09ba4264c5892ff205c6125af2a127e67e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fcbeee4d4e545150d725187049a89aa3d35ddd8f7767a6af0ea84a385454c316\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fcbeee4d4e545150d725187049a89aa3d35ddd8f7767a6af0ea84a385454c316\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"
}]},{\\\"containerID\\\":\\\"cri-o://3d60cbe3cc8e817975ed6843aefdbfe3233e8857a6458d77322a623ed858ca18\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3d60cbe3cc8e817975ed6843aefdbfe3233e8857a6458d77322a623ed858ca18\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0a0b3aad440bea346393ff35f0fa9492282c2a69b4a4c170f56ca7af63666077\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0a0b3aad440bea346393ff35f0fa9492282c2a69b4a4c170f56ca7af63666077\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3b72566e3e3a5f8eb12033cd2342bc3e9ad8f091f47c9d65309888d91a1ceaf4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3b72566e3e3a5f8eb12033cd2342bc3e9ad8f091f47c9d65309888d91a1ceaf4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":
\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:27Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-rwfxn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:46Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:46 crc kubenswrapper[4780]: I1210 10:46:46.127759 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:46 crc kubenswrapper[4780]: I1210 10:46:46.127818 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:46 crc kubenswrapper[4780]: I1210 10:46:46.127831 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:46 crc kubenswrapper[4780]: I1210 10:46:46.127850 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:46 crc kubenswrapper[4780]: I1210 10:46:46.127863 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:46Z","lastTransitionTime":"2025-12-10T10:46:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:46 crc kubenswrapper[4780]: I1210 10:46:46.128971 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"88e16d6d-ae53-465a-a66f-e1aa2abbfb8c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://55c85e62eda4733b5f1e264e6903e3f61bc4759bdf3f891c5b513a2195e0daab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:44:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8df4b9f29751cd2ebfbdf2b33f30fb6f519721f13d03173b45c5905e64524c88\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:44:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://325438d9694126427a6f6905508c0feb1ab3918532c7d6fd929e63b409574f5f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:44:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath
\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cf105ae693d68f4e8d5eaae47d0827146c24312f507b378cf60de03a5034bfb7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:44:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:44:56Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:46Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:46 crc kubenswrapper[4780]: I1210 10:46:46.143663 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://738eca4a4800d0b83e165aaad1b57939c0fca3335dbf043de168bcc240b2c32c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for 
pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:46Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:46 crc kubenswrapper[4780]: I1210 10:46:46.159052 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:46Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:46 crc kubenswrapper[4780]: I1210 10:46:46.174003 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-8cwb7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"deadb49b-61b8-435f-8168-d7bd3c01b5ad\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://caa5c9ed8d7c77d4af7b9797ad6117d06def5e3ab3082d2fd61de5f1d0902a88\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b46e1864cd02c0ab49ad21329dedf09667d52948a3f59eac7f32302cc0b48bda\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-10T10:46:22Z\\\",\\\"message\\\":\\\"2025-12-10T10:45:36+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_86c2478e-d0b0-468d-aefe-1644c458013c\\\\n2025-12-10T10:45:36+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_86c2478e-d0b0-468d-aefe-1644c458013c to /host/opt/cni/bin/\\\\n2025-12-10T10:45:36Z [verbose] multus-daemon started\\\\n2025-12-10T10:45:36Z [verbose] Readiness Indicator file check\\\\n2025-12-10T10:46:21Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:46:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r67pg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:27Z\\\"}}\" for pod \"openshift-multus\"/\"multus-8cwb7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:46Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:46 crc kubenswrapper[4780]: I1210 10:46:46.187960 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-46s5p" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"24187953-1dc5-48d7-b00c-1e5876604b6b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:40Z\\\",\\\"message\\\":\\\"containers with 
unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:40Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jg69p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jg69p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:40Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-46s5p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:46Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:46 crc kubenswrapper[4780]: I1210 10:46:46.201997 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:46Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:46 crc kubenswrapper[4780]: I1210 10:46:46.214966 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-2lx8w" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8a3251eb-408c-42f1-b74d-261cb45eab71\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://414f485a56b9fb2fbea2bbc9687a588aabda6239cad44f8116ec680597dbe667\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lgkg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\
\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:27Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-2lx8w\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:46Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:46 crc kubenswrapper[4780]: I1210 10:46:46.232469 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:46 crc kubenswrapper[4780]: I1210 10:46:46.232521 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:46 crc kubenswrapper[4780]: I1210 10:46:46.232531 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:46 crc kubenswrapper[4780]: I1210 10:46:46.232546 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:46 crc kubenswrapper[4780]: I1210 10:46:46.232556 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:46Z","lastTransitionTime":"2025-12-10T10:46:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:46 crc kubenswrapper[4780]: I1210 10:46:46.235248 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-fpl55" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cc22221d-0c02-4e8c-8314-c2e6d9290b5e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ef0cc518de07a141be124545f2e547c630329ff507b0e0e71e4dee9b4be5d89a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ce7c3983123be2f92944322c2cf554e67cf80a51981415d9872bfc2db9f1ed3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cafa00280d671b48ed3628917793412a20b9d3be93a80d9a7775003081aea1ab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dbc444116b8592021f7d6ba0387d473521cba7c09ca78384be81f688af54554f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://782bcb7eb8341b73ff1c32c5af00aaefa2a513e5d1672f9f7df13cd05467131c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c39c3d56584d0939bfa9be938be09db5c93ddf50e0a56cacaa6e30c7b79475a1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ff51c6ad85356e07cc2059fe4b05b1061ae2d8eb
800f63ba392c8a4f36112ca5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8bae117db052a26101b63906a5bc25cd1ad0813261c80f78c00ee5235644c788\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-10T10:46:05Z\\\",\\\"message\\\":\\\"af71e}]}}] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {c02bd945-d57b-49ff-9cd3-202ed3574b26}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:} {Op:update Table:NAT Row:map[external_ip:192.168.126.11 logical_ip:10.217.0.59 options:{GoMap:map[stateless:false]} type:snat] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {dce28c51-c9f1-478b-97c8-7e209d6e7cbe}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:} {Op:mutate Table:Logical_Router Row:map[] Rows:[] Columns:[] Mutations:[{Column:nat Mutator:insert Value:{GoSet:[{GoUUID:dce28c51-c9f1-478b-97c8-7e209d6e7cbe}]}}] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {e3c4661a-36a6-47f0-a6c0-a4ee741f2224}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1210 10:46:05.290278 6546 services_controller.go:451] Built service openshift-console/console cluster-wide LB for network=default: []services.LB{services.LB{Name:\\\\\\\"Service_openshift-console/console_TCP_cluster\\\\\\\", UUID:\\\\\\\"\\\\\\\", Protocol:\\\\\\\"TCP\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-console/console\\\\\\\"}, Opts:services.LBOpts{Reject:true, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{services.LBRule{Source:services.Addr{IP:\\\\\\\"10.217.5.194\\\\\\\", Port:443, Template:(*services.Template\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-10T10:46:04Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ff51c6ad85356e07cc2059fe4b05b1061ae2d8eb800f63ba392c8a4f36112ca5\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-10T10:46:37Z\\\",\\\"message\\\":\\\"ector *v1alpha1.BaselineAdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1210 10:46:35.399534 6896 reflector.go:311] Stopping reflector *v1.EgressIP (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressip/v1/apis/informers/externalversions/factory.go:140\\\\nI1210 10:46:35.399722 6896 reflector.go:311] Stopping reflector *v1.Pod (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1210 10:46:35.400003 6896 reflector.go:311] Stopping reflector *v1.Namespace (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1210 10:46:35.400137 6896 reflector.go:311] Stopping reflector *v1.Service (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1210 10:46:35.400506 6896 reflector.go:311] Stopping reflector *v1alpha1.AdminNetworkPolicy (0s) from 
sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1210 10:46:35.436709 6896 shared_informer.go:320] Caches are synced for node-tracker-controller\\\\nI1210 10:46:35.436755 6896 services_controller.go:204] Setting up event handlers for services for network=default\\\\nI1210 10:46:35.436865 6896 ovnkube.go:599] Stopped ovnkube\\\\nI1210 10:46:35.436895 6896 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1210 10:46:35.437103 6896 ovnkube.go:137] failed to run ov\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-10T10:46:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://df663a4e7ac68c8967db1167ea07b83862e9d52aa13b3f10e51934bb18e7397e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\
\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ada0c2d5e74e1805777a330c39a3c1c0fb677f876387207a83c79bd7ed06775a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ada0c2d5e74e1805777a330c39a3c1c0fb677f876387207a83c79bd7ed06775a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:27Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-fpl55\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:46Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:46 crc kubenswrapper[4780]: I1210 10:46:46.247596 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-6m7tj" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a78da08f-1ec1-4cc7-af55-d527da423778\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://920fb034d3d8ce6bf28b128e33093aa5daa724b15c51c7e208ed94ccc2f4840e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jvtxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a4509159a8aa8b24876c86f827f346063fd6cd2603c15242f6507b0a4afaff3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jvtxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:38Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-6m7tj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:46Z is after 2025-08-24T17:21:41Z" Dec 10 
10:46:46 crc kubenswrapper[4780]: I1210 10:46:46.335876 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:46 crc kubenswrapper[4780]: I1210 10:46:46.335995 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:46 crc kubenswrapper[4780]: I1210 10:46:46.336009 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:46 crc kubenswrapper[4780]: I1210 10:46:46.336032 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:46 crc kubenswrapper[4780]: I1210 10:46:46.336048 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:46Z","lastTransitionTime":"2025-12-10T10:46:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:46 crc kubenswrapper[4780]: I1210 10:46:46.438249 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:46 crc kubenswrapper[4780]: I1210 10:46:46.438333 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:46 crc kubenswrapper[4780]: I1210 10:46:46.438349 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:46 crc kubenswrapper[4780]: I1210 10:46:46.438372 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:46 crc kubenswrapper[4780]: I1210 10:46:46.438385 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:46Z","lastTransitionTime":"2025-12-10T10:46:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:46 crc kubenswrapper[4780]: I1210 10:46:46.542020 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:46 crc kubenswrapper[4780]: I1210 10:46:46.542074 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:46 crc kubenswrapper[4780]: I1210 10:46:46.542087 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:46 crc kubenswrapper[4780]: I1210 10:46:46.542102 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:46 crc kubenswrapper[4780]: I1210 10:46:46.542113 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:46Z","lastTransitionTime":"2025-12-10T10:46:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:46 crc kubenswrapper[4780]: I1210 10:46:46.645120 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:46 crc kubenswrapper[4780]: I1210 10:46:46.645180 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:46 crc kubenswrapper[4780]: I1210 10:46:46.645191 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:46 crc kubenswrapper[4780]: I1210 10:46:46.645210 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:46 crc kubenswrapper[4780]: I1210 10:46:46.645221 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:46Z","lastTransitionTime":"2025-12-10T10:46:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:46 crc kubenswrapper[4780]: I1210 10:46:46.748691 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:46 crc kubenswrapper[4780]: I1210 10:46:46.748737 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:46 crc kubenswrapper[4780]: I1210 10:46:46.748746 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:46 crc kubenswrapper[4780]: I1210 10:46:46.748762 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:46 crc kubenswrapper[4780]: I1210 10:46:46.748776 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:46Z","lastTransitionTime":"2025-12-10T10:46:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:46 crc kubenswrapper[4780]: I1210 10:46:46.851678 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:46 crc kubenswrapper[4780]: I1210 10:46:46.851724 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:46 crc kubenswrapper[4780]: I1210 10:46:46.851741 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:46 crc kubenswrapper[4780]: I1210 10:46:46.851756 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:46 crc kubenswrapper[4780]: I1210 10:46:46.851767 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:46Z","lastTransitionTime":"2025-12-10T10:46:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:46 crc kubenswrapper[4780]: I1210 10:46:46.955171 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:46 crc kubenswrapper[4780]: I1210 10:46:46.955216 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:46 crc kubenswrapper[4780]: I1210 10:46:46.955224 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:46 crc kubenswrapper[4780]: I1210 10:46:46.955239 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:46 crc kubenswrapper[4780]: I1210 10:46:46.955249 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:46Z","lastTransitionTime":"2025-12-10T10:46:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:47 crc kubenswrapper[4780]: I1210 10:46:47.058606 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:47 crc kubenswrapper[4780]: I1210 10:46:47.058665 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:47 crc kubenswrapper[4780]: I1210 10:46:47.058680 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:47 crc kubenswrapper[4780]: I1210 10:46:47.058703 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:47 crc kubenswrapper[4780]: I1210 10:46:47.058718 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:47Z","lastTransitionTime":"2025-12-10T10:46:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:47 crc kubenswrapper[4780]: I1210 10:46:47.161531 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:47 crc kubenswrapper[4780]: I1210 10:46:47.161573 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:47 crc kubenswrapper[4780]: I1210 10:46:47.161584 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:47 crc kubenswrapper[4780]: I1210 10:46:47.161603 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:47 crc kubenswrapper[4780]: I1210 10:46:47.161617 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:47Z","lastTransitionTime":"2025-12-10T10:46:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:47 crc kubenswrapper[4780]: I1210 10:46:47.264226 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:47 crc kubenswrapper[4780]: I1210 10:46:47.264311 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:47 crc kubenswrapper[4780]: I1210 10:46:47.264331 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:47 crc kubenswrapper[4780]: I1210 10:46:47.264364 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:47 crc kubenswrapper[4780]: I1210 10:46:47.264393 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:47Z","lastTransitionTime":"2025-12-10T10:46:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:47 crc kubenswrapper[4780]: I1210 10:46:47.367959 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:47 crc kubenswrapper[4780]: I1210 10:46:47.368024 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:47 crc kubenswrapper[4780]: I1210 10:46:47.368036 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:47 crc kubenswrapper[4780]: I1210 10:46:47.368059 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:47 crc kubenswrapper[4780]: I1210 10:46:47.368068 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:47Z","lastTransitionTime":"2025-12-10T10:46:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:47 crc kubenswrapper[4780]: I1210 10:46:47.471173 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:47 crc kubenswrapper[4780]: I1210 10:46:47.471864 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:47 crc kubenswrapper[4780]: I1210 10:46:47.471945 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:47 crc kubenswrapper[4780]: I1210 10:46:47.471981 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:47 crc kubenswrapper[4780]: I1210 10:46:47.472003 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:47Z","lastTransitionTime":"2025-12-10T10:46:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:47 crc kubenswrapper[4780]: I1210 10:46:47.574316 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:47 crc kubenswrapper[4780]: I1210 10:46:47.574724 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:47 crc kubenswrapper[4780]: I1210 10:46:47.574734 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:47 crc kubenswrapper[4780]: I1210 10:46:47.574749 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:47 crc kubenswrapper[4780]: I1210 10:46:47.574763 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:47Z","lastTransitionTime":"2025-12-10T10:46:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:47 crc kubenswrapper[4780]: I1210 10:46:47.678339 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:47 crc kubenswrapper[4780]: I1210 10:46:47.678386 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:47 crc kubenswrapper[4780]: I1210 10:46:47.678399 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:47 crc kubenswrapper[4780]: I1210 10:46:47.678416 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:47 crc kubenswrapper[4780]: I1210 10:46:47.678428 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:47Z","lastTransitionTime":"2025-12-10T10:46:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:47 crc kubenswrapper[4780]: I1210 10:46:47.781198 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:47 crc kubenswrapper[4780]: I1210 10:46:47.781257 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:47 crc kubenswrapper[4780]: I1210 10:46:47.781268 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:47 crc kubenswrapper[4780]: I1210 10:46:47.781285 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:47 crc kubenswrapper[4780]: I1210 10:46:47.781295 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:47Z","lastTransitionTime":"2025-12-10T10:46:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:47 crc kubenswrapper[4780]: I1210 10:46:47.884078 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:47 crc kubenswrapper[4780]: I1210 10:46:47.884130 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:47 crc kubenswrapper[4780]: I1210 10:46:47.884142 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:47 crc kubenswrapper[4780]: I1210 10:46:47.884158 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:47 crc kubenswrapper[4780]: I1210 10:46:47.884168 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:47Z","lastTransitionTime":"2025-12-10T10:46:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:47 crc kubenswrapper[4780]: I1210 10:46:47.958542 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 10 10:46:47 crc kubenswrapper[4780]: I1210 10:46:47.958542 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 10 10:46:47 crc kubenswrapper[4780]: I1210 10:46:47.958851 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-46s5p" Dec 10 10:46:47 crc kubenswrapper[4780]: I1210 10:46:47.958979 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 10 10:46:47 crc kubenswrapper[4780]: E1210 10:46:47.959217 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-46s5p" podUID="24187953-1dc5-48d7-b00c-1e5876604b6b" Dec 10 10:46:47 crc kubenswrapper[4780]: E1210 10:46:47.959259 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 10 10:46:47 crc kubenswrapper[4780]: E1210 10:46:47.959322 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 10 10:46:47 crc kubenswrapper[4780]: E1210 10:46:47.959361 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 10 10:46:47 crc kubenswrapper[4780]: I1210 10:46:47.987001 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:47 crc kubenswrapper[4780]: I1210 10:46:47.987051 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:47 crc kubenswrapper[4780]: I1210 10:46:47.987063 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:47 crc kubenswrapper[4780]: I1210 10:46:47.987081 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:47 crc kubenswrapper[4780]: I1210 10:46:47.987094 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:47Z","lastTransitionTime":"2025-12-10T10:46:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:48 crc kubenswrapper[4780]: I1210 10:46:48.090590 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:48 crc kubenswrapper[4780]: I1210 10:46:48.090669 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:48 crc kubenswrapper[4780]: I1210 10:46:48.090687 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:48 crc kubenswrapper[4780]: I1210 10:46:48.090709 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:48 crc kubenswrapper[4780]: I1210 10:46:48.090721 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:48Z","lastTransitionTime":"2025-12-10T10:46:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:48 crc kubenswrapper[4780]: I1210 10:46:48.194398 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:48 crc kubenswrapper[4780]: I1210 10:46:48.194453 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:48 crc kubenswrapper[4780]: I1210 10:46:48.194462 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:48 crc kubenswrapper[4780]: I1210 10:46:48.194478 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:48 crc kubenswrapper[4780]: I1210 10:46:48.194492 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:48Z","lastTransitionTime":"2025-12-10T10:46:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:48 crc kubenswrapper[4780]: I1210 10:46:48.297444 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:48 crc kubenswrapper[4780]: I1210 10:46:48.297491 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:48 crc kubenswrapper[4780]: I1210 10:46:48.297499 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:48 crc kubenswrapper[4780]: I1210 10:46:48.297516 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:48 crc kubenswrapper[4780]: I1210 10:46:48.297557 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:48Z","lastTransitionTime":"2025-12-10T10:46:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:48 crc kubenswrapper[4780]: I1210 10:46:48.400073 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:48 crc kubenswrapper[4780]: I1210 10:46:48.400166 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:48 crc kubenswrapper[4780]: I1210 10:46:48.400185 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:48 crc kubenswrapper[4780]: I1210 10:46:48.400211 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:48 crc kubenswrapper[4780]: I1210 10:46:48.400230 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:48Z","lastTransitionTime":"2025-12-10T10:46:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:48 crc kubenswrapper[4780]: I1210 10:46:48.503683 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:48 crc kubenswrapper[4780]: I1210 10:46:48.503733 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:48 crc kubenswrapper[4780]: I1210 10:46:48.503744 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:48 crc kubenswrapper[4780]: I1210 10:46:48.503761 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:48 crc kubenswrapper[4780]: I1210 10:46:48.503773 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:48Z","lastTransitionTime":"2025-12-10T10:46:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:48 crc kubenswrapper[4780]: I1210 10:46:48.608393 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:48 crc kubenswrapper[4780]: I1210 10:46:48.608444 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:48 crc kubenswrapper[4780]: I1210 10:46:48.608453 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:48 crc kubenswrapper[4780]: I1210 10:46:48.608473 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:48 crc kubenswrapper[4780]: I1210 10:46:48.608487 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:48Z","lastTransitionTime":"2025-12-10T10:46:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:48 crc kubenswrapper[4780]: I1210 10:46:48.711696 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:48 crc kubenswrapper[4780]: I1210 10:46:48.711739 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:48 crc kubenswrapper[4780]: I1210 10:46:48.711747 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:48 crc kubenswrapper[4780]: I1210 10:46:48.711763 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:48 crc kubenswrapper[4780]: I1210 10:46:48.711774 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:48Z","lastTransitionTime":"2025-12-10T10:46:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:48 crc kubenswrapper[4780]: I1210 10:46:48.814347 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:48 crc kubenswrapper[4780]: I1210 10:46:48.814414 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:48 crc kubenswrapper[4780]: I1210 10:46:48.814429 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:48 crc kubenswrapper[4780]: I1210 10:46:48.814449 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:48 crc kubenswrapper[4780]: I1210 10:46:48.814464 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:48Z","lastTransitionTime":"2025-12-10T10:46:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:48 crc kubenswrapper[4780]: I1210 10:46:48.918122 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:48 crc kubenswrapper[4780]: I1210 10:46:48.918176 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:48 crc kubenswrapper[4780]: I1210 10:46:48.918192 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:48 crc kubenswrapper[4780]: I1210 10:46:48.918212 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:48 crc kubenswrapper[4780]: I1210 10:46:48.918224 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:48Z","lastTransitionTime":"2025-12-10T10:46:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:48 crc kubenswrapper[4780]: I1210 10:46:48.958785 4780 scope.go:117] "RemoveContainer" containerID="ff51c6ad85356e07cc2059fe4b05b1061ae2d8eb800f63ba392c8a4f36112ca5" Dec 10 10:46:48 crc kubenswrapper[4780]: E1210 10:46:48.959058 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-fpl55_openshift-ovn-kubernetes(cc22221d-0c02-4e8c-8314-c2e6d9290b5e)\"" pod="openshift-ovn-kubernetes/ovnkube-node-fpl55" podUID="cc22221d-0c02-4e8c-8314-c2e6d9290b5e" Dec 10 10:46:48 crc kubenswrapper[4780]: I1210 10:46:48.975058 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"da4d38a3-53c0-417d-a86f-3496714bd352\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7f0f1f88e79f81b145ad50cc2b0a6e66de12f33c6819aaa356c8e6f7808384f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:44:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d6c4b099607f36efe13488a53c66715ba756493eaa11612cef18b8174b7b48ef\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:44:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a352e1e0c2a6fcb27ed2d53b1952586d56239ac801471f41d319d4b9390f7334\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-
cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:44:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://960f2d7c617f5d0b3281d431308985bb7419af77581a404554f849d22ffa1687\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f71a2483b5040f737133b92baa6ea162f365c492a0d860a50ba8eabd7ca23519\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-10T10:45:21Z\\\",\\\"message\\\":\\\"file observer\\\\nW1210 10:45:20.821547 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1210 10:45:20.821865 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1210 10:45:20.824662 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-536389068/tls.crt::/tmp/serving-cert-536389068/tls.key\\\\\\\"\\\\nI1210 10:45:21.107127 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1210 10:45:21.109726 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1210 10:45:21.109750 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1210 10:45:21.109791 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1210 10:45:21.109797 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1210 10:45:21.115580 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1210 10:45:21.115608 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1210 10:45:21.115614 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1210 10:45:21.115621 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1210 10:45:21.115664 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1210 10:45:21.115672 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1210 10:45:21.115676 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1210 10:45:21.117097 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1210 10:45:21.119004 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:14Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://531ca3c69106ee33211f11eba4a6fcbcf6cf5b8b6ee16193b5a118999537d2e0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:00Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6a7d241fa7fd864088c8bfa29a4457b38b3e78a6846872d6f52aeebb0de99c58\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6a7d241fa7fd864088c8bfa29a4457b38b3e78a6846872d6f52aeebb0de99c58\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:44:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:44:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:44:56Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:48Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:48 crc kubenswrapper[4780]: I1210 10:46:48.990637 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8ca741d2df061bc77e981eca8a555ba59082c6db0357093490908a1b27054340\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:48Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:49 crc kubenswrapper[4780]: I1210 10:46:49.004787 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6bf1dca1-b191-4796-b326-baac53e84045\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://439376b6f14a03790c4d49523ce4f21e3ee73ee0a81954f873409cf956048f53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sh92h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://57ec3f3a4da992b4288db7d6633da18e479fdc56b10c503a2a44368671882f0d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sh92h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:27Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-xhdr5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:49Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:49 crc kubenswrapper[4780]: I1210 10:46:49.017882 4780 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6baf1baf-f093-40f2-b258-117118f89050\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f9311d33a46721ffe228b91722ad2c6f36efb49b5e31cb703de657cc4a40dcbe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:44:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6136af77a31fde9c46edcf705caf249a029c3136befb7c0d84a26eab36d29a20\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6136af77a31fde9c46edcf705caf249a029c3136befb7c0d84a26eab36d29a20\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:44:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:44:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:44:56Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:49Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:49 crc kubenswrapper[4780]: I1210 10:46:49.021471 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:49 crc kubenswrapper[4780]: 
I1210 10:46:49.021505 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:49 crc kubenswrapper[4780]: I1210 10:46:49.021518 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:49 crc kubenswrapper[4780]: I1210 10:46:49.021535 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:49 crc kubenswrapper[4780]: I1210 10:46:49.021547 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:49Z","lastTransitionTime":"2025-12-10T10:46:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:49 crc kubenswrapper[4780]: I1210 10:46:49.051656 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9618efa9-9ecd-45a2-9e0e-e7a6f5d2566c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b36eaabc1d3267a1ecc43c1b45a77bb14b87f6b9d376062e48ceb3daca3729b3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ea1c2a440f85963adf3d061a880e537029c5c24a2291a46939e4ec9d939793ab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-
12-10T10:45:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://41450938b04291094dea7c8bf9bf7a8a105fa00797e51a7589d4c6e8caf5eba5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b332743ab49123e4673694feef994f5bb845661ca73dc8c0ac3be56dd6aa7e03\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e07cb0ca88a1b7a32f7c8ba948b0d5f90a08cb9f10c95285d6985d2daef5529b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0fa41953f04ce503bed17439bbaf1f2e80eb1aa886cda19a5a27039f3015c5b7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0fa41953f04ce503bed17439bbaf1f2e80eb1aa886cda19a5a27039f3015c
5b7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:44:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:44:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f16bee2af08ca98dad1fbbc5864b36f90851e2bcac54b37870c560109e1206f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9f16bee2af08ca98dad1fbbc5864b36f90851e2bcac54b37870c560109e1206f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:44:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:44:58Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://e2c2882ae9c017aadcf220f621d126e4b993c91d6fc35cc71ea7a37eadc9879a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e2c2882ae9c017aadcf220f621d126e4b993c91d6fc35cc71ea7a37eadc9879a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:44:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:44:56Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:49Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:49 crc kubenswrapper[4780]: I1210 10:46:49.065678 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"056c06c7-eb0f-4fb7-9f86-2884fd0d1e60\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b9592145faf65060693a3f1e14db253e10cd09c642ea0aa1a7682f5b06872f14\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:44:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://eccc5070135705c193e1020872bffc1b8aeb70dbe82f1bb520f36f012ca00703\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:44:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://72f3bd530fbcc66ed779ee9f6ef1c442f2697ca027aad5bd2473f42101d55528\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:44:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3970fa6bd2aa3e93a4473d78218e9dd494aa5f7056efbcfb123160bb63162192\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3970fa6bd2aa3e93a4473d78218e9dd494aa5f7056efbcfb123160bb63162192\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:44:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:44:57Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:44:56Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:49Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:49 crc kubenswrapper[4780]: I1210 10:46:49.078765 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:49Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:49 crc kubenswrapper[4780]: I1210 10:46:49.092999 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://acd480054c5d19efdbcce679b2246ce2972593aa710b1074a2fa90006f351c66\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7c28d1629c1d97373d475f14e54585e81a67114d5a69e21be9d1e750040d6d5c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"m
ountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:49Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:49 crc kubenswrapper[4780]: I1210 10:46:49.104359 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-msm77" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c1046aa3-9bf1-4013-8e8d-5629f08ed5e2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6a59aa8cd7a70e094ff80b0ae626aaabb77fb7ffdcffa1320945909c32da0704\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2ttwk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:30Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-msm77\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:49Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:49 crc kubenswrapper[4780]: I1210 10:46:49.116172 4780 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"88e16d6d-ae53-465a-a66f-e1aa2abbfb8c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://55c85e62eda4733b5f1e264e6903e3f61bc4759bdf3f891c5b513a2195e0daab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:44:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8df4b9f29751cd2ebfbdf2b33f30fb6f519721f13d03173b45c5905e64524c88\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:44:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://325438d9694126427a6f6905508c0feb1ab3918532c7d6fd929e63b409574f5f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:44:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cf105ae693d68f4e8d5eaae47d0827146c24312f507b378cf6
0de03a5034bfb7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:44:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:44:56Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:49Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:49 crc kubenswrapper[4780]: I1210 10:46:49.124191 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:49 crc kubenswrapper[4780]: I1210 10:46:49.124232 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:49 crc kubenswrapper[4780]: I1210 10:46:49.124244 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:49 crc kubenswrapper[4780]: I1210 10:46:49.124262 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:49 crc kubenswrapper[4780]: I1210 10:46:49.124274 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:49Z","lastTransitionTime":"2025-12-10T10:46:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:49 crc kubenswrapper[4780]: I1210 10:46:49.128974 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://738eca4a4800d0b83e165aaad1b57939c0fca3335dbf043de168bcc240b2c32c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:49Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:49 crc kubenswrapper[4780]: I1210 10:46:49.142500 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:49Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:49 crc kubenswrapper[4780]: I1210 10:46:49.156036 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-8cwb7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"deadb49b-61b8-435f-8168-d7bd3c01b5ad\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://caa5c9ed8d7c77d4af7b9797ad6117d06def5e3ab3082d2fd61de5f1d0902a88\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b46e1864cd02c0ab49ad21329dedf09667d52948a3f59eac7f32302cc0b48bda\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-10T10:46:22Z\\\",\\\"message\\\":\\\"2025-12-10T10:45:36+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_86c2478e-d0b0-468d-aefe-1644c458013c\\\\n2025-12-10T10:45:36+00:00 [cnibincopy] 
Successfully moved files in /host/opt/cni/bin/upgrade_86c2478e-d0b0-468d-aefe-1644c458013c to /host/opt/cni/bin/\\\\n2025-12-10T10:45:36Z [verbose] multus-daemon started\\\\n2025-12-10T10:45:36Z [verbose] Readiness Indicator file check\\\\n2025-12-10T10:46:21Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:46:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r67pg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:27Z\\\"}}\" for pod \"openshift-multus\"/\"multus-8cwb7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:49Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:49 crc kubenswrapper[4780]: I1210 10:46:49.169679 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-rwfxn" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e522fb8-b104-4f14-a3a2-628fbe0ef36c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f3318d09af03d52017f648e9727180287dbcfce150242eaeb5f95be2fa6a1374\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5855e463a188d72cb8eac92bd97b9b2f91550a9a5437d7df83405bbe7a104f5d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5855e463a188d72cb8eac92bd97b9b2f91550a9a5437d7df83405bbe7a104f5d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4a16db7b744311d27339440bc46ebd09ba4264c5892ff205c6125af2a127e67e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4a16db7b744311d27339440bc46ebd09ba4264c5892ff205c6125af2a127e67e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fcbeee4d4e545150d725187049a89aa3d35ddd8f7767a6af0ea84a385454c316\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fcbeee4d4e545150d725187049a89aa3d35ddd8f7767a6af0ea84a385454c316\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3d60cbe3cc8e817975ed6843aefdbfe3233e8857a6458d77322a623ed858ca18\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3d60cbe3cc8e817975ed6843aefdbfe3233e8857a6458d77322a623ed858ca18\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0a0b3aad440bea346393ff35f0fa9492282c2a69b4a4c170f56ca7af63666077\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0a0b3aad440bea346393ff35f0fa9492282c2a69b4a4c170f56ca7af63666077\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3b72566e3e3a5f8eb12033cd2342bc3e9ad8f091f47c9d65309888d91a1ceaf4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3b72566e3e3a5f8eb12033cd2342bc3e9ad8f091f47c9d65309888d91a1ceaf4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:27Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-rwfxn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:49Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:49 crc kubenswrapper[4780]: I1210 10:46:49.185224 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:49Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:49 crc kubenswrapper[4780]: I1210 10:46:49.198107 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-2lx8w" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8a3251eb-408c-42f1-b74d-261cb45eab71\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://414f485a56b9fb2fbea2bbc9687a588aabda6239cad44f8116ec680597dbe667\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lgkg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:27Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-2lx8w\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:49Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:49 crc kubenswrapper[4780]: I1210 10:46:49.219452 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-fpl55" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cc22221d-0c02-4e8c-8314-c2e6d9290b5e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ef0cc518de07a141be124545f2e547c630329ff507b0e0e71e4dee9b4be5d89a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ce7c3983123be2f92944322c2cf554e67cf80a51981415d9872bfc2db9f1ed3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cafa00280d671b48ed3628917793412a20b9d3be93a80d9a7775003081aea1ab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name
\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dbc444116b8592021f7d6ba0387d473521cba7c09ca78384be81f688af54554f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://782bcb7eb8341b73ff1c32c5af00aaefa2a513e5d1672f9f7df13cd05467131c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c39c3d56584d0939bfa9be938be09db5c93ddf50e0a56cacaa6e30c7b79475a1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\
"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ff51c6ad85356e07cc2059fe4b05b1061ae2d8eb800f63ba392c8a4f36112ca5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ff51c6ad85356e07cc2059fe4b05b1061ae2d8eb800f63ba392c8a4f36112ca5\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-10T10:46:37Z\\\",\\\"message\\\":\\\"ector *v1alpha1.BaselineAdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1210 10:46:35.399534 6896 reflector.go:311] Stopping reflector *v1.EgressIP (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressip/v1/apis/informers/externalversions/factory.go:140\\\\nI1210 10:46:35.399722 6896 reflector.go:311] Stopping reflector *v1.Pod (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1210 10:46:35.400003 6896 reflector.go:311] Stopping reflector *v1.Namespace (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1210 10:46:35.400137 6896 reflector.go:311] Stopping reflector *v1.Service (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1210 10:46:35.400506 6896 reflector.go:311] Stopping reflector *v1alpha1.AdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1210 10:46:35.436709 6896 shared_informer.go:320] Caches are synced for node-tracker-controller\\\\nI1210 10:46:35.436755 6896 services_controller.go:204] Setting up event handlers for services for network=default\\\\nI1210 10:46:35.436865 6896 ovnkube.go:599] Stopped ovnkube\\\\nI1210 10:46:35.436895 6896 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1210 10:46:35.437103 6896 ovnkube.go:137] failed to run ov\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-10T10:46:33Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 40s restarting failed container=ovnkube-controller 
pod=ovnkube-node-fpl55_openshift-ovn-kubernetes(cc22221d-0c02-4e8c-8314-c2e6d9290b5e)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://df663a4e7ac68c8967db1167ea07b83862e9d52aa13b3f10e51934bb18e7397e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ada0c2d5e74e1805777a330c39a3c1c0fb677f876387207a83c79bd7ed06775a\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ada0c2d5e74e1805777a330c39a3c1c0fb677f876387207a83c79bd7ed06775a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:27Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-fpl55\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:49Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:49 crc kubenswrapper[4780]: I1210 10:46:49.226621 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:49 crc kubenswrapper[4780]: I1210 10:46:49.226708 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:49 crc kubenswrapper[4780]: I1210 10:46:49.226722 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:49 crc kubenswrapper[4780]: I1210 10:46:49.226745 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:49 crc kubenswrapper[4780]: I1210 10:46:49.226758 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:49Z","lastTransitionTime":"2025-12-10T10:46:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:49 crc kubenswrapper[4780]: I1210 10:46:49.238896 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-6m7tj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a78da08f-1ec1-4cc7-af55-d527da423778\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://920fb034d3d8ce6bf28b128e33093aa5daa724b15c51c7e208ed94ccc2f4840e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jvtxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a4509159a8aa8b24876c86f827f346063fd6cd2603c15242f6507b0a4afaff3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jvtxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:38Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-6m7tj\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:49Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:49 crc kubenswrapper[4780]: I1210 10:46:49.254338 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-46s5p" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"24187953-1dc5-48d7-b00c-1e5876604b6b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:40Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:40Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jg69p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jg69p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:40Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-46s5p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:49Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:49 crc 
kubenswrapper[4780]: I1210 10:46:49.330199 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:49 crc kubenswrapper[4780]: I1210 10:46:49.330240 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:49 crc kubenswrapper[4780]: I1210 10:46:49.330249 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:49 crc kubenswrapper[4780]: I1210 10:46:49.330265 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:49 crc kubenswrapper[4780]: I1210 10:46:49.330276 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:49Z","lastTransitionTime":"2025-12-10T10:46:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:49 crc kubenswrapper[4780]: I1210 10:46:49.434056 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:49 crc kubenswrapper[4780]: I1210 10:46:49.434112 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:49 crc kubenswrapper[4780]: I1210 10:46:49.434126 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:49 crc kubenswrapper[4780]: I1210 10:46:49.434151 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:49 crc kubenswrapper[4780]: I1210 10:46:49.434163 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:49Z","lastTransitionTime":"2025-12-10T10:46:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:49 crc kubenswrapper[4780]: I1210 10:46:49.536660 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:49 crc kubenswrapper[4780]: I1210 10:46:49.536998 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:49 crc kubenswrapper[4780]: I1210 10:46:49.537121 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:49 crc kubenswrapper[4780]: I1210 10:46:49.537283 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:49 crc kubenswrapper[4780]: I1210 10:46:49.537368 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:49Z","lastTransitionTime":"2025-12-10T10:46:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:49 crc kubenswrapper[4780]: I1210 10:46:49.641696 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:49 crc kubenswrapper[4780]: I1210 10:46:49.641849 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:49 crc kubenswrapper[4780]: I1210 10:46:49.642306 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:49 crc kubenswrapper[4780]: I1210 10:46:49.642699 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:49 crc kubenswrapper[4780]: I1210 10:46:49.642791 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:49Z","lastTransitionTime":"2025-12-10T10:46:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:49 crc kubenswrapper[4780]: I1210 10:46:49.746727 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:49 crc kubenswrapper[4780]: I1210 10:46:49.746806 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:49 crc kubenswrapper[4780]: I1210 10:46:49.746819 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:49 crc kubenswrapper[4780]: I1210 10:46:49.746848 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:49 crc kubenswrapper[4780]: I1210 10:46:49.746862 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:49Z","lastTransitionTime":"2025-12-10T10:46:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:49 crc kubenswrapper[4780]: I1210 10:46:49.850611 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:49 crc kubenswrapper[4780]: I1210 10:46:49.850685 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:49 crc kubenswrapper[4780]: I1210 10:46:49.850713 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:49 crc kubenswrapper[4780]: I1210 10:46:49.850737 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:49 crc kubenswrapper[4780]: I1210 10:46:49.850752 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:49Z","lastTransitionTime":"2025-12-10T10:46:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:49 crc kubenswrapper[4780]: I1210 10:46:49.954709 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:49 crc kubenswrapper[4780]: I1210 10:46:49.955237 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:49 crc kubenswrapper[4780]: I1210 10:46:49.955398 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:49 crc kubenswrapper[4780]: I1210 10:46:49.955518 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:49 crc kubenswrapper[4780]: I1210 10:46:49.955651 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:49Z","lastTransitionTime":"2025-12-10T10:46:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:49 crc kubenswrapper[4780]: I1210 10:46:49.958314 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 10 10:46:49 crc kubenswrapper[4780]: I1210 10:46:49.958480 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 10 10:46:49 crc kubenswrapper[4780]: I1210 10:46:49.958630 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 10 10:46:49 crc kubenswrapper[4780]: E1210 10:46:49.958800 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 10 10:46:49 crc kubenswrapper[4780]: I1210 10:46:49.958812 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-46s5p" Dec 10 10:46:49 crc kubenswrapper[4780]: E1210 10:46:49.958963 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 10 10:46:49 crc kubenswrapper[4780]: E1210 10:46:49.960172 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 10 10:46:49 crc kubenswrapper[4780]: E1210 10:46:49.960324 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-46s5p" podUID="24187953-1dc5-48d7-b00c-1e5876604b6b" Dec 10 10:46:50 crc kubenswrapper[4780]: I1210 10:46:50.059314 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:50 crc kubenswrapper[4780]: I1210 10:46:50.059400 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:50 crc kubenswrapper[4780]: I1210 10:46:50.059410 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:50 crc kubenswrapper[4780]: I1210 10:46:50.059428 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:50 crc kubenswrapper[4780]: I1210 10:46:50.059442 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:50Z","lastTransitionTime":"2025-12-10T10:46:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:50 crc kubenswrapper[4780]: I1210 10:46:50.162639 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:50 crc kubenswrapper[4780]: I1210 10:46:50.162710 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:50 crc kubenswrapper[4780]: I1210 10:46:50.162727 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:50 crc kubenswrapper[4780]: I1210 10:46:50.162751 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:50 crc kubenswrapper[4780]: I1210 10:46:50.162765 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:50Z","lastTransitionTime":"2025-12-10T10:46:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:50 crc kubenswrapper[4780]: I1210 10:46:50.266052 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:50 crc kubenswrapper[4780]: I1210 10:46:50.266451 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:50 crc kubenswrapper[4780]: I1210 10:46:50.266582 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:50 crc kubenswrapper[4780]: I1210 10:46:50.266707 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:50 crc kubenswrapper[4780]: I1210 10:46:50.266808 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:50Z","lastTransitionTime":"2025-12-10T10:46:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:50 crc kubenswrapper[4780]: I1210 10:46:50.370144 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:50 crc kubenswrapper[4780]: I1210 10:46:50.370243 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:50 crc kubenswrapper[4780]: I1210 10:46:50.370269 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:50 crc kubenswrapper[4780]: I1210 10:46:50.370303 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:50 crc kubenswrapper[4780]: I1210 10:46:50.370323 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:50Z","lastTransitionTime":"2025-12-10T10:46:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:50 crc kubenswrapper[4780]: I1210 10:46:50.474329 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:50 crc kubenswrapper[4780]: I1210 10:46:50.474405 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:50 crc kubenswrapper[4780]: I1210 10:46:50.474420 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:50 crc kubenswrapper[4780]: I1210 10:46:50.474441 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:50 crc kubenswrapper[4780]: I1210 10:46:50.474453 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:50Z","lastTransitionTime":"2025-12-10T10:46:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:50 crc kubenswrapper[4780]: I1210 10:46:50.578043 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:50 crc kubenswrapper[4780]: I1210 10:46:50.578116 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:50 crc kubenswrapper[4780]: I1210 10:46:50.578133 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:50 crc kubenswrapper[4780]: I1210 10:46:50.578157 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:50 crc kubenswrapper[4780]: I1210 10:46:50.578171 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:50Z","lastTransitionTime":"2025-12-10T10:46:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:50 crc kubenswrapper[4780]: I1210 10:46:50.682057 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:50 crc kubenswrapper[4780]: I1210 10:46:50.682440 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:50 crc kubenswrapper[4780]: I1210 10:46:50.682537 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:50 crc kubenswrapper[4780]: I1210 10:46:50.682633 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:50 crc kubenswrapper[4780]: I1210 10:46:50.682707 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:50Z","lastTransitionTime":"2025-12-10T10:46:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:50 crc kubenswrapper[4780]: I1210 10:46:50.786252 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:50 crc kubenswrapper[4780]: I1210 10:46:50.786295 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:50 crc kubenswrapper[4780]: I1210 10:46:50.786307 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:50 crc kubenswrapper[4780]: I1210 10:46:50.786325 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:50 crc kubenswrapper[4780]: I1210 10:46:50.786337 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:50Z","lastTransitionTime":"2025-12-10T10:46:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:50 crc kubenswrapper[4780]: I1210 10:46:50.889171 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:50 crc kubenswrapper[4780]: I1210 10:46:50.889220 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:50 crc kubenswrapper[4780]: I1210 10:46:50.889236 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:50 crc kubenswrapper[4780]: I1210 10:46:50.889256 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:50 crc kubenswrapper[4780]: I1210 10:46:50.889268 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:50Z","lastTransitionTime":"2025-12-10T10:46:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:50 crc kubenswrapper[4780]: I1210 10:46:50.996144 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:50 crc kubenswrapper[4780]: I1210 10:46:50.996215 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:50 crc kubenswrapper[4780]: I1210 10:46:50.996227 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:50 crc kubenswrapper[4780]: I1210 10:46:50.996264 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:50 crc kubenswrapper[4780]: I1210 10:46:50.996278 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:50Z","lastTransitionTime":"2025-12-10T10:46:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:51 crc kubenswrapper[4780]: I1210 10:46:51.099319 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:51 crc kubenswrapper[4780]: I1210 10:46:51.099377 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:51 crc kubenswrapper[4780]: I1210 10:46:51.099387 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:51 crc kubenswrapper[4780]: I1210 10:46:51.099405 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:51 crc kubenswrapper[4780]: I1210 10:46:51.099419 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:51Z","lastTransitionTime":"2025-12-10T10:46:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:51 crc kubenswrapper[4780]: I1210 10:46:51.203137 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:51 crc kubenswrapper[4780]: I1210 10:46:51.203240 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:51 crc kubenswrapper[4780]: I1210 10:46:51.203259 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:51 crc kubenswrapper[4780]: I1210 10:46:51.203283 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:51 crc kubenswrapper[4780]: I1210 10:46:51.203314 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:51Z","lastTransitionTime":"2025-12-10T10:46:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:51 crc kubenswrapper[4780]: I1210 10:46:51.307591 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:51 crc kubenswrapper[4780]: I1210 10:46:51.307638 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:51 crc kubenswrapper[4780]: I1210 10:46:51.307650 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:51 crc kubenswrapper[4780]: I1210 10:46:51.307676 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:51 crc kubenswrapper[4780]: I1210 10:46:51.307688 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:51Z","lastTransitionTime":"2025-12-10T10:46:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:51 crc kubenswrapper[4780]: I1210 10:46:51.411671 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:51 crc kubenswrapper[4780]: I1210 10:46:51.411737 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:51 crc kubenswrapper[4780]: I1210 10:46:51.411751 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:51 crc kubenswrapper[4780]: I1210 10:46:51.411771 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:51 crc kubenswrapper[4780]: I1210 10:46:51.411784 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:51Z","lastTransitionTime":"2025-12-10T10:46:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:51 crc kubenswrapper[4780]: I1210 10:46:51.436824 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:51 crc kubenswrapper[4780]: I1210 10:46:51.436896 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:51 crc kubenswrapper[4780]: I1210 10:46:51.436965 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:51 crc kubenswrapper[4780]: I1210 10:46:51.437015 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:51 crc kubenswrapper[4780]: I1210 10:46:51.437042 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:51Z","lastTransitionTime":"2025-12-10T10:46:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:51 crc kubenswrapper[4780]: E1210 10:46:51.457679 4780 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:46:51Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:51Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:46:51Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:51Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:46:51Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:51Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:46:51Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:51Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"90085e8a-5ea9-4564-85e4-5635b00d094d\\\",\\\"systemUUID\\\":\\\"0182e509-70c5-4f26-9ad3-610230bb601e\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:51Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:51 crc kubenswrapper[4780]: I1210 10:46:51.463875 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:51 crc kubenswrapper[4780]: I1210 10:46:51.463953 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Dec 10 10:46:51 crc kubenswrapper[4780]: I1210 10:46:51.463965 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:51 crc kubenswrapper[4780]: I1210 10:46:51.463979 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:51 crc kubenswrapper[4780]: I1210 10:46:51.463990 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:51Z","lastTransitionTime":"2025-12-10T10:46:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:51 crc kubenswrapper[4780]: E1210 10:46:51.481129 4780 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:46:51Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:51Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:46:51Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:51Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:46:51Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:51Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:46:51Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:51Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"90085e8a-5ea9-4564-85e4-5635b00d094d\\\",\\\"systemUUID\\\":\\\"0182e509-70c5-4f26-9ad3-610230bb601e\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:51Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:51 crc kubenswrapper[4780]: I1210 10:46:51.486282 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:51 crc kubenswrapper[4780]: I1210 10:46:51.486359 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Dec 10 10:46:51 crc kubenswrapper[4780]: I1210 10:46:51.486369 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:51 crc kubenswrapper[4780]: I1210 10:46:51.486385 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:51 crc kubenswrapper[4780]: I1210 10:46:51.486396 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:51Z","lastTransitionTime":"2025-12-10T10:46:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:51 crc kubenswrapper[4780]: E1210 10:46:51.504411 4780 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:46:51Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:51Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:46:51Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:51Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:46:51Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:51Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:46:51Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:51Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"90085e8a-5ea9-4564-85e4-5635b00d094d\\\",\\\"systemUUID\\\":\\\"0182e509-70c5-4f26-9ad3-610230bb601e\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:51Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:51 crc kubenswrapper[4780]: I1210 10:46:51.509620 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:51 crc kubenswrapper[4780]: I1210 10:46:51.509687 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Dec 10 10:46:51 crc kubenswrapper[4780]: I1210 10:46:51.509704 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:51 crc kubenswrapper[4780]: I1210 10:46:51.509727 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:51 crc kubenswrapper[4780]: I1210 10:46:51.509743 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:51Z","lastTransitionTime":"2025-12-10T10:46:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:51 crc kubenswrapper[4780]: E1210 10:46:51.529703 4780 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:46:51Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:51Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:46:51Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:51Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:46:51Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:51Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:46:51Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:51Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"90085e8a-5ea9-4564-85e4-5635b00d094d\\\",\\\"systemUUID\\\":\\\"0182e509-70c5-4f26-9ad3-610230bb601e\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:51Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:51 crc kubenswrapper[4780]: I1210 10:46:51.535340 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:51 crc kubenswrapper[4780]: I1210 10:46:51.535394 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Dec 10 10:46:51 crc kubenswrapper[4780]: I1210 10:46:51.535409 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:51 crc kubenswrapper[4780]: I1210 10:46:51.535425 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:51 crc kubenswrapper[4780]: I1210 10:46:51.535436 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:51Z","lastTransitionTime":"2025-12-10T10:46:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:51 crc kubenswrapper[4780]: E1210 10:46:51.549308 4780 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:46:51Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:51Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:46:51Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:51Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:46:51Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:51Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:46:51Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:51Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"90085e8a-5ea9-4564-85e4-5635b00d094d\\\",\\\"systemUUID\\\":\\\"0182e509-70c5-4f26-9ad3-610230bb601e\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:51Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:51 crc kubenswrapper[4780]: E1210 10:46:51.549429 4780 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Dec 10 10:46:51 crc kubenswrapper[4780]: I1210 10:46:51.551831 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Dec 10 10:46:51 crc kubenswrapper[4780]: I1210 10:46:51.551873 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:51 crc kubenswrapper[4780]: I1210 10:46:51.551885 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:51 crc kubenswrapper[4780]: I1210 10:46:51.551904 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:51 crc kubenswrapper[4780]: I1210 10:46:51.551933 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:51Z","lastTransitionTime":"2025-12-10T10:46:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:51 crc kubenswrapper[4780]: I1210 10:46:51.655203 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:51 crc kubenswrapper[4780]: I1210 10:46:51.655288 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:51 crc kubenswrapper[4780]: I1210 10:46:51.655313 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:51 crc kubenswrapper[4780]: I1210 10:46:51.655347 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:51 crc kubenswrapper[4780]: I1210 10:46:51.655371 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:51Z","lastTransitionTime":"2025-12-10T10:46:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:51 crc kubenswrapper[4780]: I1210 10:46:51.760372 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:51 crc kubenswrapper[4780]: I1210 10:46:51.760426 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:51 crc kubenswrapper[4780]: I1210 10:46:51.760440 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:51 crc kubenswrapper[4780]: I1210 10:46:51.760464 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:51 crc kubenswrapper[4780]: I1210 10:46:51.760480 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:51Z","lastTransitionTime":"2025-12-10T10:46:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:51 crc kubenswrapper[4780]: I1210 10:46:51.863429 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:51 crc kubenswrapper[4780]: I1210 10:46:51.863507 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:51 crc kubenswrapper[4780]: I1210 10:46:51.863602 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:51 crc kubenswrapper[4780]: I1210 10:46:51.863638 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:51 crc kubenswrapper[4780]: I1210 10:46:51.863662 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:51Z","lastTransitionTime":"2025-12-10T10:46:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:51 crc kubenswrapper[4780]: I1210 10:46:51.958720 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 10 10:46:51 crc kubenswrapper[4780]: I1210 10:46:51.958739 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 10 10:46:51 crc kubenswrapper[4780]: I1210 10:46:51.958871 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-46s5p" Dec 10 10:46:51 crc kubenswrapper[4780]: E1210 10:46:51.959460 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 10 10:46:51 crc kubenswrapper[4780]: E1210 10:46:51.959815 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 10 10:46:51 crc kubenswrapper[4780]: E1210 10:46:51.959973 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-46s5p" podUID="24187953-1dc5-48d7-b00c-1e5876604b6b" Dec 10 10:46:51 crc kubenswrapper[4780]: I1210 10:46:51.960083 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 10 10:46:51 crc kubenswrapper[4780]: E1210 10:46:51.960259 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 10 10:46:51 crc kubenswrapper[4780]: I1210 10:46:51.965652 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:51 crc kubenswrapper[4780]: I1210 10:46:51.965730 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:51 crc kubenswrapper[4780]: I1210 10:46:51.965757 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:51 crc kubenswrapper[4780]: I1210 10:46:51.965791 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:51 crc kubenswrapper[4780]: I1210 10:46:51.965817 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:51Z","lastTransitionTime":"2025-12-10T10:46:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:52 crc kubenswrapper[4780]: I1210 10:46:52.068868 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:52 crc kubenswrapper[4780]: I1210 10:46:52.068912 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:52 crc kubenswrapper[4780]: I1210 10:46:52.068935 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:52 crc kubenswrapper[4780]: I1210 10:46:52.068951 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:52 crc kubenswrapper[4780]: I1210 10:46:52.068961 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:52Z","lastTransitionTime":"2025-12-10T10:46:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:52 crc kubenswrapper[4780]: I1210 10:46:52.172273 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:52 crc kubenswrapper[4780]: I1210 10:46:52.172561 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:52 crc kubenswrapper[4780]: I1210 10:46:52.172730 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:52 crc kubenswrapper[4780]: I1210 10:46:52.172874 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:52 crc kubenswrapper[4780]: I1210 10:46:52.173023 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:52Z","lastTransitionTime":"2025-12-10T10:46:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:52 crc kubenswrapper[4780]: I1210 10:46:52.276115 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:52 crc kubenswrapper[4780]: I1210 10:46:52.276153 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:52 crc kubenswrapper[4780]: I1210 10:46:52.276162 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:52 crc kubenswrapper[4780]: I1210 10:46:52.276175 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:52 crc kubenswrapper[4780]: I1210 10:46:52.276187 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:52Z","lastTransitionTime":"2025-12-10T10:46:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:52 crc kubenswrapper[4780]: I1210 10:46:52.378738 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:52 crc kubenswrapper[4780]: I1210 10:46:52.378774 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:52 crc kubenswrapper[4780]: I1210 10:46:52.378785 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:52 crc kubenswrapper[4780]: I1210 10:46:52.378800 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:52 crc kubenswrapper[4780]: I1210 10:46:52.378812 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:52Z","lastTransitionTime":"2025-12-10T10:46:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:52 crc kubenswrapper[4780]: I1210 10:46:52.482013 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:52 crc kubenswrapper[4780]: I1210 10:46:52.482388 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:52 crc kubenswrapper[4780]: I1210 10:46:52.482493 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:52 crc kubenswrapper[4780]: I1210 10:46:52.482600 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:52 crc kubenswrapper[4780]: I1210 10:46:52.482773 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:52Z","lastTransitionTime":"2025-12-10T10:46:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:52 crc kubenswrapper[4780]: I1210 10:46:52.586838 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:52 crc kubenswrapper[4780]: I1210 10:46:52.586909 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:52 crc kubenswrapper[4780]: I1210 10:46:52.586964 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:52 crc kubenswrapper[4780]: I1210 10:46:52.587137 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:52 crc kubenswrapper[4780]: I1210 10:46:52.587156 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:52Z","lastTransitionTime":"2025-12-10T10:46:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:52 crc kubenswrapper[4780]: I1210 10:46:52.690323 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:52 crc kubenswrapper[4780]: I1210 10:46:52.690376 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:52 crc kubenswrapper[4780]: I1210 10:46:52.690387 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:52 crc kubenswrapper[4780]: I1210 10:46:52.690406 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:52 crc kubenswrapper[4780]: I1210 10:46:52.690416 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:52Z","lastTransitionTime":"2025-12-10T10:46:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:52 crc kubenswrapper[4780]: I1210 10:46:52.794285 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:52 crc kubenswrapper[4780]: I1210 10:46:52.794333 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:52 crc kubenswrapper[4780]: I1210 10:46:52.794343 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:52 crc kubenswrapper[4780]: I1210 10:46:52.794364 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:52 crc kubenswrapper[4780]: I1210 10:46:52.794375 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:52Z","lastTransitionTime":"2025-12-10T10:46:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:52 crc kubenswrapper[4780]: I1210 10:46:52.897355 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:52 crc kubenswrapper[4780]: I1210 10:46:52.897431 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:52 crc kubenswrapper[4780]: I1210 10:46:52.897443 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:52 crc kubenswrapper[4780]: I1210 10:46:52.897462 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:52 crc kubenswrapper[4780]: I1210 10:46:52.897473 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:52Z","lastTransitionTime":"2025-12-10T10:46:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:53 crc kubenswrapper[4780]: I1210 10:46:53.001344 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:53 crc kubenswrapper[4780]: I1210 10:46:53.001421 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:53 crc kubenswrapper[4780]: I1210 10:46:53.001434 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:53 crc kubenswrapper[4780]: I1210 10:46:53.001457 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:53 crc kubenswrapper[4780]: I1210 10:46:53.001474 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:53Z","lastTransitionTime":"2025-12-10T10:46:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:53 crc kubenswrapper[4780]: I1210 10:46:53.105158 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:53 crc kubenswrapper[4780]: I1210 10:46:53.105216 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:53 crc kubenswrapper[4780]: I1210 10:46:53.105227 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:53 crc kubenswrapper[4780]: I1210 10:46:53.105252 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:53 crc kubenswrapper[4780]: I1210 10:46:53.105265 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:53Z","lastTransitionTime":"2025-12-10T10:46:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:53 crc kubenswrapper[4780]: I1210 10:46:53.209904 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:53 crc kubenswrapper[4780]: I1210 10:46:53.209985 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:53 crc kubenswrapper[4780]: I1210 10:46:53.209997 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:53 crc kubenswrapper[4780]: I1210 10:46:53.210017 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:53 crc kubenswrapper[4780]: I1210 10:46:53.210030 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:53Z","lastTransitionTime":"2025-12-10T10:46:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:53 crc kubenswrapper[4780]: I1210 10:46:53.313772 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:53 crc kubenswrapper[4780]: I1210 10:46:53.313834 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:53 crc kubenswrapper[4780]: I1210 10:46:53.313848 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:53 crc kubenswrapper[4780]: I1210 10:46:53.313865 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:53 crc kubenswrapper[4780]: I1210 10:46:53.313877 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:53Z","lastTransitionTime":"2025-12-10T10:46:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:53 crc kubenswrapper[4780]: I1210 10:46:53.417217 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:53 crc kubenswrapper[4780]: I1210 10:46:53.417535 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:53 crc kubenswrapper[4780]: I1210 10:46:53.417557 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:53 crc kubenswrapper[4780]: I1210 10:46:53.417590 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:53 crc kubenswrapper[4780]: I1210 10:46:53.417612 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:53Z","lastTransitionTime":"2025-12-10T10:46:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:53 crc kubenswrapper[4780]: I1210 10:46:53.521739 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:53 crc kubenswrapper[4780]: I1210 10:46:53.521800 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:53 crc kubenswrapper[4780]: I1210 10:46:53.521815 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:53 crc kubenswrapper[4780]: I1210 10:46:53.521836 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:53 crc kubenswrapper[4780]: I1210 10:46:53.521850 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:53Z","lastTransitionTime":"2025-12-10T10:46:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:53 crc kubenswrapper[4780]: I1210 10:46:53.625832 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:53 crc kubenswrapper[4780]: I1210 10:46:53.625908 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:53 crc kubenswrapper[4780]: I1210 10:46:53.625969 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:53 crc kubenswrapper[4780]: I1210 10:46:53.626001 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:53 crc kubenswrapper[4780]: I1210 10:46:53.626048 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:53Z","lastTransitionTime":"2025-12-10T10:46:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:53 crc kubenswrapper[4780]: I1210 10:46:53.729785 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:53 crc kubenswrapper[4780]: I1210 10:46:53.729870 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:53 crc kubenswrapper[4780]: I1210 10:46:53.729900 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:53 crc kubenswrapper[4780]: I1210 10:46:53.729964 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:53 crc kubenswrapper[4780]: I1210 10:46:53.729985 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:53Z","lastTransitionTime":"2025-12-10T10:46:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:53 crc kubenswrapper[4780]: I1210 10:46:53.838874 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:53 crc kubenswrapper[4780]: I1210 10:46:53.838974 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:53 crc kubenswrapper[4780]: I1210 10:46:53.838991 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:53 crc kubenswrapper[4780]: I1210 10:46:53.839010 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:53 crc kubenswrapper[4780]: I1210 10:46:53.839021 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:53Z","lastTransitionTime":"2025-12-10T10:46:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:53 crc kubenswrapper[4780]: I1210 10:46:53.942901 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:53 crc kubenswrapper[4780]: I1210 10:46:53.942998 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:53 crc kubenswrapper[4780]: I1210 10:46:53.943008 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:53 crc kubenswrapper[4780]: I1210 10:46:53.943025 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:53 crc kubenswrapper[4780]: I1210 10:46:53.943035 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:53Z","lastTransitionTime":"2025-12-10T10:46:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:53 crc kubenswrapper[4780]: I1210 10:46:53.958224 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 10 10:46:53 crc kubenswrapper[4780]: I1210 10:46:53.958273 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 10 10:46:53 crc kubenswrapper[4780]: I1210 10:46:53.958260 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 10 10:46:53 crc kubenswrapper[4780]: E1210 10:46:53.958445 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 10 10:46:53 crc kubenswrapper[4780]: I1210 10:46:53.958476 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-46s5p" Dec 10 10:46:53 crc kubenswrapper[4780]: E1210 10:46:53.958611 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 10 10:46:53 crc kubenswrapper[4780]: E1210 10:46:53.958712 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-46s5p" podUID="24187953-1dc5-48d7-b00c-1e5876604b6b" Dec 10 10:46:53 crc kubenswrapper[4780]: E1210 10:46:53.958864 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 10 10:46:54 crc kubenswrapper[4780]: I1210 10:46:54.049817 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:54 crc kubenswrapper[4780]: I1210 10:46:54.049933 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:54 crc kubenswrapper[4780]: I1210 10:46:54.049965 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:54 crc kubenswrapper[4780]: I1210 10:46:54.050024 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:54 crc kubenswrapper[4780]: I1210 10:46:54.050039 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:54Z","lastTransitionTime":"2025-12-10T10:46:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:54 crc kubenswrapper[4780]: I1210 10:46:54.153735 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:54 crc kubenswrapper[4780]: I1210 10:46:54.153817 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:54 crc kubenswrapper[4780]: I1210 10:46:54.153836 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:54 crc kubenswrapper[4780]: I1210 10:46:54.153859 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:54 crc kubenswrapper[4780]: I1210 10:46:54.153873 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:54Z","lastTransitionTime":"2025-12-10T10:46:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:54 crc kubenswrapper[4780]: I1210 10:46:54.258186 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:54 crc kubenswrapper[4780]: I1210 10:46:54.258252 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:54 crc kubenswrapper[4780]: I1210 10:46:54.258262 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:54 crc kubenswrapper[4780]: I1210 10:46:54.258281 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:54 crc kubenswrapper[4780]: I1210 10:46:54.258291 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:54Z","lastTransitionTime":"2025-12-10T10:46:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:54 crc kubenswrapper[4780]: I1210 10:46:54.361958 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:54 crc kubenswrapper[4780]: I1210 10:46:54.362008 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:54 crc kubenswrapper[4780]: I1210 10:46:54.362026 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:54 crc kubenswrapper[4780]: I1210 10:46:54.362155 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:54 crc kubenswrapper[4780]: I1210 10:46:54.362186 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:54Z","lastTransitionTime":"2025-12-10T10:46:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:54 crc kubenswrapper[4780]: I1210 10:46:54.465841 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:54 crc kubenswrapper[4780]: I1210 10:46:54.465902 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:54 crc kubenswrapper[4780]: I1210 10:46:54.465912 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:54 crc kubenswrapper[4780]: I1210 10:46:54.465946 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:54 crc kubenswrapper[4780]: I1210 10:46:54.465961 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:54Z","lastTransitionTime":"2025-12-10T10:46:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:54 crc kubenswrapper[4780]: I1210 10:46:54.569363 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:54 crc kubenswrapper[4780]: I1210 10:46:54.569443 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:54 crc kubenswrapper[4780]: I1210 10:46:54.569455 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:54 crc kubenswrapper[4780]: I1210 10:46:54.569496 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:54 crc kubenswrapper[4780]: I1210 10:46:54.569511 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:54Z","lastTransitionTime":"2025-12-10T10:46:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:54 crc kubenswrapper[4780]: I1210 10:46:54.673732 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:54 crc kubenswrapper[4780]: I1210 10:46:54.673790 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:54 crc kubenswrapper[4780]: I1210 10:46:54.673801 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:54 crc kubenswrapper[4780]: I1210 10:46:54.673818 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:54 crc kubenswrapper[4780]: I1210 10:46:54.673832 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:54Z","lastTransitionTime":"2025-12-10T10:46:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:54 crc kubenswrapper[4780]: I1210 10:46:54.778644 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:54 crc kubenswrapper[4780]: I1210 10:46:54.778705 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:54 crc kubenswrapper[4780]: I1210 10:46:54.778728 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:54 crc kubenswrapper[4780]: I1210 10:46:54.778747 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:54 crc kubenswrapper[4780]: I1210 10:46:54.778762 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:54Z","lastTransitionTime":"2025-12-10T10:46:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:54 crc kubenswrapper[4780]: I1210 10:46:54.881805 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:54 crc kubenswrapper[4780]: I1210 10:46:54.882051 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:54 crc kubenswrapper[4780]: I1210 10:46:54.882066 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:54 crc kubenswrapper[4780]: I1210 10:46:54.882090 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:54 crc kubenswrapper[4780]: I1210 10:46:54.882106 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:54Z","lastTransitionTime":"2025-12-10T10:46:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:54 crc kubenswrapper[4780]: I1210 10:46:54.985407 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:54 crc kubenswrapper[4780]: I1210 10:46:54.985504 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:54 crc kubenswrapper[4780]: I1210 10:46:54.985516 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:54 crc kubenswrapper[4780]: I1210 10:46:54.985535 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:54 crc kubenswrapper[4780]: I1210 10:46:54.985548 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:54Z","lastTransitionTime":"2025-12-10T10:46:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:55 crc kubenswrapper[4780]: I1210 10:46:55.089188 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:55 crc kubenswrapper[4780]: I1210 10:46:55.089236 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:55 crc kubenswrapper[4780]: I1210 10:46:55.089249 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:55 crc kubenswrapper[4780]: I1210 10:46:55.089267 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:55 crc kubenswrapper[4780]: I1210 10:46:55.089277 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:55Z","lastTransitionTime":"2025-12-10T10:46:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:55 crc kubenswrapper[4780]: I1210 10:46:55.193150 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:55 crc kubenswrapper[4780]: I1210 10:46:55.193232 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:55 crc kubenswrapper[4780]: I1210 10:46:55.193246 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:55 crc kubenswrapper[4780]: I1210 10:46:55.193273 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:55 crc kubenswrapper[4780]: I1210 10:46:55.193288 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:55Z","lastTransitionTime":"2025-12-10T10:46:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:55 crc kubenswrapper[4780]: I1210 10:46:55.296558 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:55 crc kubenswrapper[4780]: I1210 10:46:55.296605 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:55 crc kubenswrapper[4780]: I1210 10:46:55.296614 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:55 crc kubenswrapper[4780]: I1210 10:46:55.296633 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:55 crc kubenswrapper[4780]: I1210 10:46:55.296652 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:55Z","lastTransitionTime":"2025-12-10T10:46:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:55 crc kubenswrapper[4780]: I1210 10:46:55.400231 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:55 crc kubenswrapper[4780]: I1210 10:46:55.400282 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:55 crc kubenswrapper[4780]: I1210 10:46:55.400323 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:55 crc kubenswrapper[4780]: I1210 10:46:55.400347 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:55 crc kubenswrapper[4780]: I1210 10:46:55.400359 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:55Z","lastTransitionTime":"2025-12-10T10:46:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:55 crc kubenswrapper[4780]: I1210 10:46:55.503079 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:55 crc kubenswrapper[4780]: I1210 10:46:55.503132 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:55 crc kubenswrapper[4780]: I1210 10:46:55.503144 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:55 crc kubenswrapper[4780]: I1210 10:46:55.503161 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:55 crc kubenswrapper[4780]: I1210 10:46:55.503173 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:55Z","lastTransitionTime":"2025-12-10T10:46:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:55 crc kubenswrapper[4780]: I1210 10:46:55.606095 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:55 crc kubenswrapper[4780]: I1210 10:46:55.606153 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:55 crc kubenswrapper[4780]: I1210 10:46:55.606164 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:55 crc kubenswrapper[4780]: I1210 10:46:55.606183 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:55 crc kubenswrapper[4780]: I1210 10:46:55.606195 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:55Z","lastTransitionTime":"2025-12-10T10:46:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:55 crc kubenswrapper[4780]: I1210 10:46:55.710414 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:55 crc kubenswrapper[4780]: I1210 10:46:55.710476 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:55 crc kubenswrapper[4780]: I1210 10:46:55.710491 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:55 crc kubenswrapper[4780]: I1210 10:46:55.710512 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:55 crc kubenswrapper[4780]: I1210 10:46:55.710525 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:55Z","lastTransitionTime":"2025-12-10T10:46:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:55 crc kubenswrapper[4780]: I1210 10:46:55.813058 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:55 crc kubenswrapper[4780]: I1210 10:46:55.813116 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:55 crc kubenswrapper[4780]: I1210 10:46:55.813134 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:55 crc kubenswrapper[4780]: I1210 10:46:55.813153 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:55 crc kubenswrapper[4780]: I1210 10:46:55.813165 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:55Z","lastTransitionTime":"2025-12-10T10:46:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:55 crc kubenswrapper[4780]: E1210 10:46:55.913587 4780 kubelet_node_status.go:497] "Node not becoming ready in time after startup" Dec 10 10:46:55 crc kubenswrapper[4780]: I1210 10:46:55.957999 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 10 10:46:55 crc kubenswrapper[4780]: E1210 10:46:55.958277 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 10 10:46:55 crc kubenswrapper[4780]: I1210 10:46:55.958424 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 10 10:46:55 crc kubenswrapper[4780]: I1210 10:46:55.958542 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 10 10:46:55 crc kubenswrapper[4780]: E1210 10:46:55.958669 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 10 10:46:55 crc kubenswrapper[4780]: I1210 10:46:55.958691 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-46s5p" Dec 10 10:46:55 crc kubenswrapper[4780]: E1210 10:46:55.959016 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 10 10:46:55 crc kubenswrapper[4780]: E1210 10:46:55.959064 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-46s5p" podUID="24187953-1dc5-48d7-b00c-1e5876604b6b" Dec 10 10:46:55 crc kubenswrapper[4780]: I1210 10:46:55.973505 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6baf1baf-f093-40f2-b258-117118f89050\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f9311d33a46721ffe228b91722ad2c6f36efb49b5e31cb703de657cc4a40dcbe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:44:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6136af77a31fde9c46edcf705caf249a029c3136befb7c0d84a26eab36d29a20\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6136af77a31fde9c46edcf705caf249a029c3136befb7c0d84a26eab36d29a20\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:44:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:44:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.
126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:44:56Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:55Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:55 crc kubenswrapper[4780]: I1210 10:46:55.997363 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9618efa9-9ecd-45a2-9e0e-e7a6f5d2566c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b36eaabc1d3267a1ecc43c1b45a77bb14b87f6b9d376062e48ceb3daca3729b3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ea1c2a440f85963adf3d061a880e537029c5c24a2291a46939e4ec9d939793ab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://41450938b04291094dea7c8bf9bf7a8a105fa00797e51a7589d4c6e8caf5eba5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42
928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b332743ab49123e4673694feef994f5bb845661ca73dc8c0ac3be56dd6aa7e03\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e07cb0ca88a1b7a32f7c8ba948b0d5f90a08cb9f10c95285d6985d2daef5529b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0fa41953f04ce503bed17439bbaf1f2e80eb1aa886cda19a5a27039f3015c5b7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0fa41953f04ce503bed17439bbaf1f2e80eb1aa886cda19a5a27039f3015c5b7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:44:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:44:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f16bee2af08ca98dad1fbbc5864b36f90851e2bcac54b37870c560109e1206f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\
\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9f16bee2af08ca98dad1fbbc5864b36f90851e2bcac54b37870c560109e1206f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:44:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:44:58Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://e2c2882ae9c017aadcf220f621d126e4b993c91d6fc35cc71ea7a37eadc9879a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e2c2882ae9c017aadcf220f621d126e4b993c91d6fc35cc71ea7a37eadc9879a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:44:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:44:56Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:55Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:56 crc kubenswrapper[4780]: I1210 10:46:56.019005 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"056c06c7-eb0f-4fb7-9f86-2884fd0d1e60\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b9592145faf65060693a3f1e14db253e10cd09c642ea0aa1a7682f5b06872f14\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:44:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://eccc5070135705c193e1020872bffc1b8aeb70dbe82f1bb520f36f012ca00703\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:44:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://72f3bd530fbcc66ed779ee9f6ef1c442f2697ca027aad5bd2473f42101d55528\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:44:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3970fa6bd2aa3e93a4473d78218e9dd494aa5f7056efbcfb123160bb63162192\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3970fa6bd2aa3e93a4473d78218e9dd494aa5f7056efbcfb123160bb63162192\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:44:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:44:57Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:44:56Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:56Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:56 crc kubenswrapper[4780]: I1210 10:46:56.039711 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:56Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:56 crc kubenswrapper[4780]: I1210 10:46:56.054211 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://acd480054c5d19efdbcce679b2246ce2972593aa710b1074a2fa90006f351c66\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7c28d1629c1d97373d475f14e54585e81a67114d5a69e21be9d1e750040d6d5c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"m
ountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:56Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:56 crc kubenswrapper[4780]: I1210 10:46:56.067878 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-msm77" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c1046aa3-9bf1-4013-8e8d-5629f08ed5e2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6a59aa8cd7a70e094ff80b0ae626aaabb77fb7ffdcffa1320945909c32da0704\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2ttwk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:30Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-msm77\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:56Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:56 crc kubenswrapper[4780]: I1210 10:46:56.084842 4780 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"88e16d6d-ae53-465a-a66f-e1aa2abbfb8c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://55c85e62eda4733b5f1e264e6903e3f61bc4759bdf3f891c5b513a2195e0daab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:44:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8df4b9f29751cd2ebfbdf2b33f30fb6f519721f13d03173b45c5905e64524c88\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:44:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://325438d9694126427a6f6905508c0feb1ab3918532c7d6fd929e63b409574f5f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:44:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cf105ae693d68f4e8d5eaae47d0827146c24312f507b378cf6
0de03a5034bfb7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:44:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:44:56Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:56Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:56 crc kubenswrapper[4780]: I1210 10:46:56.098802 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://738eca4a4800d0b83e165aaad1b57939c0fca3335dbf043de168bcc240b2c32c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:56Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:56 crc kubenswrapper[4780]: I1210 10:46:56.111192 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:56Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:56 crc kubenswrapper[4780]: I1210 10:46:56.123713 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-8cwb7" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"deadb49b-61b8-435f-8168-d7bd3c01b5ad\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://caa5c9ed8d7c77d4af7b9797ad6117d06def5e3ab3082d2fd61de5f1d0902a88\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b46e1864cd02c0ab49ad21329dedf09667d52948a3f59eac7f32302cc0b48bda\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-10T10:46:22Z\\\",\\\"message\\\":\\\"2025-12-10T10:45:36+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_86c2478e-d0b0-468d-aefe-1644c458013c\\\\n2025-12-10T10:45:36+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_86c2478e-d0b0-468d-aefe-1644c458013c to /host/opt/cni/bin/\\\\n2025-12-10T10:45:36Z [verbose] multus-daemon started\\\\n2025-12-10T10:45:36Z [verbose] Readiness Indicator file check\\\\n2025-12-10T10:46:21Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:46:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r67pg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:27Z\\\"}}\" for pod \"openshift-multus\"/\"multus-8cwb7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:56Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:56 crc kubenswrapper[4780]: I1210 10:46:56.141890 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-rwfxn" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e522fb8-b104-4f14-a3a2-628fbe0ef36c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f3318d09af03d52017f648e9727180287dbcfce150242eaeb5f95be2fa6a1374\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5855e463a188d72cb8eac92bd97b9b2f91550a9a5437d7df83405bbe7a104f5d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5855e463a188d72cb8eac92bd97b9b2f91550a9a5437d7df83405bbe7a104f5d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4a16db7b744311d27339440bc46ebd09ba4264c5892ff205c6125af2a127e67e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4a16db7b744311d27339440bc46ebd09ba4264c5892ff205c6125af2a127e67e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fcbeee4d4e545150d725187049a89aa3d35ddd8f7767a6af0ea84a385454c316\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fcbeee4d4e545150d725187049a89aa3d35ddd8f7767a6af0ea84a385454c316\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3d60cbe3cc8e817975ed6843aefdbfe3233e8857a6458d77322a623ed858ca18\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3d60cbe3cc8e817975ed6843aefdbfe3233e8857a6458d77322a623ed858ca18\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0a0b3aad440bea346393ff35f0fa9492282c2a69b4a4c170f56ca7af63666077\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0a0b3aad440bea346393ff35f0fa9492282c2a69b4a4c170f56ca7af63666077\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3b72566e3e3a5f8eb12033cd2342bc3e9ad8f091f47c9d65309888d91a1ceaf4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3b72566e3e3a5f8eb12033cd2342bc3e9ad8f091f47c9d65309888d91a1ceaf4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x4kzt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:27Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-rwfxn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:56Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:56 crc kubenswrapper[4780]: I1210 10:46:56.156793 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:56Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:56 crc kubenswrapper[4780]: I1210 10:46:56.168821 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-2lx8w" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8a3251eb-408c-42f1-b74d-261cb45eab71\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://414f485a56b9fb2fbea2bbc9687a588aabda6239cad44f8116ec680597dbe667\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lgkg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:27Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-2lx8w\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:56Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:56 crc kubenswrapper[4780]: I1210 10:46:56.188180 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-fpl55" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cc22221d-0c02-4e8c-8314-c2e6d9290b5e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ef0cc518de07a141be124545f2e547c630329ff507b0e0e71e4dee9b4be5d89a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ce7c3983123be2f92944322c2cf554e67cf80a51981415d9872bfc2db9f1ed3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cafa00280d671b48ed3628917793412a20b9d3be93a80d9a7775003081aea1ab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name
\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dbc444116b8592021f7d6ba0387d473521cba7c09ca78384be81f688af54554f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://782bcb7eb8341b73ff1c32c5af00aaefa2a513e5d1672f9f7df13cd05467131c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c39c3d56584d0939bfa9be938be09db5c93ddf50e0a56cacaa6e30c7b79475a1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\
"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ff51c6ad85356e07cc2059fe4b05b1061ae2d8eb800f63ba392c8a4f36112ca5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ff51c6ad85356e07cc2059fe4b05b1061ae2d8eb800f63ba392c8a4f36112ca5\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-10T10:46:37Z\\\",\\\"message\\\":\\\"ector *v1alpha1.BaselineAdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1210 10:46:35.399534 6896 reflector.go:311] Stopping reflector *v1.EgressIP (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressip/v1/apis/informers/externalversions/factory.go:140\\\\nI1210 10:46:35.399722 6896 reflector.go:311] Stopping reflector *v1.Pod (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1210 10:46:35.400003 6896 reflector.go:311] Stopping reflector *v1.Namespace (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1210 10:46:35.400137 6896 reflector.go:311] Stopping reflector *v1.Service (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1210 10:46:35.400506 6896 reflector.go:311] Stopping reflector *v1alpha1.AdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1210 10:46:35.436709 6896 shared_informer.go:320] Caches are synced for node-tracker-controller\\\\nI1210 10:46:35.436755 6896 services_controller.go:204] Setting up event handlers for services for network=default\\\\nI1210 10:46:35.436865 6896 ovnkube.go:599] Stopped ovnkube\\\\nI1210 10:46:35.436895 6896 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1210 10:46:35.437103 6896 ovnkube.go:137] failed to run ov\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-10T10:46:33Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 40s restarting failed container=ovnkube-controller 
pod=ovnkube-node-fpl55_openshift-ovn-kubernetes(cc22221d-0c02-4e8c-8314-c2e6d9290b5e)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://df663a4e7ac68c8967db1167ea07b83862e9d52aa13b3f10e51934bb18e7397e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ada0c2d5e74e1805777a330c39a3c1c0fb677f876387207a83c79bd7ed06775a\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ada0c2d5e74e1805777a330c39a3c1c0fb677f876387207a83c79bd7ed06775a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-whmzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:27Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-fpl55\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:56Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:56 crc kubenswrapper[4780]: I1210 10:46:56.201106 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-6m7tj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a78da08f-1ec1-4cc7-af55-d527da423778\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://920fb034d3d8ce6bf28b128e33093aa5daa724b15c51c7e208ed94ccc2f4840e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jvtxj
\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a4509159a8aa8b24876c86f827f346063fd6cd2603c15242f6507b0a4afaff3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jvtxj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:38Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-6m7tj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:56Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:56 crc kubenswrapper[4780]: I1210 10:46:56.213116 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-46s5p" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"24187953-1dc5-48d7-b00c-1e5876604b6b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:40Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:40Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jg69p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jg69p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:40Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-46s5p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:56Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:56 crc kubenswrapper[4780]: I1210 10:46:56.227609 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"da4d38a3-53c0-417d-a86f-3496714bd352\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:44:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7f0f1f88e79f81b145ad50cc2b0a6e66de12f33c6819aaa356c8e6f7808384f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:44:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d6c4b099607f36efe13488a53c66715ba756493eaa11612cef18b8174b7b48ef\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:44:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a352e1e0c2a6fcb27ed2d53b1952586d56239ac801471f41d319d4b9390f7334\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:44:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://960f2d7c617f5d0b3281d431308985bb7419af77581a404554f849d22ffa1687\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f71a2483b5040f737133b92baa6ea162f365c492a0d860a50ba8eabd7ca23519\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-10T10:45:21Z\\\",\\\"message\\\":\\\"file observer\\\\nW1210 10:45:20.821547 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1210 10:45:20.821865 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1210 10:45:20.824662 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-536389068/tls.crt::/tmp/serving-cert-536389068/tls.key\\\\\\\"\\\\nI1210 10:45:21.107127 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1210 10:45:21.109726 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1210 10:45:21.109750 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1210 10:45:21.109791 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1210 10:45:21.109797 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1210 10:45:21.115580 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1210 10:45:21.115608 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1210 10:45:21.115614 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1210 10:45:21.115621 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1210 10:45:21.115664 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1210 10:45:21.115672 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1210 10:45:21.115676 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1210 10:45:21.117097 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1210 10:45:21.119004 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:14Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://531ca3c69106ee33211f11eba4a6fcbcf6cf5b8b6ee16193b5a118999537d2e0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:00Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6a7d241fa7fd864088c8bfa29a4457b38b3e78a6846872d6f52aeebb0de99c58\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6a7d241fa7fd864088c8bfa29a4457b38b3e78a6846872d6f52aeebb0de99c58\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:44:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:44:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:44:56Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:56Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:56 crc kubenswrapper[4780]: I1210 10:46:56.239002 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8ca741d2df061bc77e981eca8a555ba59082c6db0357093490908a1b27054340\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:56Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:56 crc kubenswrapper[4780]: I1210 10:46:56.253276 4780 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6bf1dca1-b191-4796-b326-baac53e84045\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://439376b6f14a03790c4d49523ce4f21e3ee73ee0a81954f873409cf956048f53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sh92h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://57ec3f3a4da992b4288db7d6633da18e479fdc56b10c503a2a44368671882f0d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sh92h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:27Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-xhdr5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:56Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:57 crc kubenswrapper[4780]: E1210 10:46:57.426938 4780 kubelet.go:2916] "Container runtime 
network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Dec 10 10:46:57 crc kubenswrapper[4780]: I1210 10:46:57.958832 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 10 10:46:57 crc kubenswrapper[4780]: I1210 10:46:57.958832 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 10 10:46:57 crc kubenswrapper[4780]: I1210 10:46:57.958868 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-46s5p" Dec 10 10:46:57 crc kubenswrapper[4780]: I1210 10:46:57.959021 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 10 10:46:57 crc kubenswrapper[4780]: E1210 10:46:57.961217 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 10 10:46:57 crc kubenswrapper[4780]: E1210 10:46:57.961438 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 10 10:46:57 crc kubenswrapper[4780]: E1210 10:46:57.961531 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-46s5p" podUID="24187953-1dc5-48d7-b00c-1e5876604b6b" Dec 10 10:46:57 crc kubenswrapper[4780]: E1210 10:46:57.962138 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 10 10:46:59 crc kubenswrapper[4780]: I1210 10:46:59.958131 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 10 10:46:59 crc kubenswrapper[4780]: I1210 10:46:59.958178 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-46s5p" Dec 10 10:46:59 crc kubenswrapper[4780]: I1210 10:46:59.958279 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 10 10:46:59 crc kubenswrapper[4780]: I1210 10:46:59.958490 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 10 10:46:59 crc kubenswrapper[4780]: E1210 10:46:59.958582 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 10 10:46:59 crc kubenswrapper[4780]: E1210 10:46:59.958577 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 10 10:46:59 crc kubenswrapper[4780]: E1210 10:46:59.958658 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 10 10:46:59 crc kubenswrapper[4780]: E1210 10:46:59.958747 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-46s5p" podUID="24187953-1dc5-48d7-b00c-1e5876604b6b" Dec 10 10:47:01 crc kubenswrapper[4780]: I1210 10:47:01.827098 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:47:01 crc kubenswrapper[4780]: I1210 10:47:01.827182 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:47:01 crc kubenswrapper[4780]: I1210 10:47:01.827193 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:47:01 crc kubenswrapper[4780]: I1210 10:47:01.827247 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:47:01 crc kubenswrapper[4780]: I1210 10:47:01.827289 4780 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:47:01Z","lastTransitionTime":"2025-12-10T10:47:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:47:01 crc kubenswrapper[4780]: I1210 10:47:01.911750 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-cluster-version/cluster-version-operator-5c965bbfc6-9xmgh"] Dec 10 10:47:01 crc kubenswrapper[4780]: I1210 10:47:01.913275 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-9xmgh" Dec 10 10:47:01 crc kubenswrapper[4780]: I1210 10:47:01.916962 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"kube-root-ca.crt" Dec 10 10:47:01 crc kubenswrapper[4780]: I1210 10:47:01.917089 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"default-dockercfg-gxtc4" Dec 10 10:47:01 crc kubenswrapper[4780]: I1210 10:47:01.917153 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"cluster-version-operator-serving-cert" Dec 10 10:47:01 crc kubenswrapper[4780]: I1210 10:47:01.917895 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"openshift-service-ca.crt" Dec 10 10:47:01 crc kubenswrapper[4780]: I1210 10:47:01.945313 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/kube-apiserver-crc" podStartSLOduration=90.945238767 podStartE2EDuration="1m30.945238767s" podCreationTimestamp="2025-12-10 10:45:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 10:47:01.943638565 +0000 UTC m=+126.797032008" watchObservedRunningTime="2025-12-10 10:47:01.945238767 +0000 UTC m=+126.798632210" Dec 10 10:47:01 crc kubenswrapper[4780]: I1210 10:47:01.958864 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-46s5p" Dec 10 10:47:01 crc kubenswrapper[4780]: I1210 10:47:01.958887 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 10 10:47:01 crc kubenswrapper[4780]: I1210 10:47:01.959009 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 10 10:47:01 crc kubenswrapper[4780]: I1210 10:47:01.959073 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 10 10:47:01 crc kubenswrapper[4780]: E1210 10:47:01.959416 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 10 10:47:01 crc kubenswrapper[4780]: E1210 10:47:01.959704 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-46s5p" podUID="24187953-1dc5-48d7-b00c-1e5876604b6b" Dec 10 10:47:01 crc kubenswrapper[4780]: E1210 10:47:01.959833 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 10 10:47:01 crc kubenswrapper[4780]: I1210 10:47:01.959849 4780 scope.go:117] "RemoveContainer" containerID="ff51c6ad85356e07cc2059fe4b05b1061ae2d8eb800f63ba392c8a4f36112ca5" Dec 10 10:47:01 crc kubenswrapper[4780]: E1210 10:47:01.959912 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 10 10:47:01 crc kubenswrapper[4780]: E1210 10:47:01.960373 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-fpl55_openshift-ovn-kubernetes(cc22221d-0c02-4e8c-8314-c2e6d9290b5e)\"" pod="openshift-ovn-kubernetes/ovnkube-node-fpl55" podUID="cc22221d-0c02-4e8c-8314-c2e6d9290b5e" Dec 10 10:47:02 crc kubenswrapper[4780]: I1210 10:47:02.020429 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-ssl-certs\" (UniqueName: \"kubernetes.io/host-path/1a73083e-1327-478a-8771-b9fe51fda7dd-etc-ssl-certs\") pod \"cluster-version-operator-5c965bbfc6-9xmgh\" (UID: \"1a73083e-1327-478a-8771-b9fe51fda7dd\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-9xmgh" Dec 10 10:47:02 crc kubenswrapper[4780]: I1210 10:47:02.020586 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/1a73083e-1327-478a-8771-b9fe51fda7dd-service-ca\") pod \"cluster-version-operator-5c965bbfc6-9xmgh\" (UID: \"1a73083e-1327-478a-8771-b9fe51fda7dd\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-9xmgh" Dec 10 10:47:02 crc kubenswrapper[4780]: I1210 10:47:02.020623 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/1a73083e-1327-478a-8771-b9fe51fda7dd-kube-api-access\") pod \"cluster-version-operator-5c965bbfc6-9xmgh\" (UID: \"1a73083e-1327-478a-8771-b9fe51fda7dd\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-9xmgh" Dec 10 10:47:02 crc kubenswrapper[4780]: I1210 10:47:02.020784 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-cvo-updatepayloads\" (UniqueName: \"kubernetes.io/host-path/1a73083e-1327-478a-8771-b9fe51fda7dd-etc-cvo-updatepayloads\") pod \"cluster-version-operator-5c965bbfc6-9xmgh\" (UID: \"1a73083e-1327-478a-8771-b9fe51fda7dd\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-9xmgh" Dec 10 10:47:02 crc kubenswrapper[4780]: I1210 
10:47:02.024014 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1a73083e-1327-478a-8771-b9fe51fda7dd-serving-cert\") pod \"cluster-version-operator-5c965bbfc6-9xmgh\" (UID: \"1a73083e-1327-478a-8771-b9fe51fda7dd\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-9xmgh" Dec 10 10:47:02 crc kubenswrapper[4780]: I1210 10:47:02.048105 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" podStartSLOduration=99.048068247 podStartE2EDuration="1m39.048068247s" podCreationTimestamp="2025-12-10 10:45:23 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 10:47:02.047728188 +0000 UTC m=+126.901121631" watchObservedRunningTime="2025-12-10 10:47:02.048068247 +0000 UTC m=+126.901461690" Dec 10 10:47:02 crc kubenswrapper[4780]: I1210 10:47:02.065316 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/node-ca-msm77" podStartSLOduration=99.06528928 podStartE2EDuration="1m39.06528928s" podCreationTimestamp="2025-12-10 10:45:23 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 10:47:02.064617913 +0000 UTC m=+126.918011356" watchObservedRunningTime="2025-12-10 10:47:02.06528928 +0000 UTC m=+126.918682723" Dec 10 10:47:02 crc kubenswrapper[4780]: I1210 10:47:02.083588 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" podStartSLOduration=34.083559241 podStartE2EDuration="34.083559241s" podCreationTimestamp="2025-12-10 10:46:28 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 10:47:02.083320455 +0000 UTC m=+126.936713918" watchObservedRunningTime="2025-12-10 10:47:02.083559241 +0000 UTC m=+126.936952684" Dec 10 10:47:02 crc kubenswrapper[4780]: I1210 10:47:02.119104 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-etcd/etcd-crc" podStartSLOduration=26.119081866 podStartE2EDuration="26.119081866s" podCreationTimestamp="2025-12-10 10:46:36 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 10:47:02.118482471 +0000 UTC m=+126.971875904" watchObservedRunningTime="2025-12-10 10:47:02.119081866 +0000 UTC m=+126.972475309" Dec 10 10:47:02 crc kubenswrapper[4780]: I1210 10:47:02.125403 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-cvo-updatepayloads\" (UniqueName: \"kubernetes.io/host-path/1a73083e-1327-478a-8771-b9fe51fda7dd-etc-cvo-updatepayloads\") pod \"cluster-version-operator-5c965bbfc6-9xmgh\" (UID: \"1a73083e-1327-478a-8771-b9fe51fda7dd\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-9xmgh" Dec 10 10:47:02 crc kubenswrapper[4780]: I1210 10:47:02.125466 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1a73083e-1327-478a-8771-b9fe51fda7dd-serving-cert\") pod \"cluster-version-operator-5c965bbfc6-9xmgh\" (UID: \"1a73083e-1327-478a-8771-b9fe51fda7dd\") " 
pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-9xmgh" Dec 10 10:47:02 crc kubenswrapper[4780]: I1210 10:47:02.125505 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-ssl-certs\" (UniqueName: \"kubernetes.io/host-path/1a73083e-1327-478a-8771-b9fe51fda7dd-etc-ssl-certs\") pod \"cluster-version-operator-5c965bbfc6-9xmgh\" (UID: \"1a73083e-1327-478a-8771-b9fe51fda7dd\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-9xmgh" Dec 10 10:47:02 crc kubenswrapper[4780]: I1210 10:47:02.125552 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/1a73083e-1327-478a-8771-b9fe51fda7dd-service-ca\") pod \"cluster-version-operator-5c965bbfc6-9xmgh\" (UID: \"1a73083e-1327-478a-8771-b9fe51fda7dd\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-9xmgh" Dec 10 10:47:02 crc kubenswrapper[4780]: I1210 10:47:02.125570 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/1a73083e-1327-478a-8771-b9fe51fda7dd-kube-api-access\") pod \"cluster-version-operator-5c965bbfc6-9xmgh\" (UID: \"1a73083e-1327-478a-8771-b9fe51fda7dd\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-9xmgh" Dec 10 10:47:02 crc kubenswrapper[4780]: I1210 10:47:02.125652 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-cvo-updatepayloads\" (UniqueName: \"kubernetes.io/host-path/1a73083e-1327-478a-8771-b9fe51fda7dd-etc-cvo-updatepayloads\") pod \"cluster-version-operator-5c965bbfc6-9xmgh\" (UID: \"1a73083e-1327-478a-8771-b9fe51fda7dd\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-9xmgh" Dec 10 10:47:02 crc kubenswrapper[4780]: I1210 10:47:02.125743 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-ssl-certs\" (UniqueName: \"kubernetes.io/host-path/1a73083e-1327-478a-8771-b9fe51fda7dd-etc-ssl-certs\") pod \"cluster-version-operator-5c965bbfc6-9xmgh\" (UID: \"1a73083e-1327-478a-8771-b9fe51fda7dd\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-9xmgh" Dec 10 10:47:02 crc kubenswrapper[4780]: I1210 10:47:02.127107 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/1a73083e-1327-478a-8771-b9fe51fda7dd-service-ca\") pod \"cluster-version-operator-5c965bbfc6-9xmgh\" (UID: \"1a73083e-1327-478a-8771-b9fe51fda7dd\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-9xmgh" Dec 10 10:47:02 crc kubenswrapper[4780]: I1210 10:47:02.134410 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" podStartSLOduration=78.134375971 podStartE2EDuration="1m18.134375971s" podCreationTimestamp="2025-12-10 10:45:44 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 10:47:02.133610781 +0000 UTC m=+126.987004224" watchObservedRunningTime="2025-12-10 10:47:02.134375971 +0000 UTC m=+126.987769414" Dec 10 10:47:02 crc kubenswrapper[4780]: I1210 10:47:02.138650 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1a73083e-1327-478a-8771-b9fe51fda7dd-serving-cert\") pod \"cluster-version-operator-5c965bbfc6-9xmgh\" (UID: 
\"1a73083e-1327-478a-8771-b9fe51fda7dd\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-9xmgh" Dec 10 10:47:02 crc kubenswrapper[4780]: I1210 10:47:02.149022 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/1a73083e-1327-478a-8771-b9fe51fda7dd-kube-api-access\") pod \"cluster-version-operator-5c965bbfc6-9xmgh\" (UID: \"1a73083e-1327-478a-8771-b9fe51fda7dd\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-9xmgh" Dec 10 10:47:02 crc kubenswrapper[4780]: I1210 10:47:02.230654 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podStartSLOduration=38.23061698 podStartE2EDuration="38.23061698s" podCreationTimestamp="2025-12-10 10:46:24 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 10:47:02.206832778 +0000 UTC m=+127.060237471" watchObservedRunningTime="2025-12-10 10:47:02.23061698 +0000 UTC m=+127.084010443" Dec 10 10:47:02 crc kubenswrapper[4780]: I1210 10:47:02.235061 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-9xmgh" Dec 10 10:47:02 crc kubenswrapper[4780]: I1210 10:47:02.313627 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/multus-additional-cni-plugins-rwfxn" podStartSLOduration=98.313594149 podStartE2EDuration="1m38.313594149s" podCreationTimestamp="2025-12-10 10:45:24 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 10:47:02.313438355 +0000 UTC m=+127.166831808" watchObservedRunningTime="2025-12-10 10:47:02.313594149 +0000 UTC m=+127.166987592" Dec 10 10:47:02 crc kubenswrapper[4780]: I1210 10:47:02.314552 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/multus-8cwb7" podStartSLOduration=99.314544653 podStartE2EDuration="1m39.314544653s" podCreationTimestamp="2025-12-10 10:45:23 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 10:47:02.286155682 +0000 UTC m=+127.139549125" watchObservedRunningTime="2025-12-10 10:47:02.314544653 +0000 UTC m=+127.167938096" Dec 10 10:47:02 crc kubenswrapper[4780]: I1210 10:47:02.372896 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-dns/node-resolver-2lx8w" podStartSLOduration=99.372863526 podStartE2EDuration="1m39.372863526s" podCreationTimestamp="2025-12-10 10:45:23 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 10:47:02.372714892 +0000 UTC m=+127.226108355" watchObservedRunningTime="2025-12-10 10:47:02.372863526 +0000 UTC m=+127.226256969" Dec 10 10:47:02 crc kubenswrapper[4780]: E1210 10:47:02.429412 4780 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
Dec 10 10:47:02 crc kubenswrapper[4780]: I1210 10:47:02.436186 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-6m7tj" podStartSLOduration=98.436155327 podStartE2EDuration="1m38.436155327s" podCreationTimestamp="2025-12-10 10:45:24 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 10:47:02.436138517 +0000 UTC m=+127.289531970" watchObservedRunningTime="2025-12-10 10:47:02.436155327 +0000 UTC m=+127.289548770" Dec 10 10:47:02 crc kubenswrapper[4780]: I1210 10:47:02.854256 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-9xmgh" event={"ID":"1a73083e-1327-478a-8771-b9fe51fda7dd","Type":"ContainerStarted","Data":"072493598b89f8a53af87fe9331fda535520db46b52a6ef3ca8c38445d638919"} Dec 10 10:47:03 crc kubenswrapper[4780]: I1210 10:47:03.861648 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-9xmgh" event={"ID":"1a73083e-1327-478a-8771-b9fe51fda7dd","Type":"ContainerStarted","Data":"3998bfce4055fe6107af31bf80f40193fd4271540ec8fb4212a3e633c2711ae7"} Dec 10 10:47:03 crc kubenswrapper[4780]: I1210 10:47:03.958810 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 10 10:47:03 crc kubenswrapper[4780]: I1210 10:47:03.958822 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 10 10:47:03 crc kubenswrapper[4780]: I1210 10:47:03.958847 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 10 10:47:03 crc kubenswrapper[4780]: I1210 10:47:03.959055 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-46s5p" Dec 10 10:47:03 crc kubenswrapper[4780]: E1210 10:47:03.959301 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 10 10:47:03 crc kubenswrapper[4780]: E1210 10:47:03.959531 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 10 10:47:03 crc kubenswrapper[4780]: E1210 10:47:03.959656 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 10 10:47:03 crc kubenswrapper[4780]: E1210 10:47:03.959748 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-46s5p" podUID="24187953-1dc5-48d7-b00c-1e5876604b6b" Dec 10 10:47:05 crc kubenswrapper[4780]: I1210 10:47:05.958404 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 10 10:47:05 crc kubenswrapper[4780]: I1210 10:47:05.958404 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 10 10:47:05 crc kubenswrapper[4780]: I1210 10:47:05.958429 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 10 10:47:05 crc kubenswrapper[4780]: E1210 10:47:05.959766 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 10 10:47:05 crc kubenswrapper[4780]: I1210 10:47:05.959871 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-46s5p" Dec 10 10:47:05 crc kubenswrapper[4780]: E1210 10:47:05.960114 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 10 10:47:05 crc kubenswrapper[4780]: E1210 10:47:05.960173 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-46s5p" podUID="24187953-1dc5-48d7-b00c-1e5876604b6b" Dec 10 10:47:05 crc kubenswrapper[4780]: E1210 10:47:05.960237 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 10 10:47:07 crc kubenswrapper[4780]: E1210 10:47:07.432124 4780 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Dec 10 10:47:07 crc kubenswrapper[4780]: I1210 10:47:07.957842 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 10 10:47:07 crc kubenswrapper[4780]: E1210 10:47:07.958160 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 10 10:47:07 crc kubenswrapper[4780]: I1210 10:47:07.958244 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-46s5p" Dec 10 10:47:07 crc kubenswrapper[4780]: E1210 10:47:07.958329 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-46s5p" podUID="24187953-1dc5-48d7-b00c-1e5876604b6b" Dec 10 10:47:07 crc kubenswrapper[4780]: I1210 10:47:07.958436 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 10 10:47:07 crc kubenswrapper[4780]: I1210 10:47:07.958484 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 10 10:47:07 crc kubenswrapper[4780]: E1210 10:47:07.958671 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 10 10:47:07 crc kubenswrapper[4780]: E1210 10:47:07.958692 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 10 10:47:09 crc kubenswrapper[4780]: I1210 10:47:09.892595 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-8cwb7_deadb49b-61b8-435f-8168-d7bd3c01b5ad/kube-multus/1.log" Dec 10 10:47:09 crc kubenswrapper[4780]: I1210 10:47:09.893632 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-8cwb7_deadb49b-61b8-435f-8168-d7bd3c01b5ad/kube-multus/0.log" Dec 10 10:47:09 crc kubenswrapper[4780]: I1210 10:47:09.893809 4780 generic.go:334] "Generic (PLEG): container finished" podID="deadb49b-61b8-435f-8168-d7bd3c01b5ad" containerID="caa5c9ed8d7c77d4af7b9797ad6117d06def5e3ab3082d2fd61de5f1d0902a88" exitCode=1 Dec 10 10:47:09 crc kubenswrapper[4780]: I1210 10:47:09.893867 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-8cwb7" event={"ID":"deadb49b-61b8-435f-8168-d7bd3c01b5ad","Type":"ContainerDied","Data":"caa5c9ed8d7c77d4af7b9797ad6117d06def5e3ab3082d2fd61de5f1d0902a88"} Dec 10 10:47:09 crc kubenswrapper[4780]: I1210 10:47:09.894167 4780 scope.go:117] "RemoveContainer" containerID="b46e1864cd02c0ab49ad21329dedf09667d52948a3f59eac7f32302cc0b48bda" Dec 10 10:47:09 crc kubenswrapper[4780]: I1210 10:47:09.895065 4780 scope.go:117] "RemoveContainer" containerID="caa5c9ed8d7c77d4af7b9797ad6117d06def5e3ab3082d2fd61de5f1d0902a88" Dec 10 10:47:09 crc kubenswrapper[4780]: E1210 10:47:09.895321 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-multus\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-multus pod=multus-8cwb7_openshift-multus(deadb49b-61b8-435f-8168-d7bd3c01b5ad)\"" pod="openshift-multus/multus-8cwb7" podUID="deadb49b-61b8-435f-8168-d7bd3c01b5ad" Dec 10 10:47:09 crc kubenswrapper[4780]: I1210 10:47:09.928207 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-9xmgh" podStartSLOduration=106.928174164 podStartE2EDuration="1m46.928174164s" podCreationTimestamp="2025-12-10 10:45:23 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 10:47:03.884490307 +0000 UTC m=+128.737883750" watchObservedRunningTime="2025-12-10 10:47:09.928174164 +0000 UTC m=+134.781567607" Dec 10 10:47:09 crc kubenswrapper[4780]: I1210 10:47:09.958589 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-46s5p" Dec 10 10:47:09 crc kubenswrapper[4780]: I1210 10:47:09.958729 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 10 10:47:09 crc kubenswrapper[4780]: E1210 10:47:09.958775 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-46s5p" podUID="24187953-1dc5-48d7-b00c-1e5876604b6b" Dec 10 10:47:09 crc kubenswrapper[4780]: E1210 10:47:09.959015 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 10 10:47:09 crc kubenswrapper[4780]: I1210 10:47:09.959101 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 10 10:47:09 crc kubenswrapper[4780]: I1210 10:47:09.959287 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 10 10:47:09 crc kubenswrapper[4780]: E1210 10:47:09.959424 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 10 10:47:09 crc kubenswrapper[4780]: E1210 10:47:09.959581 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 10 10:47:10 crc kubenswrapper[4780]: I1210 10:47:10.902012 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-8cwb7_deadb49b-61b8-435f-8168-d7bd3c01b5ad/kube-multus/1.log" Dec 10 10:47:11 crc kubenswrapper[4780]: I1210 10:47:11.957614 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 10 10:47:11 crc kubenswrapper[4780]: E1210 10:47:11.957785 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 10 10:47:11 crc kubenswrapper[4780]: I1210 10:47:11.957904 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-46s5p" Dec 10 10:47:11 crc kubenswrapper[4780]: I1210 10:47:11.958016 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 10 10:47:11 crc kubenswrapper[4780]: I1210 10:47:11.957941 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 10 10:47:11 crc kubenswrapper[4780]: E1210 10:47:11.958297 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-46s5p" podUID="24187953-1dc5-48d7-b00c-1e5876604b6b" Dec 10 10:47:11 crc kubenswrapper[4780]: E1210 10:47:11.958389 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 10 10:47:11 crc kubenswrapper[4780]: E1210 10:47:11.958627 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 10 10:47:12 crc kubenswrapper[4780]: E1210 10:47:12.433715 4780 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Dec 10 10:47:13 crc kubenswrapper[4780]: I1210 10:47:13.958156 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 10 10:47:13 crc kubenswrapper[4780]: I1210 10:47:13.958166 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 10 10:47:13 crc kubenswrapper[4780]: I1210 10:47:13.958197 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-46s5p" Dec 10 10:47:13 crc kubenswrapper[4780]: I1210 10:47:13.958397 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 10 10:47:13 crc kubenswrapper[4780]: E1210 10:47:13.958552 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 10 10:47:13 crc kubenswrapper[4780]: E1210 10:47:13.958835 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 10 10:47:13 crc kubenswrapper[4780]: E1210 10:47:13.959049 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-46s5p" podUID="24187953-1dc5-48d7-b00c-1e5876604b6b" Dec 10 10:47:13 crc kubenswrapper[4780]: E1210 10:47:13.959210 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 10 10:47:15 crc kubenswrapper[4780]: I1210 10:47:15.957759 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 10 10:47:15 crc kubenswrapper[4780]: I1210 10:47:15.957816 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-46s5p" Dec 10 10:47:15 crc kubenswrapper[4780]: I1210 10:47:15.957991 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 10 10:47:15 crc kubenswrapper[4780]: I1210 10:47:15.960284 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 10 10:47:15 crc kubenswrapper[4780]: E1210 10:47:15.960263 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 10 10:47:15 crc kubenswrapper[4780]: E1210 10:47:15.960524 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-46s5p" podUID="24187953-1dc5-48d7-b00c-1e5876604b6b" Dec 10 10:47:15 crc kubenswrapper[4780]: E1210 10:47:15.960642 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 10 10:47:15 crc kubenswrapper[4780]: E1210 10:47:15.960687 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 10 10:47:16 crc kubenswrapper[4780]: I1210 10:47:16.959257 4780 scope.go:117] "RemoveContainer" containerID="ff51c6ad85356e07cc2059fe4b05b1061ae2d8eb800f63ba392c8a4f36112ca5" Dec 10 10:47:16 crc kubenswrapper[4780]: E1210 10:47:16.959557 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-fpl55_openshift-ovn-kubernetes(cc22221d-0c02-4e8c-8314-c2e6d9290b5e)\"" pod="openshift-ovn-kubernetes/ovnkube-node-fpl55" podUID="cc22221d-0c02-4e8c-8314-c2e6d9290b5e" Dec 10 10:47:17 crc kubenswrapper[4780]: E1210 10:47:17.435145 4780 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Dec 10 10:47:17 crc kubenswrapper[4780]: I1210 10:47:17.958254 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-46s5p" Dec 10 10:47:17 crc kubenswrapper[4780]: I1210 10:47:17.958254 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 10 10:47:17 crc kubenswrapper[4780]: E1210 10:47:17.958597 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-46s5p" podUID="24187953-1dc5-48d7-b00c-1e5876604b6b" Dec 10 10:47:17 crc kubenswrapper[4780]: I1210 10:47:17.958310 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 10 10:47:17 crc kubenswrapper[4780]: I1210 10:47:17.958251 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 10 10:47:17 crc kubenswrapper[4780]: E1210 10:47:17.959027 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 10 10:47:17 crc kubenswrapper[4780]: E1210 10:47:17.959177 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 10 10:47:17 crc kubenswrapper[4780]: E1210 10:47:17.959239 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 10 10:47:19 crc kubenswrapper[4780]: I1210 10:47:19.957840 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 10 10:47:19 crc kubenswrapper[4780]: I1210 10:47:19.957907 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-46s5p" Dec 10 10:47:19 crc kubenswrapper[4780]: I1210 10:47:19.957841 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 10 10:47:19 crc kubenswrapper[4780]: I1210 10:47:19.957875 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 10 10:47:19 crc kubenswrapper[4780]: E1210 10:47:19.958305 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 10 10:47:19 crc kubenswrapper[4780]: E1210 10:47:19.958438 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-46s5p" podUID="24187953-1dc5-48d7-b00c-1e5876604b6b" Dec 10 10:47:19 crc kubenswrapper[4780]: E1210 10:47:19.958591 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 10 10:47:19 crc kubenswrapper[4780]: E1210 10:47:19.958732 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 10 10:47:21 crc kubenswrapper[4780]: I1210 10:47:21.958839 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 10 10:47:21 crc kubenswrapper[4780]: I1210 10:47:21.958840 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 10 10:47:21 crc kubenswrapper[4780]: I1210 10:47:21.958839 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 10 10:47:21 crc kubenswrapper[4780]: I1210 10:47:21.958840 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-46s5p" Dec 10 10:47:21 crc kubenswrapper[4780]: E1210 10:47:21.959349 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 10 10:47:21 crc kubenswrapper[4780]: E1210 10:47:21.959470 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 10 10:47:21 crc kubenswrapper[4780]: E1210 10:47:21.959616 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 10 10:47:21 crc kubenswrapper[4780]: E1210 10:47:21.959764 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-46s5p" podUID="24187953-1dc5-48d7-b00c-1e5876604b6b" Dec 10 10:47:22 crc kubenswrapper[4780]: E1210 10:47:22.436607 4780 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Dec 10 10:47:22 crc kubenswrapper[4780]: I1210 10:47:22.959425 4780 scope.go:117] "RemoveContainer" containerID="caa5c9ed8d7c77d4af7b9797ad6117d06def5e3ab3082d2fd61de5f1d0902a88" Dec 10 10:47:23 crc kubenswrapper[4780]: I1210 10:47:23.958134 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 10 10:47:23 crc kubenswrapper[4780]: I1210 10:47:23.958615 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 10 10:47:23 crc kubenswrapper[4780]: E1210 10:47:23.958690 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 10 10:47:23 crc kubenswrapper[4780]: I1210 10:47:23.958716 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 10 10:47:23 crc kubenswrapper[4780]: I1210 10:47:23.958746 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-46s5p" Dec 10 10:47:23 crc kubenswrapper[4780]: E1210 10:47:23.958811 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 10 10:47:23 crc kubenswrapper[4780]: E1210 10:47:23.959014 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-46s5p" podUID="24187953-1dc5-48d7-b00c-1e5876604b6b" Dec 10 10:47:23 crc kubenswrapper[4780]: E1210 10:47:23.959684 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 10 10:47:23 crc kubenswrapper[4780]: I1210 10:47:23.980043 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-8cwb7_deadb49b-61b8-435f-8168-d7bd3c01b5ad/kube-multus/1.log" Dec 10 10:47:23 crc kubenswrapper[4780]: I1210 10:47:23.980125 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-8cwb7" event={"ID":"deadb49b-61b8-435f-8168-d7bd3c01b5ad","Type":"ContainerStarted","Data":"15785f7813590fd04ab190b13bb01dfd35df6b281b159d464b7146b18f150880"} Dec 10 10:47:25 crc kubenswrapper[4780]: I1210 10:47:25.957862 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 10 10:47:25 crc kubenswrapper[4780]: I1210 10:47:25.957943 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 10 10:47:25 crc kubenswrapper[4780]: I1210 10:47:25.957943 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 10 10:47:25 crc kubenswrapper[4780]: I1210 10:47:25.957952 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-46s5p" Dec 10 10:47:25 crc kubenswrapper[4780]: E1210 10:47:25.959129 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 10 10:47:25 crc kubenswrapper[4780]: E1210 10:47:25.959244 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 10 10:47:25 crc kubenswrapper[4780]: E1210 10:47:25.959373 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 10 10:47:25 crc kubenswrapper[4780]: E1210 10:47:25.959477 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-46s5p" podUID="24187953-1dc5-48d7-b00c-1e5876604b6b" Dec 10 10:47:27 crc kubenswrapper[4780]: E1210 10:47:27.439166 4780 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Dec 10 10:47:27 crc kubenswrapper[4780]: I1210 10:47:27.958351 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 10 10:47:27 crc kubenswrapper[4780]: I1210 10:47:27.958455 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 10 10:47:27 crc kubenswrapper[4780]: I1210 10:47:27.958509 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 10 10:47:27 crc kubenswrapper[4780]: E1210 10:47:27.958640 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 10 10:47:27 crc kubenswrapper[4780]: I1210 10:47:27.958741 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-46s5p" Dec 10 10:47:27 crc kubenswrapper[4780]: E1210 10:47:27.958819 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 10 10:47:27 crc kubenswrapper[4780]: E1210 10:47:27.958892 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-46s5p" podUID="24187953-1dc5-48d7-b00c-1e5876604b6b" Dec 10 10:47:27 crc kubenswrapper[4780]: E1210 10:47:27.958994 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 10 10:47:29 crc kubenswrapper[4780]: I1210 10:47:29.958126 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 10 10:47:29 crc kubenswrapper[4780]: I1210 10:47:29.958126 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 10 10:47:29 crc kubenswrapper[4780]: E1210 10:47:29.958317 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 10 10:47:29 crc kubenswrapper[4780]: I1210 10:47:29.958169 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-46s5p" Dec 10 10:47:29 crc kubenswrapper[4780]: I1210 10:47:29.958140 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 10 10:47:29 crc kubenswrapper[4780]: E1210 10:47:29.958386 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 10 10:47:29 crc kubenswrapper[4780]: E1210 10:47:29.958761 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-46s5p" podUID="24187953-1dc5-48d7-b00c-1e5876604b6b" Dec 10 10:47:29 crc kubenswrapper[4780]: E1210 10:47:29.958859 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 10 10:47:30 crc kubenswrapper[4780]: I1210 10:47:30.959453 4780 scope.go:117] "RemoveContainer" containerID="ff51c6ad85356e07cc2059fe4b05b1061ae2d8eb800f63ba392c8a4f36112ca5" Dec 10 10:47:31 crc kubenswrapper[4780]: I1210 10:47:31.957962 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 10 10:47:31 crc kubenswrapper[4780]: E1210 10:47:31.958994 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 10 10:47:31 crc kubenswrapper[4780]: I1210 10:47:31.959172 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 10 10:47:31 crc kubenswrapper[4780]: E1210 10:47:31.959311 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 10 10:47:31 crc kubenswrapper[4780]: I1210 10:47:31.959655 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 10 10:47:31 crc kubenswrapper[4780]: E1210 10:47:31.960189 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 10 10:47:31 crc kubenswrapper[4780]: I1210 10:47:31.960435 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-46s5p" Dec 10 10:47:31 crc kubenswrapper[4780]: E1210 10:47:31.960624 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-46s5p" podUID="24187953-1dc5-48d7-b00c-1e5876604b6b" Dec 10 10:47:32 crc kubenswrapper[4780]: I1210 10:47:32.009083 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-fpl55_cc22221d-0c02-4e8c-8314-c2e6d9290b5e/ovnkube-controller/3.log" Dec 10 10:47:32 crc kubenswrapper[4780]: I1210 10:47:32.012368 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-fpl55" event={"ID":"cc22221d-0c02-4e8c-8314-c2e6d9290b5e","Type":"ContainerStarted","Data":"3b436c5f5fde27d37bb053b20d1909a42902f8da7cd35713ac9cced9d4f93d30"} Dec 10 10:47:32 crc kubenswrapper[4780]: I1210 10:47:32.013117 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-fpl55" Dec 10 10:47:32 crc kubenswrapper[4780]: E1210 10:47:32.618844 4780 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
Dec 10 10:47:32 crc kubenswrapper[4780]: I1210 10:47:32.634832 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ovn-kubernetes/ovnkube-node-fpl55" podStartSLOduration=128.634777256 podStartE2EDuration="2m8.634777256s" podCreationTimestamp="2025-12-10 10:45:24 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 10:47:32.056729703 +0000 UTC m=+156.910123146" watchObservedRunningTime="2025-12-10 10:47:32.634777256 +0000 UTC m=+157.488170699" Dec 10 10:47:32 crc kubenswrapper[4780]: I1210 10:47:32.636142 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/network-metrics-daemon-46s5p"] Dec 10 10:47:32 crc kubenswrapper[4780]: I1210 10:47:32.636316 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-46s5p" Dec 10 10:47:32 crc kubenswrapper[4780]: E1210 10:47:32.636424 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-46s5p" podUID="24187953-1dc5-48d7-b00c-1e5876604b6b" Dec 10 10:47:33 crc kubenswrapper[4780]: I1210 10:47:33.958521 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 10 10:47:33 crc kubenswrapper[4780]: E1210 10:47:33.959051 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 10 10:47:33 crc kubenswrapper[4780]: I1210 10:47:33.958521 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 10 10:47:33 crc kubenswrapper[4780]: I1210 10:47:33.958548 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 10 10:47:33 crc kubenswrapper[4780]: E1210 10:47:33.959312 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 10 10:47:33 crc kubenswrapper[4780]: E1210 10:47:33.959410 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 10 10:47:34 crc kubenswrapper[4780]: I1210 10:47:34.999045 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-46s5p" Dec 10 10:47:35 crc kubenswrapper[4780]: E1210 10:47:34.999459 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-46s5p" podUID="24187953-1dc5-48d7-b00c-1e5876604b6b" Dec 10 10:47:35 crc kubenswrapper[4780]: I1210 10:47:35.609162 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 10 10:47:35 crc kubenswrapper[4780]: I1210 10:47:35.609514 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 10 10:47:35 crc kubenswrapper[4780]: E1210 10:47:35.609676 4780 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Dec 10 10:47:35 crc kubenswrapper[4780]: E1210 10:47:35.609799 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-10 10:49:37.609626644 +0000 UTC m=+282.463020087 (durationBeforeRetry 2m2s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:47:35 crc kubenswrapper[4780]: I1210 10:47:35.609879 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 10 10:47:35 crc kubenswrapper[4780]: E1210 10:47:35.610007 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-12-10 10:49:37.609978454 +0000 UTC m=+282.463371997 (durationBeforeRetry 2m2s). 
Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Dec 10 10:47:35 crc kubenswrapper[4780]: I1210 10:47:35.610057 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 10 10:47:35 crc kubenswrapper[4780]: I1210 10:47:35.610102 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 10 10:47:35 crc kubenswrapper[4780]: E1210 10:47:35.610143 4780 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Dec 10 10:47:35 crc kubenswrapper[4780]: E1210 10:47:35.610247 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-12-10 10:49:37.61023425 +0000 UTC m=+282.463627693 (durationBeforeRetry 2m2s). 
Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Dec 10 10:47:35 crc kubenswrapper[4780]: E1210 10:47:35.610258 4780 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Dec 10 10:47:35 crc kubenswrapper[4780]: E1210 10:47:35.610267 4780 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Dec 10 10:47:35 crc kubenswrapper[4780]: E1210 10:47:35.610295 4780 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Dec 10 10:47:35 crc kubenswrapper[4780]: E1210 10:47:35.610327 4780 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Dec 10 10:47:35 crc kubenswrapper[4780]: E1210 10:47:35.610276 4780 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Dec 10 10:47:35 crc kubenswrapper[4780]: E1210 10:47:35.610403 4780 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Dec 10 10:47:35 crc kubenswrapper[4780]: E1210 10:47:35.610410 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-12-10 10:49:37.610400135 +0000 UTC m=+282.463793578 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Dec 10 10:47:35 crc kubenswrapper[4780]: E1210 10:47:35.610484 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-12-10 10:49:37.610464396 +0000 UTC m=+282.463857839 (durationBeforeRetry 2m2s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Dec 10 10:47:35 crc kubenswrapper[4780]: I1210 10:47:35.958164 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 10 10:47:35 crc kubenswrapper[4780]: I1210 10:47:35.958164 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 10 10:47:35 crc kubenswrapper[4780]: I1210 10:47:35.958210 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 10 10:47:35 crc kubenswrapper[4780]: E1210 10:47:35.959981 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 10 10:47:35 crc kubenswrapper[4780]: E1210 10:47:35.960111 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 10 10:47:35 crc kubenswrapper[4780]: E1210 10:47:35.960212 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 10 10:47:36 crc kubenswrapper[4780]: I1210 10:47:36.958008 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-46s5p" Dec 10 10:47:36 crc kubenswrapper[4780]: E1210 10:47:36.959047 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-46s5p" podUID="24187953-1dc5-48d7-b00c-1e5876604b6b" Dec 10 10:47:37 crc kubenswrapper[4780]: I1210 10:47:37.958061 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 10 10:47:37 crc kubenswrapper[4780]: I1210 10:47:37.958065 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 10 10:47:37 crc kubenswrapper[4780]: I1210 10:47:37.958086 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 10 10:47:37 crc kubenswrapper[4780]: I1210 10:47:37.962255 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"kube-root-ca.crt" Dec 10 10:47:37 crc kubenswrapper[4780]: I1210 10:47:37.962530 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-console"/"networking-console-plugin-cert" Dec 10 10:47:37 crc kubenswrapper[4780]: I1210 10:47:37.962834 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-console"/"networking-console-plugin" Dec 10 10:47:37 crc kubenswrapper[4780]: I1210 10:47:37.964712 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"openshift-service-ca.crt" Dec 10 10:47:38 crc kubenswrapper[4780]: I1210 10:47:38.957978 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-46s5p" Dec 10 10:47:38 crc kubenswrapper[4780]: I1210 10:47:38.962292 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-sa-dockercfg-d427c" Dec 10 10:47:38 crc kubenswrapper[4780]: I1210 10:47:38.962292 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-secret" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.149268 4780 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeReady" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.542298 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-apiserver/apiserver-76f77b778f-9hgsw"] Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.543406 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-76f77b778f-9hgsw" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.546763 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-authentication-operator/authentication-operator-69f744f599-zvngr"] Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.547465 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-fw9bc"] Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.547833 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-fw9bc" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.548446 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-authentication-operator/authentication-operator-69f744f599-zvngr" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.551723 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"config" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.551967 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"etcd-client" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.552336 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"audit-1" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.552594 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"serving-cert" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.552840 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"openshift-apiserver-sa-dockercfg-djjff" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.556279 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-oauth-apiserver/apiserver-7bbb656c7d-dgfj7"] Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.557128 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-api/machine-api-operator-5694c8668f-2mb96"] Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.557636 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-5694c8668f-2mb96" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.558272 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-dgfj7" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.560484 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"openshift-service-ca.crt" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.564151 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"etcd-serving-ca" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.564221 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"encryption-config-1" Dec 10 10:47:42 crc kubenswrapper[4780]: W1210 10:47:42.564271 4780 reflector.go:561] object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2": failed to list *v1.Secret: secrets "route-controller-manager-sa-dockercfg-h2zr2" is forbidden: User "system:node:crc" cannot list resource "secrets" in API group "" in the namespace "openshift-route-controller-manager": no relationship found between node 'crc' and this object Dec 10 10:47:42 crc kubenswrapper[4780]: E1210 10:47:42.564402 4780 reflector.go:158] "Unhandled Error" err="object-\"openshift-route-controller-manager\"/\"route-controller-manager-sa-dockercfg-h2zr2\": Failed to watch *v1.Secret: failed to list *v1.Secret: secrets \"route-controller-manager-sa-dockercfg-h2zr2\" is forbidden: User \"system:node:crc\" cannot list resource \"secrets\" in API group \"\" in the namespace \"openshift-route-controller-manager\": no relationship found between node 'crc' and this object" logger="UnhandledError" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.565129 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"image-import-ca" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.568019 4780 reflector.go:368] Caches populated for *v1.ConfigMap from 
object-"openshift-authentication-operator"/"authentication-operator-config" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.577410 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"kube-root-ca.crt" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.577455 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-cluster-machine-approver/machine-approver-56656f9798-wbpvc"] Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.578302 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-wbpvc" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.578934 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.589163 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"kube-root-ca.crt" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.589472 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-rbac-proxy" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.592399 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/5464f7ae-9634-4208-a5f5-3e6299f72639-trusted-ca-bundle\") pod \"apiserver-76f77b778f-9hgsw\" (UID: \"5464f7ae-9634-4208-a5f5-3e6299f72639\") " pod="openshift-apiserver/apiserver-76f77b778f-9hgsw" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.592471 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/40ab5014-1713-4cec-8577-58a4573025e8-serving-cert\") pod \"authentication-operator-69f744f599-zvngr\" (UID: \"40ab5014-1713-4cec-8577-58a4573025e8\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-zvngr" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.592528 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/fb9e3c6f-29bf-49b8-a0c0-c17447e36e99-config\") pod \"route-controller-manager-6576b87f9c-fw9bc\" (UID: \"fb9e3c6f-29bf-49b8-a0c0-c17447e36e99\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-fw9bc" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.592551 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/fb9e3c6f-29bf-49b8-a0c0-c17447e36e99-serving-cert\") pod \"route-controller-manager-6576b87f9c-fw9bc\" (UID: \"fb9e3c6f-29bf-49b8-a0c0-c17447e36e99\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-fw9bc" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.592574 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/135e9ec7-c882-4894-a24e-669b09be3f5b-etcd-serving-ca\") pod \"apiserver-7bbb656c7d-dgfj7\" (UID: \"135e9ec7-c882-4894-a24e-669b09be3f5b\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-dgfj7" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.592598 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started 
for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5464f7ae-9634-4208-a5f5-3e6299f72639-config\") pod \"apiserver-76f77b778f-9hgsw\" (UID: \"5464f7ae-9634-4208-a5f5-3e6299f72639\") " pod="openshift-apiserver/apiserver-76f77b778f-9hgsw" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.592620 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5464f7ae-9634-4208-a5f5-3e6299f72639-serving-cert\") pod \"apiserver-76f77b778f-9hgsw\" (UID: \"5464f7ae-9634-4208-a5f5-3e6299f72639\") " pod="openshift-apiserver/apiserver-76f77b778f-9hgsw" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.592650 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sgrlg\" (UniqueName: \"kubernetes.io/projected/d8851f78-89d9-4d65-b8e3-cb9ad2f74469-kube-api-access-sgrlg\") pod \"machine-approver-56656f9798-wbpvc\" (UID: \"d8851f78-89d9-4d65-b8e3-cb9ad2f74469\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-wbpvc" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.592697 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/135e9ec7-c882-4894-a24e-669b09be3f5b-serving-cert\") pod \"apiserver-7bbb656c7d-dgfj7\" (UID: \"135e9ec7-c882-4894-a24e-669b09be3f5b\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-dgfj7" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.592747 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/5464f7ae-9634-4208-a5f5-3e6299f72639-node-pullsecrets\") pod \"apiserver-76f77b778f-9hgsw\" (UID: \"5464f7ae-9634-4208-a5f5-3e6299f72639\") " pod="openshift-apiserver/apiserver-76f77b778f-9hgsw" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.592778 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/d8851f78-89d9-4d65-b8e3-cb9ad2f74469-machine-approver-tls\") pod \"machine-approver-56656f9798-wbpvc\" (UID: \"d8851f78-89d9-4d65-b8e3-cb9ad2f74469\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-wbpvc" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.592804 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/428a3826-00fc-4452-8f22-61d02857b761-config\") pod \"machine-api-operator-5694c8668f-2mb96\" (UID: \"428a3826-00fc-4452-8f22-61d02857b761\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-2mb96" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.592827 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/428a3826-00fc-4452-8f22-61d02857b761-images\") pod \"machine-api-operator-5694c8668f-2mb96\" (UID: \"428a3826-00fc-4452-8f22-61d02857b761\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-2mb96" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.592852 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2xphz\" (UniqueName: \"kubernetes.io/projected/fb9e3c6f-29bf-49b8-a0c0-c17447e36e99-kube-api-access-2xphz\") pod 
\"route-controller-manager-6576b87f9c-fw9bc\" (UID: \"fb9e3c6f-29bf-49b8-a0c0-c17447e36e99\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-fw9bc" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.592879 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/5464f7ae-9634-4208-a5f5-3e6299f72639-etcd-client\") pod \"apiserver-76f77b778f-9hgsw\" (UID: \"5464f7ae-9634-4208-a5f5-3e6299f72639\") " pod="openshift-apiserver/apiserver-76f77b778f-9hgsw" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.592949 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8nddf\" (UniqueName: \"kubernetes.io/projected/135e9ec7-c882-4894-a24e-669b09be3f5b-kube-api-access-8nddf\") pod \"apiserver-7bbb656c7d-dgfj7\" (UID: \"135e9ec7-c882-4894-a24e-669b09be3f5b\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-dgfj7" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.592999 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/5464f7ae-9634-4208-a5f5-3e6299f72639-audit-dir\") pod \"apiserver-76f77b778f-9hgsw\" (UID: \"5464f7ae-9634-4208-a5f5-3e6299f72639\") " pod="openshift-apiserver/apiserver-76f77b778f-9hgsw" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.593059 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/135e9ec7-c882-4894-a24e-669b09be3f5b-trusted-ca-bundle\") pod \"apiserver-7bbb656c7d-dgfj7\" (UID: \"135e9ec7-c882-4894-a24e-669b09be3f5b\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-dgfj7" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.593087 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/fb9e3c6f-29bf-49b8-a0c0-c17447e36e99-client-ca\") pod \"route-controller-manager-6576b87f9c-fw9bc\" (UID: \"fb9e3c6f-29bf-49b8-a0c0-c17447e36e99\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-fw9bc" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.593116 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/5464f7ae-9634-4208-a5f5-3e6299f72639-etcd-serving-ca\") pod \"apiserver-76f77b778f-9hgsw\" (UID: \"5464f7ae-9634-4208-a5f5-3e6299f72639\") " pod="openshift-apiserver/apiserver-76f77b778f-9hgsw" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.593141 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/428a3826-00fc-4452-8f22-61d02857b761-machine-api-operator-tls\") pod \"machine-api-operator-5694c8668f-2mb96\" (UID: \"428a3826-00fc-4452-8f22-61d02857b761\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-2mb96" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.593163 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/40ab5014-1713-4cec-8577-58a4573025e8-service-ca-bundle\") pod \"authentication-operator-69f744f599-zvngr\" (UID: \"40ab5014-1713-4cec-8577-58a4573025e8\") " 
pod="openshift-authentication-operator/authentication-operator-69f744f599-zvngr" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.593198 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/135e9ec7-c882-4894-a24e-669b09be3f5b-audit-policies\") pod \"apiserver-7bbb656c7d-dgfj7\" (UID: \"135e9ec7-c882-4894-a24e-669b09be3f5b\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-dgfj7" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.593226 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d8851f78-89d9-4d65-b8e3-cb9ad2f74469-config\") pod \"machine-approver-56656f9798-wbpvc\" (UID: \"d8851f78-89d9-4d65-b8e3-cb9ad2f74469\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-wbpvc" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.593264 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zzhgp\" (UniqueName: \"kubernetes.io/projected/5464f7ae-9634-4208-a5f5-3e6299f72639-kube-api-access-zzhgp\") pod \"apiserver-76f77b778f-9hgsw\" (UID: \"5464f7ae-9634-4208-a5f5-3e6299f72639\") " pod="openshift-apiserver/apiserver-76f77b778f-9hgsw" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.593325 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/5464f7ae-9634-4208-a5f5-3e6299f72639-image-import-ca\") pod \"apiserver-76f77b778f-9hgsw\" (UID: \"5464f7ae-9634-4208-a5f5-3e6299f72639\") " pod="openshift-apiserver/apiserver-76f77b778f-9hgsw" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.593354 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/d8851f78-89d9-4d65-b8e3-cb9ad2f74469-auth-proxy-config\") pod \"machine-approver-56656f9798-wbpvc\" (UID: \"d8851f78-89d9-4d65-b8e3-cb9ad2f74469\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-wbpvc" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.593389 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/5464f7ae-9634-4208-a5f5-3e6299f72639-audit\") pod \"apiserver-76f77b778f-9hgsw\" (UID: \"5464f7ae-9634-4208-a5f5-3e6299f72639\") " pod="openshift-apiserver/apiserver-76f77b778f-9hgsw" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.593419 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/40ab5014-1713-4cec-8577-58a4573025e8-trusted-ca-bundle\") pod \"authentication-operator-69f744f599-zvngr\" (UID: \"40ab5014-1713-4cec-8577-58a4573025e8\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-zvngr" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.593447 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/135e9ec7-c882-4894-a24e-669b09be3f5b-etcd-client\") pod \"apiserver-7bbb656c7d-dgfj7\" (UID: \"135e9ec7-c882-4894-a24e-669b09be3f5b\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-dgfj7" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.593496 
4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/135e9ec7-c882-4894-a24e-669b09be3f5b-audit-dir\") pod \"apiserver-7bbb656c7d-dgfj7\" (UID: \"135e9ec7-c882-4894-a24e-669b09be3f5b\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-dgfj7" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.595661 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-s84v8\" (UniqueName: \"kubernetes.io/projected/428a3826-00fc-4452-8f22-61d02857b761-kube-api-access-s84v8\") pod \"machine-api-operator-5694c8668f-2mb96\" (UID: \"428a3826-00fc-4452-8f22-61d02857b761\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-2mb96" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.595890 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/5464f7ae-9634-4208-a5f5-3e6299f72639-encryption-config\") pod \"apiserver-76f77b778f-9hgsw\" (UID: \"5464f7ae-9634-4208-a5f5-3e6299f72639\") " pod="openshift-apiserver/apiserver-76f77b778f-9hgsw" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.596043 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/40ab5014-1713-4cec-8577-58a4573025e8-config\") pod \"authentication-operator-69f744f599-zvngr\" (UID: \"40ab5014-1713-4cec-8577-58a4573025e8\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-zvngr" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.596177 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5rwft\" (UniqueName: \"kubernetes.io/projected/40ab5014-1713-4cec-8577-58a4573025e8-kube-api-access-5rwft\") pod \"authentication-operator-69f744f599-zvngr\" (UID: \"40ab5014-1713-4cec-8577-58a4573025e8\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-zvngr" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.596236 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/135e9ec7-c882-4894-a24e-669b09be3f5b-encryption-config\") pod \"apiserver-7bbb656c7d-dgfj7\" (UID: \"135e9ec7-c882-4894-a24e-669b09be3f5b\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-dgfj7" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.596531 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-sa-dockercfg-nl2j4" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.596735 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-rbac-proxy" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.596831 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"serving-cert" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.597011 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"kube-root-ca.crt" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.597263 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"etcd-serving-ca" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.597277 4780 
reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"openshift-service-ca.crt" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.597396 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.597569 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.597659 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"openshift-service-ca.crt" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.597735 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"service-ca-bundle" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.597582 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-root-ca.crt" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.597985 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-tls" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.598039 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"machine-api-operator-images" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.598223 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"authentication-operator-dockercfg-mz9bj" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.598247 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-root-ca.crt" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.598307 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"machine-approver-config" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.598410 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"etcd-client" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.598490 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"encryption-config-1" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.598573 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"trusted-ca-bundle" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.598679 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"openshift-service-ca.crt" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.598766 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"audit-1" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.598843 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"oauth-apiserver-sa-dockercfg-6r2bq" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.598903 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.598724 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"serving-cert" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.599089 4780 reflector.go:368] Caches 
populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"openshift-service-ca.crt" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.598865 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-dockercfg-mfbb7" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.598727 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-tls" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.599044 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.601824 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"trusted-ca-bundle" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.605079 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-njfgs"] Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.605202 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"trusted-ca-bundle" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.605891 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-njfgs" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.623900 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.624287 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.625028 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.629867 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console-operator/console-operator-58897d9998-52wql"] Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.632739 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-etcd-operator/etcd-operator-b45778765-lmmsq"] Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.633912 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.636276 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-58897d9998-52wql" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.638966 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.641039 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/downloads-7954f5f757-fxxzq"] Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.644617 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-b45778765-lmmsq" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.646386 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/downloads-7954f5f757-fxxzq" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.652779 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"openshift-service-ca.crt" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.652894 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"serving-cert" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.653244 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-operator-config" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.653602 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"openshift-service-ca.crt" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.653899 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"kube-root-ca.crt" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.654026 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-service-ca-bundle" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.654377 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-dockercfg-r9srn" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.654501 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-c8dbv"] Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.654651 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"kube-root-ca.crt" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.655020 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-serving-cert" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.655289 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-client" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.655424 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-ns5px"] Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.655506 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"kube-root-ca.crt" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.655709 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"console-operator-dockercfg-4xjcr" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.655802 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-ns5px" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.655942 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"default-dockercfg-chnjx" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.656203 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-c8dbv" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.657246 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-dns-operator/dns-operator-744455d44c-6tqmn"] Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.658224 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-744455d44c-6tqmn" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.658809 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-ca-bundle" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.659360 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"console-operator-config" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.662170 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"trusted-ca" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.662351 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"openshift-service-ca.crt" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.662512 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.662672 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-config-operator/openshift-config-operator-7777fb866f-q6q9q"] Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.663460 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ingress-operator/ingress-operator-5b745b69d9-dgx4k"] Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.664085 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-dgx4k" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.664527 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-hgp6g"] Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.664677 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-7777fb866f-q6q9q" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.667416 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-hgp6g" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.669146 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-25gsf"] Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.670040 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-25gsf" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.668042 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.674801 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"openshift-service-ca.crt" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.674981 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"kube-root-ca.crt" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.675262 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-config" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.675282 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"openshift-service-ca.crt" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.675368 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"openshift-service-ca.crt" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.675434 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-service-ca.crt" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.675528 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"kube-root-ca.crt" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.675728 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"openshift-service-ca.crt" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.675878 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-dockercfg-vw8fw" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.675985 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"ingress-operator-dockercfg-7lnqk" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.676050 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-serving-cert" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.676157 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"installation-pull-secrets" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.676211 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"dns-operator-dockercfg-9mqw5" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.676276 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"cluster-samples-operator-dockercfg-xpp9w" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.676544 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"samples-operator-tls" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.676688 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"metrics-tls" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.676844 4780 reflector.go:368] Caches populated for *v1.Secret from 
object-"openshift-image-registry"/"registry-dockercfg-kzzsd" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.677011 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-tls" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.677161 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"kube-root-ca.crt" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.677326 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-service-ca.crt" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.677498 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-config" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.677611 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-serving-cert" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.678203 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-nfr2m"] Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.678491 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-dockercfg-xtcjv" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.679227 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"openshift-config-operator-dockercfg-7pc5z" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.679387 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"kube-root-ca.crt" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.679723 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"kube-root-ca.crt" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.679764 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"kube-root-ca.crt" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.680720 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-nfr2m" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.690656 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"metrics-tls" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.691672 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"config-operator-serving-cert" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.693345 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-config" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.693510 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-serving-cert" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.693649 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-root-ca.crt" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.696398 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"trusted-ca" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.698425 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"trusted-ca" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.698720 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-dockercfg-x57mr" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.703274 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/5464f7ae-9634-4208-a5f5-3e6299f72639-audit-dir\") pod \"apiserver-76f77b778f-9hgsw\" (UID: \"5464f7ae-9634-4208-a5f5-3e6299f72639\") " pod="openshift-apiserver/apiserver-76f77b778f-9hgsw" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.703433 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/135e9ec7-c882-4894-a24e-669b09be3f5b-trusted-ca-bundle\") pod \"apiserver-7bbb656c7d-dgfj7\" (UID: \"135e9ec7-c882-4894-a24e-669b09be3f5b\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-dgfj7" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.703623 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bee2f9e8-49ac-4ead-91c3-2ac4138eb042-serving-cert\") pod \"etcd-operator-b45778765-lmmsq\" (UID: \"bee2f9e8-49ac-4ead-91c3-2ac4138eb042\") " pod="openshift-etcd-operator/etcd-operator-b45778765-lmmsq" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.703720 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/0d2560a1-1eb3-4fad-89c4-100985ef6455-client-ca\") pod \"controller-manager-879f6c89f-njfgs\" (UID: \"0d2560a1-1eb3-4fad-89c4-100985ef6455\") " pod="openshift-controller-manager/controller-manager-879f6c89f-njfgs" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.703839 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/fb9e3c6f-29bf-49b8-a0c0-c17447e36e99-client-ca\") pod \"route-controller-manager-6576b87f9c-fw9bc\" 
(UID: \"fb9e3c6f-29bf-49b8-a0c0-c17447e36e99\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-fw9bc" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.703979 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/cc6463b6-4c8f-48d2-8aca-74626aa632eb-config\") pod \"kube-apiserver-operator-766d6c64bb-nfr2m\" (UID: \"cc6463b6-4c8f-48d2-8aca-74626aa632eb\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-nfr2m" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.704154 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/5464f7ae-9634-4208-a5f5-3e6299f72639-etcd-serving-ca\") pod \"apiserver-76f77b778f-9hgsw\" (UID: \"5464f7ae-9634-4208-a5f5-3e6299f72639\") " pod="openshift-apiserver/apiserver-76f77b778f-9hgsw" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.704262 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/bee2f9e8-49ac-4ead-91c3-2ac4138eb042-etcd-ca\") pod \"etcd-operator-b45778765-lmmsq\" (UID: \"bee2f9e8-49ac-4ead-91c3-2ac4138eb042\") " pod="openshift-etcd-operator/etcd-operator-b45778765-lmmsq" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.704346 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7rmfh\" (UniqueName: \"kubernetes.io/projected/bee2f9e8-49ac-4ead-91c3-2ac4138eb042-kube-api-access-7rmfh\") pod \"etcd-operator-b45778765-lmmsq\" (UID: \"bee2f9e8-49ac-4ead-91c3-2ac4138eb042\") " pod="openshift-etcd-operator/etcd-operator-b45778765-lmmsq" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.704437 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/ebdf6b64-3c9b-4207-a4d3-3fc75ef78d68-samples-operator-tls\") pod \"cluster-samples-operator-665b6dd947-c8dbv\" (UID: \"ebdf6b64-3c9b-4207-a4d3-3fc75ef78d68\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-c8dbv" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.704521 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d4217565-e1f8-497f-a961-f7823901afb6-serving-cert\") pod \"openshift-controller-manager-operator-756b6f6bc6-ns5px\" (UID: \"d4217565-e1f8-497f-a961-f7823901afb6\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-ns5px" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.704650 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/af48657d-340c-40dd-8c8c-9e4f7250337b-serving-cert\") pod \"openshift-apiserver-operator-796bbdcf4f-hgp6g\" (UID: \"af48657d-340c-40dd-8c8c-9e4f7250337b\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-hgp6g" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.704781 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/428a3826-00fc-4452-8f22-61d02857b761-machine-api-operator-tls\") pod \"machine-api-operator-5694c8668f-2mb96\" (UID: 
\"428a3826-00fc-4452-8f22-61d02857b761\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-2mb96" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.704887 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qh4dq\" (UniqueName: \"kubernetes.io/projected/027ecd1e-0802-4c3a-b42a-4e272ee3f6fc-kube-api-access-qh4dq\") pod \"openshift-config-operator-7777fb866f-q6q9q\" (UID: \"027ecd1e-0802-4c3a-b42a-4e272ee3f6fc\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-q6q9q" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.705088 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5snbg\" (UniqueName: \"kubernetes.io/projected/041a0a8d-2984-4158-b873-13944248e6ff-kube-api-access-5snbg\") pod \"console-operator-58897d9998-52wql\" (UID: \"041a0a8d-2984-4158-b873-13944248e6ff\") " pod="openshift-console-operator/console-operator-58897d9998-52wql" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.705232 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rhn9q\" (UniqueName: \"kubernetes.io/projected/d4217565-e1f8-497f-a961-f7823901afb6-kube-api-access-rhn9q\") pod \"openshift-controller-manager-operator-756b6f6bc6-ns5px\" (UID: \"d4217565-e1f8-497f-a961-f7823901afb6\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-ns5px" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.705338 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/40ab5014-1713-4cec-8577-58a4573025e8-service-ca-bundle\") pod \"authentication-operator-69f744f599-zvngr\" (UID: \"40ab5014-1713-4cec-8577-58a4573025e8\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-zvngr" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.705426 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/027ecd1e-0802-4c3a-b42a-4e272ee3f6fc-available-featuregates\") pod \"openshift-config-operator-7777fb866f-q6q9q\" (UID: \"027ecd1e-0802-4c3a-b42a-4e272ee3f6fc\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-q6q9q" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.705513 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/135e9ec7-c882-4894-a24e-669b09be3f5b-audit-policies\") pod \"apiserver-7bbb656c7d-dgfj7\" (UID: \"135e9ec7-c882-4894-a24e-669b09be3f5b\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-dgfj7" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.705603 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/bee2f9e8-49ac-4ead-91c3-2ac4138eb042-etcd-client\") pod \"etcd-operator-b45778765-lmmsq\" (UID: \"bee2f9e8-49ac-4ead-91c3-2ac4138eb042\") " pod="openshift-etcd-operator/etcd-operator-b45778765-lmmsq" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.705700 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/b431cc6c-6110-4578-8bd2-55f39d1cbe63-metrics-tls\") pod 
\"dns-operator-744455d44c-6tqmn\" (UID: \"b431cc6c-6110-4578-8bd2-55f39d1cbe63\") " pod="openshift-dns-operator/dns-operator-744455d44c-6tqmn" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.705769 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/5464f7ae-9634-4208-a5f5-3e6299f72639-etcd-serving-ca\") pod \"apiserver-76f77b778f-9hgsw\" (UID: \"5464f7ae-9634-4208-a5f5-3e6299f72639\") " pod="openshift-apiserver/apiserver-76f77b778f-9hgsw" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.705789 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zzhgp\" (UniqueName: \"kubernetes.io/projected/5464f7ae-9634-4208-a5f5-3e6299f72639-kube-api-access-zzhgp\") pod \"apiserver-76f77b778f-9hgsw\" (UID: \"5464f7ae-9634-4208-a5f5-3e6299f72639\") " pod="openshift-apiserver/apiserver-76f77b778f-9hgsw" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.704737 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/135e9ec7-c882-4894-a24e-669b09be3f5b-trusted-ca-bundle\") pod \"apiserver-7bbb656c7d-dgfj7\" (UID: \"135e9ec7-c882-4894-a24e-669b09be3f5b\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-dgfj7" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.703739 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/5464f7ae-9634-4208-a5f5-3e6299f72639-audit-dir\") pod \"apiserver-76f77b778f-9hgsw\" (UID: \"5464f7ae-9634-4208-a5f5-3e6299f72639\") " pod="openshift-apiserver/apiserver-76f77b778f-9hgsw" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.705977 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/dc66eb07-7e9e-4093-9d7c-643164e2c8b7-metrics-tls\") pod \"ingress-operator-5b745b69d9-dgx4k\" (UID: \"dc66eb07-7e9e-4093-9d7c-643164e2c8b7\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-dgx4k" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.706208 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d8851f78-89d9-4d65-b8e3-cb9ad2f74469-config\") pod \"machine-approver-56656f9798-wbpvc\" (UID: \"d8851f78-89d9-4d65-b8e3-cb9ad2f74469\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-wbpvc" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.706301 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/d8851f78-89d9-4d65-b8e3-cb9ad2f74469-auth-proxy-config\") pod \"machine-approver-56656f9798-wbpvc\" (UID: \"d8851f78-89d9-4d65-b8e3-cb9ad2f74469\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-wbpvc" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.710388 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/5464f7ae-9634-4208-a5f5-3e6299f72639-audit\") pod \"apiserver-76f77b778f-9hgsw\" (UID: \"5464f7ae-9634-4208-a5f5-3e6299f72639\") " pod="openshift-apiserver/apiserver-76f77b778f-9hgsw" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.711080 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"image-import-ca\" (UniqueName: 
\"kubernetes.io/configmap/5464f7ae-9634-4208-a5f5-3e6299f72639-image-import-ca\") pod \"apiserver-76f77b778f-9hgsw\" (UID: \"5464f7ae-9634-4208-a5f5-3e6299f72639\") " pod="openshift-apiserver/apiserver-76f77b778f-9hgsw" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.711189 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/bee2f9e8-49ac-4ead-91c3-2ac4138eb042-etcd-service-ca\") pod \"etcd-operator-b45778765-lmmsq\" (UID: \"bee2f9e8-49ac-4ead-91c3-2ac4138eb042\") " pod="openshift-etcd-operator/etcd-operator-b45778765-lmmsq" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.711272 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d4217565-e1f8-497f-a961-f7823901afb6-config\") pod \"openshift-controller-manager-operator-756b6f6bc6-ns5px\" (UID: \"d4217565-e1f8-497f-a961-f7823901afb6\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-ns5px" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.711384 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/40ab5014-1713-4cec-8577-58a4573025e8-trusted-ca-bundle\") pod \"authentication-operator-69f744f599-zvngr\" (UID: \"40ab5014-1713-4cec-8577-58a4573025e8\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-zvngr" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.711493 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/135e9ec7-c882-4894-a24e-669b09be3f5b-etcd-client\") pod \"apiserver-7bbb656c7d-dgfj7\" (UID: \"135e9ec7-c882-4894-a24e-669b09be3f5b\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-dgfj7" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.711642 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/135e9ec7-c882-4894-a24e-669b09be3f5b-audit-dir\") pod \"apiserver-7bbb656c7d-dgfj7\" (UID: \"135e9ec7-c882-4894-a24e-669b09be3f5b\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-dgfj7" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.711815 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/dc66eb07-7e9e-4093-9d7c-643164e2c8b7-bound-sa-token\") pod \"ingress-operator-5b745b69d9-dgx4k\" (UID: \"dc66eb07-7e9e-4093-9d7c-643164e2c8b7\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-dgx4k" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.711956 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0d2560a1-1eb3-4fad-89c4-100985ef6455-serving-cert\") pod \"controller-manager-879f6c89f-njfgs\" (UID: \"0d2560a1-1eb3-4fad-89c4-100985ef6455\") " pod="openshift-controller-manager/controller-manager-879f6c89f-njfgs" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.712045 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s84v8\" (UniqueName: \"kubernetes.io/projected/428a3826-00fc-4452-8f22-61d02857b761-kube-api-access-s84v8\") pod \"machine-api-operator-5694c8668f-2mb96\" (UID: 
\"428a3826-00fc-4452-8f22-61d02857b761\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-2mb96" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.712136 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/5464f7ae-9634-4208-a5f5-3e6299f72639-encryption-config\") pod \"apiserver-76f77b778f-9hgsw\" (UID: \"5464f7ae-9634-4208-a5f5-3e6299f72639\") " pod="openshift-apiserver/apiserver-76f77b778f-9hgsw" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.712217 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/041a0a8d-2984-4158-b873-13944248e6ff-config\") pod \"console-operator-58897d9998-52wql\" (UID: \"041a0a8d-2984-4158-b873-13944248e6ff\") " pod="openshift-console-operator/console-operator-58897d9998-52wql" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.712302 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bee2f9e8-49ac-4ead-91c3-2ac4138eb042-config\") pod \"etcd-operator-b45778765-lmmsq\" (UID: \"bee2f9e8-49ac-4ead-91c3-2ac4138eb042\") " pod="openshift-etcd-operator/etcd-operator-b45778765-lmmsq" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.712385 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/0d2560a1-1eb3-4fad-89c4-100985ef6455-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-njfgs\" (UID: \"0d2560a1-1eb3-4fad-89c4-100985ef6455\") " pod="openshift-controller-manager/controller-manager-879f6c89f-njfgs" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.712514 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/40ab5014-1713-4cec-8577-58a4573025e8-config\") pod \"authentication-operator-69f744f599-zvngr\" (UID: \"40ab5014-1713-4cec-8577-58a4573025e8\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-zvngr" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.712616 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5rwft\" (UniqueName: \"kubernetes.io/projected/40ab5014-1713-4cec-8577-58a4573025e8-kube-api-access-5rwft\") pod \"authentication-operator-69f744f599-zvngr\" (UID: \"40ab5014-1713-4cec-8577-58a4573025e8\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-zvngr" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.712699 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/135e9ec7-c882-4894-a24e-669b09be3f5b-encryption-config\") pod \"apiserver-7bbb656c7d-dgfj7\" (UID: \"135e9ec7-c882-4894-a24e-669b09be3f5b\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-dgfj7" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.712780 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-thnb8\" (UniqueName: \"kubernetes.io/projected/09fe7cda-3948-484d-bcd9-e83d1ac0610a-kube-api-access-thnb8\") pod \"downloads-7954f5f757-fxxzq\" (UID: \"09fe7cda-3948-484d-bcd9-e83d1ac0610a\") " pod="openshift-console/downloads-7954f5f757-fxxzq" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.712863 4780 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/40ab5014-1713-4cec-8577-58a4573025e8-serving-cert\") pod \"authentication-operator-69f744f599-zvngr\" (UID: \"40ab5014-1713-4cec-8577-58a4573025e8\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-zvngr" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.712961 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/5464f7ae-9634-4208-a5f5-3e6299f72639-trusted-ca-bundle\") pod \"apiserver-76f77b778f-9hgsw\" (UID: \"5464f7ae-9634-4208-a5f5-3e6299f72639\") " pod="openshift-apiserver/apiserver-76f77b778f-9hgsw" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.713071 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/041a0a8d-2984-4158-b873-13944248e6ff-serving-cert\") pod \"console-operator-58897d9998-52wql\" (UID: \"041a0a8d-2984-4158-b873-13944248e6ff\") " pod="openshift-console-operator/console-operator-58897d9998-52wql" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.713141 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/console-f9d7485db-2dwc9"] Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.706682 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/135e9ec7-c882-4894-a24e-669b09be3f5b-audit-policies\") pod \"apiserver-7bbb656c7d-dgfj7\" (UID: \"135e9ec7-c882-4894-a24e-669b09be3f5b\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-dgfj7" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.713910 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/console-f9d7485db-2dwc9" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.713153 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hnrs6\" (UniqueName: \"kubernetes.io/projected/af48657d-340c-40dd-8c8c-9e4f7250337b-kube-api-access-hnrs6\") pod \"openshift-apiserver-operator-796bbdcf4f-hgp6g\" (UID: \"af48657d-340c-40dd-8c8c-9e4f7250337b\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-hgp6g" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.717284 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/cc6463b6-4c8f-48d2-8aca-74626aa632eb-kube-api-access\") pod \"kube-apiserver-operator-766d6c64bb-nfr2m\" (UID: \"cc6463b6-4c8f-48d2-8aca-74626aa632eb\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-nfr2m" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.717334 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0d2560a1-1eb3-4fad-89c4-100985ef6455-config\") pod \"controller-manager-879f6c89f-njfgs\" (UID: \"0d2560a1-1eb3-4fad-89c4-100985ef6455\") " pod="openshift-controller-manager/controller-manager-879f6c89f-njfgs" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.717431 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kgxw4\" (UniqueName: \"kubernetes.io/projected/b431cc6c-6110-4578-8bd2-55f39d1cbe63-kube-api-access-kgxw4\") pod \"dns-operator-744455d44c-6tqmn\" (UID: \"b431cc6c-6110-4578-8bd2-55f39d1cbe63\") " pod="openshift-dns-operator/dns-operator-744455d44c-6tqmn" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.717480 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/fb9e3c6f-29bf-49b8-a0c0-c17447e36e99-config\") pod \"route-controller-manager-6576b87f9c-fw9bc\" (UID: \"fb9e3c6f-29bf-49b8-a0c0-c17447e36e99\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-fw9bc" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.717511 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/fb9e3c6f-29bf-49b8-a0c0-c17447e36e99-serving-cert\") pod \"route-controller-manager-6576b87f9c-fw9bc\" (UID: \"fb9e3c6f-29bf-49b8-a0c0-c17447e36e99\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-fw9bc" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.717542 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/dc66eb07-7e9e-4093-9d7c-643164e2c8b7-trusted-ca\") pod \"ingress-operator-5b745b69d9-dgx4k\" (UID: \"dc66eb07-7e9e-4093-9d7c-643164e2c8b7\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-dgx4k" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.717582 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5464f7ae-9634-4208-a5f5-3e6299f72639-config\") pod \"apiserver-76f77b778f-9hgsw\" (UID: \"5464f7ae-9634-4208-a5f5-3e6299f72639\") " pod="openshift-apiserver/apiserver-76f77b778f-9hgsw" Dec 10 10:47:42 crc 
kubenswrapper[4780]: I1210 10:47:42.717611 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5464f7ae-9634-4208-a5f5-3e6299f72639-serving-cert\") pod \"apiserver-76f77b778f-9hgsw\" (UID: \"5464f7ae-9634-4208-a5f5-3e6299f72639\") " pod="openshift-apiserver/apiserver-76f77b778f-9hgsw" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.717639 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/135e9ec7-c882-4894-a24e-669b09be3f5b-etcd-serving-ca\") pod \"apiserver-7bbb656c7d-dgfj7\" (UID: \"135e9ec7-c882-4894-a24e-669b09be3f5b\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-dgfj7" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.717678 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sgrlg\" (UniqueName: \"kubernetes.io/projected/d8851f78-89d9-4d65-b8e3-cb9ad2f74469-kube-api-access-sgrlg\") pod \"machine-approver-56656f9798-wbpvc\" (UID: \"d8851f78-89d9-4d65-b8e3-cb9ad2f74469\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-wbpvc" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.717721 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/cc6463b6-4c8f-48d2-8aca-74626aa632eb-serving-cert\") pod \"kube-apiserver-operator-766d6c64bb-nfr2m\" (UID: \"cc6463b6-4c8f-48d2-8aca-74626aa632eb\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-nfr2m" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.717765 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/135e9ec7-c882-4894-a24e-669b09be3f5b-serving-cert\") pod \"apiserver-7bbb656c7d-dgfj7\" (UID: \"135e9ec7-c882-4894-a24e-669b09be3f5b\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-dgfj7" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.717800 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bgtjj\" (UniqueName: \"kubernetes.io/projected/ebdf6b64-3c9b-4207-a4d3-3fc75ef78d68-kube-api-access-bgtjj\") pod \"cluster-samples-operator-665b6dd947-c8dbv\" (UID: \"ebdf6b64-3c9b-4207-a4d3-3fc75ef78d68\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-c8dbv" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.717855 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/041a0a8d-2984-4158-b873-13944248e6ff-trusted-ca\") pod \"console-operator-58897d9998-52wql\" (UID: \"041a0a8d-2984-4158-b873-13944248e6ff\") " pod="openshift-console-operator/console-operator-58897d9998-52wql" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.717896 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/5464f7ae-9634-4208-a5f5-3e6299f72639-node-pullsecrets\") pod \"apiserver-76f77b778f-9hgsw\" (UID: \"5464f7ae-9634-4208-a5f5-3e6299f72639\") " pod="openshift-apiserver/apiserver-76f77b778f-9hgsw" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.717951 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: 
\"kubernetes.io/configmap/428a3826-00fc-4452-8f22-61d02857b761-config\") pod \"machine-api-operator-5694c8668f-2mb96\" (UID: \"428a3826-00fc-4452-8f22-61d02857b761\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-2mb96" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.717980 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/428a3826-00fc-4452-8f22-61d02857b761-images\") pod \"machine-api-operator-5694c8668f-2mb96\" (UID: \"428a3826-00fc-4452-8f22-61d02857b761\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-2mb96" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.718004 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/d8851f78-89d9-4d65-b8e3-cb9ad2f74469-machine-approver-tls\") pod \"machine-approver-56656f9798-wbpvc\" (UID: \"d8851f78-89d9-4d65-b8e3-cb9ad2f74469\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-wbpvc" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.718077 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/027ecd1e-0802-4c3a-b42a-4e272ee3f6fc-serving-cert\") pod \"openshift-config-operator-7777fb866f-q6q9q\" (UID: \"027ecd1e-0802-4c3a-b42a-4e272ee3f6fc\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-q6q9q" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.718109 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hfgbg\" (UniqueName: \"kubernetes.io/projected/dc66eb07-7e9e-4093-9d7c-643164e2c8b7-kube-api-access-hfgbg\") pod \"ingress-operator-5b745b69d9-dgx4k\" (UID: \"dc66eb07-7e9e-4093-9d7c-643164e2c8b7\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-dgx4k" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.718156 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2xphz\" (UniqueName: \"kubernetes.io/projected/fb9e3c6f-29bf-49b8-a0c0-c17447e36e99-kube-api-access-2xphz\") pod \"route-controller-manager-6576b87f9c-fw9bc\" (UID: \"fb9e3c6f-29bf-49b8-a0c0-c17447e36e99\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-fw9bc" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.718191 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/5464f7ae-9634-4208-a5f5-3e6299f72639-etcd-client\") pod \"apiserver-76f77b778f-9hgsw\" (UID: \"5464f7ae-9634-4208-a5f5-3e6299f72639\") " pod="openshift-apiserver/apiserver-76f77b778f-9hgsw" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.718232 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8nddf\" (UniqueName: \"kubernetes.io/projected/135e9ec7-c882-4894-a24e-669b09be3f5b-kube-api-access-8nddf\") pod \"apiserver-7bbb656c7d-dgfj7\" (UID: \"135e9ec7-c882-4894-a24e-669b09be3f5b\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-dgfj7" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.718258 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/af48657d-340c-40dd-8c8c-9e4f7250337b-config\") pod \"openshift-apiserver-operator-796bbdcf4f-hgp6g\" (UID: 
\"af48657d-340c-40dd-8c8c-9e4f7250337b\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-hgp6g" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.718284 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7f6v7\" (UniqueName: \"kubernetes.io/projected/0d2560a1-1eb3-4fad-89c4-100985ef6455-kube-api-access-7f6v7\") pod \"controller-manager-879f6c89f-njfgs\" (UID: \"0d2560a1-1eb3-4fad-89c4-100985ef6455\") " pod="openshift-controller-manager/controller-manager-879f6c89f-njfgs" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.718955 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/5464f7ae-9634-4208-a5f5-3e6299f72639-audit\") pod \"apiserver-76f77b778f-9hgsw\" (UID: \"5464f7ae-9634-4208-a5f5-3e6299f72639\") " pod="openshift-apiserver/apiserver-76f77b778f-9hgsw" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.707456 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/40ab5014-1713-4cec-8577-58a4573025e8-service-ca-bundle\") pod \"authentication-operator-69f744f599-zvngr\" (UID: \"40ab5014-1713-4cec-8577-58a4573025e8\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-zvngr" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.720588 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/5464f7ae-9634-4208-a5f5-3e6299f72639-image-import-ca\") pod \"apiserver-76f77b778f-9hgsw\" (UID: \"5464f7ae-9634-4208-a5f5-3e6299f72639\") " pod="openshift-apiserver/apiserver-76f77b778f-9hgsw" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.705871 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/fb9e3c6f-29bf-49b8-a0c0-c17447e36e99-client-ca\") pod \"route-controller-manager-6576b87f9c-fw9bc\" (UID: \"fb9e3c6f-29bf-49b8-a0c0-c17447e36e99\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-fw9bc" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.708286 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d8851f78-89d9-4d65-b8e3-cb9ad2f74469-config\") pod \"machine-approver-56656f9798-wbpvc\" (UID: \"d8851f78-89d9-4d65-b8e3-cb9ad2f74469\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-wbpvc" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.708605 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/d8851f78-89d9-4d65-b8e3-cb9ad2f74469-auth-proxy-config\") pod \"machine-approver-56656f9798-wbpvc\" (UID: \"d8851f78-89d9-4d65-b8e3-cb9ad2f74469\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-wbpvc" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.725859 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/5464f7ae-9634-4208-a5f5-3e6299f72639-node-pullsecrets\") pod \"apiserver-76f77b778f-9hgsw\" (UID: \"5464f7ae-9634-4208-a5f5-3e6299f72639\") " pod="openshift-apiserver/apiserver-76f77b778f-9hgsw" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.726775 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" 
(UniqueName: \"kubernetes.io/configmap/40ab5014-1713-4cec-8577-58a4573025e8-config\") pod \"authentication-operator-69f744f599-zvngr\" (UID: \"40ab5014-1713-4cec-8577-58a4573025e8\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-zvngr" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.726937 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5464f7ae-9634-4208-a5f5-3e6299f72639-config\") pod \"apiserver-76f77b778f-9hgsw\" (UID: \"5464f7ae-9634-4208-a5f5-3e6299f72639\") " pod="openshift-apiserver/apiserver-76f77b778f-9hgsw" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.770332 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/5464f7ae-9634-4208-a5f5-3e6299f72639-trusted-ca-bundle\") pod \"apiserver-76f77b778f-9hgsw\" (UID: \"5464f7ae-9634-4208-a5f5-3e6299f72639\") " pod="openshift-apiserver/apiserver-76f77b778f-9hgsw" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.771512 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/135e9ec7-c882-4894-a24e-669b09be3f5b-audit-dir\") pod \"apiserver-7bbb656c7d-dgfj7\" (UID: \"135e9ec7-c882-4894-a24e-669b09be3f5b\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-dgfj7" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.772624 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-b6pdb"] Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.774331 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-b6pdb" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.775368 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/fb9e3c6f-29bf-49b8-a0c0-c17447e36e99-config\") pod \"route-controller-manager-6576b87f9c-fw9bc\" (UID: \"fb9e3c6f-29bf-49b8-a0c0-c17447e36e99\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-fw9bc" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.784223 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/40ab5014-1713-4cec-8577-58a4573025e8-trusted-ca-bundle\") pod \"authentication-operator-69f744f599-zvngr\" (UID: \"40ab5014-1713-4cec-8577-58a4573025e8\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-zvngr" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.789060 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/135e9ec7-c882-4894-a24e-669b09be3f5b-etcd-serving-ca\") pod \"apiserver-7bbb656c7d-dgfj7\" (UID: \"135e9ec7-c882-4894-a24e-669b09be3f5b\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-dgfj7" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.795138 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/428a3826-00fc-4452-8f22-61d02857b761-machine-api-operator-tls\") pod \"machine-api-operator-5694c8668f-2mb96\" (UID: \"428a3826-00fc-4452-8f22-61d02857b761\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-2mb96" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 
10:47:42.796000 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/428a3826-00fc-4452-8f22-61d02857b761-config\") pod \"machine-api-operator-5694c8668f-2mb96\" (UID: \"428a3826-00fc-4452-8f22-61d02857b761\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-2mb96" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.798867 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-qllmj"] Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.795112 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"images\" (UniqueName: \"kubernetes.io/configmap/428a3826-00fc-4452-8f22-61d02857b761-images\") pod \"machine-api-operator-5694c8668f-2mb96\" (UID: \"428a3826-00fc-4452-8f22-61d02857b761\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-2mb96" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.798975 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/135e9ec7-c882-4894-a24e-669b09be3f5b-etcd-client\") pod \"apiserver-7bbb656c7d-dgfj7\" (UID: \"135e9ec7-c882-4894-a24e-669b09be3f5b\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-dgfj7" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.799750 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/fb9e3c6f-29bf-49b8-a0c0-c17447e36e99-serving-cert\") pod \"route-controller-manager-6576b87f9c-fw9bc\" (UID: \"fb9e3c6f-29bf-49b8-a0c0-c17447e36e99\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-fw9bc" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.800205 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/135e9ec7-c882-4894-a24e-669b09be3f5b-encryption-config\") pod \"apiserver-7bbb656c7d-dgfj7\" (UID: \"135e9ec7-c882-4894-a24e-669b09be3f5b\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-dgfj7" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.800828 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/d8851f78-89d9-4d65-b8e3-cb9ad2f74469-machine-approver-tls\") pod \"machine-approver-56656f9798-wbpvc\" (UID: \"d8851f78-89d9-4d65-b8e3-cb9ad2f74469\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-wbpvc" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.801137 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/40ab5014-1713-4cec-8577-58a4573025e8-serving-cert\") pod \"authentication-operator-69f744f599-zvngr\" (UID: \"40ab5014-1713-4cec-8577-58a4573025e8\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-zvngr" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.801487 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"oauth-serving-cert" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.802365 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-dockercfg-f62pw" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.806421 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"encryption-config\" (UniqueName: 
\"kubernetes.io/secret/5464f7ae-9634-4208-a5f5-3e6299f72639-encryption-config\") pod \"apiserver-76f77b778f-9hgsw\" (UID: \"5464f7ae-9634-4208-a5f5-3e6299f72639\") " pod="openshift-apiserver/apiserver-76f77b778f-9hgsw" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.809316 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5464f7ae-9634-4208-a5f5-3e6299f72639-serving-cert\") pod \"apiserver-76f77b778f-9hgsw\" (UID: \"5464f7ae-9634-4208-a5f5-3e6299f72639\") " pod="openshift-apiserver/apiserver-76f77b778f-9hgsw" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.819604 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zzhgp\" (UniqueName: \"kubernetes.io/projected/5464f7ae-9634-4208-a5f5-3e6299f72639-kube-api-access-zzhgp\") pod \"apiserver-76f77b778f-9hgsw\" (UID: \"5464f7ae-9634-4208-a5f5-3e6299f72639\") " pod="openshift-apiserver/apiserver-76f77b778f-9hgsw" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.829311 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/5464f7ae-9634-4208-a5f5-3e6299f72639-etcd-client\") pod \"apiserver-76f77b778f-9hgsw\" (UID: \"5464f7ae-9634-4208-a5f5-3e6299f72639\") " pod="openshift-apiserver/apiserver-76f77b778f-9hgsw" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.831624 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-smgl2\" (UniqueName: \"kubernetes.io/projected/b7abac51-adc5-42fa-9084-033e4e7e7acb-kube-api-access-smgl2\") pod \"image-registry-697d97f7c8-25gsf\" (UID: \"b7abac51-adc5-42fa-9084-033e4e7e7acb\") " pod="openshift-image-registry/image-registry-697d97f7c8-25gsf" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.831758 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/dc66eb07-7e9e-4093-9d7c-643164e2c8b7-bound-sa-token\") pod \"ingress-operator-5b745b69d9-dgx4k\" (UID: \"dc66eb07-7e9e-4093-9d7c-643164e2c8b7\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-dgx4k" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.831785 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0d2560a1-1eb3-4fad-89c4-100985ef6455-serving-cert\") pod \"controller-manager-879f6c89f-njfgs\" (UID: \"0d2560a1-1eb3-4fad-89c4-100985ef6455\") " pod="openshift-controller-manager/controller-manager-879f6c89f-njfgs" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.831807 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/041a0a8d-2984-4158-b873-13944248e6ff-config\") pod \"console-operator-58897d9998-52wql\" (UID: \"041a0a8d-2984-4158-b873-13944248e6ff\") " pod="openshift-console-operator/console-operator-58897d9998-52wql" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.831838 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bee2f9e8-49ac-4ead-91c3-2ac4138eb042-config\") pod \"etcd-operator-b45778765-lmmsq\" (UID: \"bee2f9e8-49ac-4ead-91c3-2ac4138eb042\") " pod="openshift-etcd-operator/etcd-operator-b45778765-lmmsq" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.831861 4780 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/0d2560a1-1eb3-4fad-89c4-100985ef6455-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-njfgs\" (UID: \"0d2560a1-1eb3-4fad-89c4-100985ef6455\") " pod="openshift-controller-manager/controller-manager-879f6c89f-njfgs" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.831944 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-25gsf\" (UID: \"b7abac51-adc5-42fa-9084-033e4e7e7acb\") " pod="openshift-image-registry/image-registry-697d97f7c8-25gsf" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.831995 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-thnb8\" (UniqueName: \"kubernetes.io/projected/09fe7cda-3948-484d-bcd9-e83d1ac0610a-kube-api-access-thnb8\") pod \"downloads-7954f5f757-fxxzq\" (UID: \"09fe7cda-3948-484d-bcd9-e83d1ac0610a\") " pod="openshift-console/downloads-7954f5f757-fxxzq" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.832039 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/041a0a8d-2984-4158-b873-13944248e6ff-serving-cert\") pod \"console-operator-58897d9998-52wql\" (UID: \"041a0a8d-2984-4158-b873-13944248e6ff\") " pod="openshift-console-operator/console-operator-58897d9998-52wql" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.832058 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hnrs6\" (UniqueName: \"kubernetes.io/projected/af48657d-340c-40dd-8c8c-9e4f7250337b-kube-api-access-hnrs6\") pod \"openshift-apiserver-operator-796bbdcf4f-hgp6g\" (UID: \"af48657d-340c-40dd-8c8c-9e4f7250337b\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-hgp6g" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.832999 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/cc6463b6-4c8f-48d2-8aca-74626aa632eb-kube-api-access\") pod \"kube-apiserver-operator-766d6c64bb-nfr2m\" (UID: \"cc6463b6-4c8f-48d2-8aca-74626aa632eb\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-nfr2m" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.833079 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0d2560a1-1eb3-4fad-89c4-100985ef6455-config\") pod \"controller-manager-879f6c89f-njfgs\" (UID: \"0d2560a1-1eb3-4fad-89c4-100985ef6455\") " pod="openshift-controller-manager/controller-manager-879f6c89f-njfgs" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.833137 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kgxw4\" (UniqueName: \"kubernetes.io/projected/b431cc6c-6110-4578-8bd2-55f39d1cbe63-kube-api-access-kgxw4\") pod \"dns-operator-744455d44c-6tqmn\" (UID: \"b431cc6c-6110-4578-8bd2-55f39d1cbe63\") " pod="openshift-dns-operator/dns-operator-744455d44c-6tqmn" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.833177 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: 
\"kubernetes.io/configmap/dc66eb07-7e9e-4093-9d7c-643164e2c8b7-trusted-ca\") pod \"ingress-operator-5b745b69d9-dgx4k\" (UID: \"dc66eb07-7e9e-4093-9d7c-643164e2c8b7\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-dgx4k" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.833235 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/b7abac51-adc5-42fa-9084-033e4e7e7acb-bound-sa-token\") pod \"image-registry-697d97f7c8-25gsf\" (UID: \"b7abac51-adc5-42fa-9084-033e4e7e7acb\") " pod="openshift-image-registry/image-registry-697d97f7c8-25gsf" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.833273 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/cc6463b6-4c8f-48d2-8aca-74626aa632eb-serving-cert\") pod \"kube-apiserver-operator-766d6c64bb-nfr2m\" (UID: \"cc6463b6-4c8f-48d2-8aca-74626aa632eb\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-nfr2m" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.833376 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bgtjj\" (UniqueName: \"kubernetes.io/projected/ebdf6b64-3c9b-4207-a4d3-3fc75ef78d68-kube-api-access-bgtjj\") pod \"cluster-samples-operator-665b6dd947-c8dbv\" (UID: \"ebdf6b64-3c9b-4207-a4d3-3fc75ef78d68\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-c8dbv" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.833398 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/b7abac51-adc5-42fa-9084-033e4e7e7acb-installation-pull-secrets\") pod \"image-registry-697d97f7c8-25gsf\" (UID: \"b7abac51-adc5-42fa-9084-033e4e7e7acb\") " pod="openshift-image-registry/image-registry-697d97f7c8-25gsf" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.833424 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/041a0a8d-2984-4158-b873-13944248e6ff-trusted-ca\") pod \"console-operator-58897d9998-52wql\" (UID: \"041a0a8d-2984-4158-b873-13944248e6ff\") " pod="openshift-console-operator/console-operator-58897d9998-52wql" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.833464 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/027ecd1e-0802-4c3a-b42a-4e272ee3f6fc-serving-cert\") pod \"openshift-config-operator-7777fb866f-q6q9q\" (UID: \"027ecd1e-0802-4c3a-b42a-4e272ee3f6fc\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-q6q9q" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.833486 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hfgbg\" (UniqueName: \"kubernetes.io/projected/dc66eb07-7e9e-4093-9d7c-643164e2c8b7-kube-api-access-hfgbg\") pod \"ingress-operator-5b745b69d9-dgx4k\" (UID: \"dc66eb07-7e9e-4093-9d7c-643164e2c8b7\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-dgx4k" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.833552 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7f6v7\" (UniqueName: \"kubernetes.io/projected/0d2560a1-1eb3-4fad-89c4-100985ef6455-kube-api-access-7f6v7\") pod 
\"controller-manager-879f6c89f-njfgs\" (UID: \"0d2560a1-1eb3-4fad-89c4-100985ef6455\") " pod="openshift-controller-manager/controller-manager-879f6c89f-njfgs" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.833581 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-oauth-config" Dec 10 10:47:42 crc kubenswrapper[4780]: E1210 10:47:42.833729 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-10 10:47:43.333697109 +0000 UTC m=+168.187090552 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-25gsf" (UID: "b7abac51-adc5-42fa-9084-033e4e7e7acb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.834289 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-kc2sh"] Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.833594 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/af48657d-340c-40dd-8c8c-9e4f7250337b-config\") pod \"openshift-apiserver-operator-796bbdcf4f-hgp6g\" (UID: \"af48657d-340c-40dd-8c8c-9e4f7250337b\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-hgp6g" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.834769 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bee2f9e8-49ac-4ead-91c3-2ac4138eb042-serving-cert\") pod \"etcd-operator-b45778765-lmmsq\" (UID: \"bee2f9e8-49ac-4ead-91c3-2ac4138eb042\") " pod="openshift-etcd-operator/etcd-operator-b45778765-lmmsq" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.834937 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/0d2560a1-1eb3-4fad-89c4-100985ef6455-client-ca\") pod \"controller-manager-879f6c89f-njfgs\" (UID: \"0d2560a1-1eb3-4fad-89c4-100985ef6455\") " pod="openshift-controller-manager/controller-manager-879f6c89f-njfgs" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.835106 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/cc6463b6-4c8f-48d2-8aca-74626aa632eb-config\") pod \"kube-apiserver-operator-766d6c64bb-nfr2m\" (UID: \"cc6463b6-4c8f-48d2-8aca-74626aa632eb\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-nfr2m" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.835266 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/bee2f9e8-49ac-4ead-91c3-2ac4138eb042-etcd-ca\") pod \"etcd-operator-b45778765-lmmsq\" (UID: \"bee2f9e8-49ac-4ead-91c3-2ac4138eb042\") " pod="openshift-etcd-operator/etcd-operator-b45778765-lmmsq" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.835397 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"kube-api-access-7rmfh\" (UniqueName: \"kubernetes.io/projected/bee2f9e8-49ac-4ead-91c3-2ac4138eb042-kube-api-access-7rmfh\") pod \"etcd-operator-b45778765-lmmsq\" (UID: \"bee2f9e8-49ac-4ead-91c3-2ac4138eb042\") " pod="openshift-etcd-operator/etcd-operator-b45778765-lmmsq" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.836114 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/ebdf6b64-3c9b-4207-a4d3-3fc75ef78d68-samples-operator-tls\") pod \"cluster-samples-operator-665b6dd947-c8dbv\" (UID: \"ebdf6b64-3c9b-4207-a4d3-3fc75ef78d68\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-c8dbv" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.836166 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d4217565-e1f8-497f-a961-f7823901afb6-serving-cert\") pod \"openshift-controller-manager-operator-756b6f6bc6-ns5px\" (UID: \"d4217565-e1f8-497f-a961-f7823901afb6\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-ns5px" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.836203 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/af48657d-340c-40dd-8c8c-9e4f7250337b-serving-cert\") pod \"openshift-apiserver-operator-796bbdcf4f-hgp6g\" (UID: \"af48657d-340c-40dd-8c8c-9e4f7250337b\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-hgp6g" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.836229 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5snbg\" (UniqueName: \"kubernetes.io/projected/041a0a8d-2984-4158-b873-13944248e6ff-kube-api-access-5snbg\") pod \"console-operator-58897d9998-52wql\" (UID: \"041a0a8d-2984-4158-b873-13944248e6ff\") " pod="openshift-console-operator/console-operator-58897d9998-52wql" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.836251 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rhn9q\" (UniqueName: \"kubernetes.io/projected/d4217565-e1f8-497f-a961-f7823901afb6-kube-api-access-rhn9q\") pod \"openshift-controller-manager-operator-756b6f6bc6-ns5px\" (UID: \"d4217565-e1f8-497f-a961-f7823901afb6\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-ns5px" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.836278 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qh4dq\" (UniqueName: \"kubernetes.io/projected/027ecd1e-0802-4c3a-b42a-4e272ee3f6fc-kube-api-access-qh4dq\") pod \"openshift-config-operator-7777fb866f-q6q9q\" (UID: \"027ecd1e-0802-4c3a-b42a-4e272ee3f6fc\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-q6q9q" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.836302 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/b7abac51-adc5-42fa-9084-033e4e7e7acb-registry-tls\") pod \"image-registry-697d97f7c8-25gsf\" (UID: \"b7abac51-adc5-42fa-9084-033e4e7e7acb\") " pod="openshift-image-registry/image-registry-697d97f7c8-25gsf" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.836323 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume 
started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b7abac51-adc5-42fa-9084-033e4e7e7acb-trusted-ca\") pod \"image-registry-697d97f7c8-25gsf\" (UID: \"b7abac51-adc5-42fa-9084-033e4e7e7acb\") " pod="openshift-image-registry/image-registry-697d97f7c8-25gsf" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.836366 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/027ecd1e-0802-4c3a-b42a-4e272ee3f6fc-available-featuregates\") pod \"openshift-config-operator-7777fb866f-q6q9q\" (UID: \"027ecd1e-0802-4c3a-b42a-4e272ee3f6fc\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-q6q9q" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.836404 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/bee2f9e8-49ac-4ead-91c3-2ac4138eb042-etcd-client\") pod \"etcd-operator-b45778765-lmmsq\" (UID: \"bee2f9e8-49ac-4ead-91c3-2ac4138eb042\") " pod="openshift-etcd-operator/etcd-operator-b45778765-lmmsq" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.836423 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/b431cc6c-6110-4578-8bd2-55f39d1cbe63-metrics-tls\") pod \"dns-operator-744455d44c-6tqmn\" (UID: \"b431cc6c-6110-4578-8bd2-55f39d1cbe63\") " pod="openshift-dns-operator/dns-operator-744455d44c-6tqmn" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.835042 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/af48657d-340c-40dd-8c8c-9e4f7250337b-config\") pod \"openshift-apiserver-operator-796bbdcf4f-hgp6g\" (UID: \"af48657d-340c-40dd-8c8c-9e4f7250337b\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-hgp6g" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.836454 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/dc66eb07-7e9e-4093-9d7c-643164e2c8b7-metrics-tls\") pod \"ingress-operator-5b745b69d9-dgx4k\" (UID: \"dc66eb07-7e9e-4093-9d7c-643164e2c8b7\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-dgx4k" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.836506 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/b7abac51-adc5-42fa-9084-033e4e7e7acb-registry-certificates\") pod \"image-registry-697d97f7c8-25gsf\" (UID: \"b7abac51-adc5-42fa-9084-033e4e7e7acb\") " pod="openshift-image-registry/image-registry-697d97f7c8-25gsf" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.836536 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/bee2f9e8-49ac-4ead-91c3-2ac4138eb042-etcd-service-ca\") pod \"etcd-operator-b45778765-lmmsq\" (UID: \"bee2f9e8-49ac-4ead-91c3-2ac4138eb042\") " pod="openshift-etcd-operator/etcd-operator-b45778765-lmmsq" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.836555 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d4217565-e1f8-497f-a961-f7823901afb6-config\") pod \"openshift-controller-manager-operator-756b6f6bc6-ns5px\" (UID: \"d4217565-e1f8-497f-a961-f7823901afb6\") " 
pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-ns5px" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.836578 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/b7abac51-adc5-42fa-9084-033e4e7e7acb-ca-trust-extracted\") pod \"image-registry-697d97f7c8-25gsf\" (UID: \"b7abac51-adc5-42fa-9084-033e4e7e7acb\") " pod="openshift-image-registry/image-registry-697d97f7c8-25gsf" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.836579 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/135e9ec7-c882-4894-a24e-669b09be3f5b-serving-cert\") pod \"apiserver-7bbb656c7d-dgfj7\" (UID: \"135e9ec7-c882-4894-a24e-669b09be3f5b\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-dgfj7" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.835430 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-kc2sh" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.837413 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/cc6463b6-4c8f-48d2-8aca-74626aa632eb-config\") pod \"kube-apiserver-operator-766d6c64bb-nfr2m\" (UID: \"cc6463b6-4c8f-48d2-8aca-74626aa632eb\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-nfr2m" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.837493 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/0d2560a1-1eb3-4fad-89c4-100985ef6455-client-ca\") pod \"controller-manager-879f6c89f-njfgs\" (UID: \"0d2560a1-1eb3-4fad-89c4-100985ef6455\") " pod="openshift-controller-manager/controller-manager-879f6c89f-njfgs" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.838695 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/041a0a8d-2984-4158-b873-13944248e6ff-config\") pod \"console-operator-58897d9998-52wql\" (UID: \"041a0a8d-2984-4158-b873-13944248e6ff\") " pod="openshift-console-operator/console-operator-58897d9998-52wql" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.840365 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/0d2560a1-1eb3-4fad-89c4-100985ef6455-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-njfgs\" (UID: \"0d2560a1-1eb3-4fad-89c4-100985ef6455\") " pod="openshift-controller-manager/controller-manager-879f6c89f-njfgs" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.844128 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/041a0a8d-2984-4158-b873-13944248e6ff-trusted-ca\") pod \"console-operator-58897d9998-52wql\" (UID: \"041a0a8d-2984-4158-b873-13944248e6ff\") " pod="openshift-console-operator/console-operator-58897d9998-52wql" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.844831 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0d2560a1-1eb3-4fad-89c4-100985ef6455-serving-cert\") pod \"controller-manager-879f6c89f-njfgs\" (UID: \"0d2560a1-1eb3-4fad-89c4-100985ef6455\") " 
pod="openshift-controller-manager/controller-manager-879f6c89f-njfgs" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.835483 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-qllmj" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.846494 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/041a0a8d-2984-4158-b873-13944248e6ff-serving-cert\") pod \"console-operator-58897d9998-52wql\" (UID: \"041a0a8d-2984-4158-b873-13944248e6ff\") " pod="openshift-console-operator/console-operator-58897d9998-52wql" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.846793 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-serving-cert" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.847638 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"console-config" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.848902 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d4217565-e1f8-497f-a961-f7823901afb6-config\") pod \"openshift-controller-manager-operator-756b6f6bc6-ns5px\" (UID: \"d4217565-e1f8-497f-a961-f7823901afb6\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-ns5px" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.851180 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0d2560a1-1eb3-4fad-89c4-100985ef6455-config\") pod \"controller-manager-879f6c89f-njfgs\" (UID: \"0d2560a1-1eb3-4fad-89c4-100985ef6455\") " pod="openshift-controller-manager/controller-manager-879f6c89f-njfgs" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.851571 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/bee2f9e8-49ac-4ead-91c3-2ac4138eb042-etcd-client\") pod \"etcd-operator-b45778765-lmmsq\" (UID: \"bee2f9e8-49ac-4ead-91c3-2ac4138eb042\") " pod="openshift-etcd-operator/etcd-operator-b45778765-lmmsq" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.851786 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bee2f9e8-49ac-4ead-91c3-2ac4138eb042-serving-cert\") pod \"etcd-operator-b45778765-lmmsq\" (UID: \"bee2f9e8-49ac-4ead-91c3-2ac4138eb042\") " pod="openshift-etcd-operator/etcd-operator-b45778765-lmmsq" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.851982 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/dc66eb07-7e9e-4093-9d7c-643164e2c8b7-trusted-ca\") pod \"ingress-operator-5b745b69d9-dgx4k\" (UID: \"dc66eb07-7e9e-4093-9d7c-643164e2c8b7\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-dgx4k" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.853129 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d4217565-e1f8-497f-a961-f7823901afb6-serving-cert\") pod \"openshift-controller-manager-operator-756b6f6bc6-ns5px\" (UID: \"d4217565-e1f8-497f-a961-f7823901afb6\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-ns5px" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.853791 
4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bee2f9e8-49ac-4ead-91c3-2ac4138eb042-config\") pod \"etcd-operator-b45778765-lmmsq\" (UID: \"bee2f9e8-49ac-4ead-91c3-2ac4138eb042\") " pod="openshift-etcd-operator/etcd-operator-b45778765-lmmsq" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.855862 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"service-ca" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.858357 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/bee2f9e8-49ac-4ead-91c3-2ac4138eb042-etcd-ca\") pod \"etcd-operator-b45778765-lmmsq\" (UID: \"bee2f9e8-49ac-4ead-91c3-2ac4138eb042\") " pod="openshift-etcd-operator/etcd-operator-b45778765-lmmsq" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.859046 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/027ecd1e-0802-4c3a-b42a-4e272ee3f6fc-available-featuregates\") pod \"openshift-config-operator-7777fb866f-q6q9q\" (UID: \"027ecd1e-0802-4c3a-b42a-4e272ee3f6fc\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-q6q9q" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.862432 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/bee2f9e8-49ac-4ead-91c3-2ac4138eb042-etcd-service-ca\") pod \"etcd-operator-b45778765-lmmsq\" (UID: \"bee2f9e8-49ac-4ead-91c3-2ac4138eb042\") " pod="openshift-etcd-operator/etcd-operator-b45778765-lmmsq" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.866422 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/027ecd1e-0802-4c3a-b42a-4e272ee3f6fc-serving-cert\") pod \"openshift-config-operator-7777fb866f-q6q9q\" (UID: \"027ecd1e-0802-4c3a-b42a-4e272ee3f6fc\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-q6q9q" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.869993 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-56x4k"] Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.871057 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-56x4k" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.871659 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-controller-84d6567774-jmkwd"] Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.871690 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-76f77b778f-9hgsw" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.873361 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-jmkwd" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.875317 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/b431cc6c-6110-4578-8bd2-55f39d1cbe63-metrics-tls\") pod \"dns-operator-744455d44c-6tqmn\" (UID: \"b431cc6c-6110-4578-8bd2-55f39d1cbe63\") " pod="openshift-dns-operator/dns-operator-744455d44c-6tqmn" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.877811 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/cc6463b6-4c8f-48d2-8aca-74626aa632eb-serving-cert\") pod \"kube-apiserver-operator-766d6c64bb-nfr2m\" (UID: \"cc6463b6-4c8f-48d2-8aca-74626aa632eb\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-nfr2m" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.877906 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ingress/router-default-5444994796-qbmwm"] Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.879140 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress/router-default-5444994796-qbmwm" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.881562 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/af48657d-340c-40dd-8c8c-9e4f7250337b-serving-cert\") pod \"openshift-apiserver-operator-796bbdcf4f-hgp6g\" (UID: \"af48657d-340c-40dd-8c8c-9e4f7250337b\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-hgp6g" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.886624 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/ebdf6b64-3c9b-4207-a4d3-3fc75ef78d68-samples-operator-tls\") pod \"cluster-samples-operator-665b6dd947-c8dbv\" (UID: \"ebdf6b64-3c9b-4207-a4d3-3fc75ef78d68\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-c8dbv" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.889960 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/dc66eb07-7e9e-4093-9d7c-643164e2c8b7-metrics-tls\") pod \"ingress-operator-5b745b69d9-dgx4k\" (UID: \"dc66eb07-7e9e-4093-9d7c-643164e2c8b7\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-dgx4k" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.893884 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"trusted-ca-bundle" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.899573 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-lrzpt"] Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.901120 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-6ffpq"] Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.901269 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-lrzpt" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.901943 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-nmvlq"] Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.902354 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-nmvlq" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.902504 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-6ffpq" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.903125 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-storage-version-migrator/migrator-59844c95c7-x9cz8"] Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.903665 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-x9cz8" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.908526 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-operator-tls" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.909404 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-operator-74547568cd-rlc8b"] Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.910226 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-rlc8b" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.913900 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s84v8\" (UniqueName: \"kubernetes.io/projected/428a3826-00fc-4452-8f22-61d02857b761-kube-api-access-s84v8\") pod \"machine-api-operator-5694c8668f-2mb96\" (UID: \"428a3826-00fc-4452-8f22-61d02857b761\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-2mb96" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.914949 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/catalog-operator-68c6474976-4sfhd"] Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.919704 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-csjzj"] Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.920366 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-st68f"] Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.920575 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-4sfhd" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.922372 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/multus-admission-controller-857f4d67dd-v57bz"] Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.922936 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29422725-lvhr4"] Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.923372 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29422725-lvhr4" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.923709 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-csjzj" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.923837 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-st68f" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.923974 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-857f4d67dd-v57bz" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.925959 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-service-ca/service-ca-9c57cc56f-6z8d8"] Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.926629 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-9c57cc56f-6z8d8" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.934839 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"cluster-image-registry-operator-dockercfg-m4qtx" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.937559 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.937955 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/b7abac51-adc5-42fa-9084-033e4e7e7acb-registry-certificates\") pod \"image-registry-697d97f7c8-25gsf\" (UID: \"b7abac51-adc5-42fa-9084-033e4e7e7acb\") " pod="openshift-image-registry/image-registry-697d97f7c8-25gsf" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.938047 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fllkn\" (UniqueName: \"kubernetes.io/projected/1d4846ae-8aeb-4940-adb4-ad6726532d8b-kube-api-access-fllkn\") pod \"machine-config-controller-84d6567774-jmkwd\" (UID: \"1d4846ae-8aeb-4940-adb4-ad6726532d8b\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-jmkwd" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.938075 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/b7abac51-adc5-42fa-9084-033e4e7e7acb-ca-trust-extracted\") pod \"image-registry-697d97f7c8-25gsf\" (UID: \"b7abac51-adc5-42fa-9084-033e4e7e7acb\") " pod="openshift-image-registry/image-registry-697d97f7c8-25gsf" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.938116 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/6e560d85-bf0c-4604-9f52-d46fe96b6fe7-v4-0-config-system-cliconfig\") pod \"oauth-openshift-558db77b4-qllmj\" (UID: \"6e560d85-bf0c-4604-9f52-d46fe96b6fe7\") " pod="openshift-authentication/oauth-openshift-558db77b4-qllmj" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.938143 4780 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/6e560d85-bf0c-4604-9f52-d46fe96b6fe7-v4-0-config-system-service-ca\") pod \"oauth-openshift-558db77b4-qllmj\" (UID: \"6e560d85-bf0c-4604-9f52-d46fe96b6fe7\") " pod="openshift-authentication/oauth-openshift-558db77b4-qllmj" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.938210 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-smgl2\" (UniqueName: \"kubernetes.io/projected/b7abac51-adc5-42fa-9084-033e4e7e7acb-kube-api-access-smgl2\") pod \"image-registry-697d97f7c8-25gsf\" (UID: \"b7abac51-adc5-42fa-9084-033e4e7e7acb\") " pod="openshift-image-registry/image-registry-697d97f7c8-25gsf" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.938236 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/52170468-2209-45d0-84f3-d223b1052bf9-image-registry-operator-tls\") pod \"cluster-image-registry-operator-dc59b4c8b-b6pdb\" (UID: \"52170468-2209-45d0-84f3-d223b1052bf9\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-b6pdb" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.938275 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/52170468-2209-45d0-84f3-d223b1052bf9-bound-sa-token\") pod \"cluster-image-registry-operator-dc59b4c8b-b6pdb\" (UID: \"52170468-2209-45d0-84f3-d223b1052bf9\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-b6pdb" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.938315 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4g7cj\" (UniqueName: \"kubernetes.io/projected/52170468-2209-45d0-84f3-d223b1052bf9-kube-api-access-4g7cj\") pod \"cluster-image-registry-operator-dc59b4c8b-b6pdb\" (UID: \"52170468-2209-45d0-84f3-d223b1052bf9\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-b6pdb" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.938343 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/6e560d85-bf0c-4604-9f52-d46fe96b6fe7-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-558db77b4-qllmj\" (UID: \"6e560d85-bf0c-4604-9f52-d46fe96b6fe7\") " pod="openshift-authentication/oauth-openshift-558db77b4-qllmj" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.938419 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/6e560d85-bf0c-4604-9f52-d46fe96b6fe7-v4-0-config-system-router-certs\") pod \"oauth-openshift-558db77b4-qllmj\" (UID: \"6e560d85-bf0c-4604-9f52-d46fe96b6fe7\") " pod="openshift-authentication/oauth-openshift-558db77b4-qllmj" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.938459 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bmjht\" (UniqueName: \"kubernetes.io/projected/c2f76bac-1abe-4877-8e25-768778bf5edd-kube-api-access-bmjht\") pod \"kube-storage-version-migrator-operator-b67b599dd-56x4k\" (UID: 
\"c2f76bac-1abe-4877-8e25-768778bf5edd\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-56x4k" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.938501 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/6e560d85-bf0c-4604-9f52-d46fe96b6fe7-audit-dir\") pod \"oauth-openshift-558db77b4-qllmj\" (UID: \"6e560d85-bf0c-4604-9f52-d46fe96b6fe7\") " pod="openshift-authentication/oauth-openshift-558db77b4-qllmj" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.938561 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/488b5c9e-23f8-47cb-ad7d-40af40abf207-config\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-kc2sh\" (UID: \"488b5c9e-23f8-47cb-ad7d-40af40abf207\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-kc2sh" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.938612 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/c79bfa07-4a71-4560-b706-ac6c81b10ddc-console-serving-cert\") pod \"console-f9d7485db-2dwc9\" (UID: \"c79bfa07-4a71-4560-b706-ac6c81b10ddc\") " pod="openshift-console/console-f9d7485db-2dwc9" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.938640 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/6e560d85-bf0c-4604-9f52-d46fe96b6fe7-v4-0-config-system-session\") pod \"oauth-openshift-558db77b4-qllmj\" (UID: \"6e560d85-bf0c-4604-9f52-d46fe96b6fe7\") " pod="openshift-authentication/oauth-openshift-558db77b4-qllmj" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.938664 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/6e560d85-bf0c-4604-9f52-d46fe96b6fe7-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-558db77b4-qllmj\" (UID: \"6e560d85-bf0c-4604-9f52-d46fe96b6fe7\") " pod="openshift-authentication/oauth-openshift-558db77b4-qllmj" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.938687 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/488b5c9e-23f8-47cb-ad7d-40af40abf207-serving-cert\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-kc2sh\" (UID: \"488b5c9e-23f8-47cb-ad7d-40af40abf207\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-kc2sh" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.938739 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/b7abac51-adc5-42fa-9084-033e4e7e7acb-bound-sa-token\") pod \"image-registry-697d97f7c8-25gsf\" (UID: \"b7abac51-adc5-42fa-9084-033e4e7e7acb\") " pod="openshift-image-registry/image-registry-697d97f7c8-25gsf" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.938765 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/52170468-2209-45d0-84f3-d223b1052bf9-trusted-ca\") pod 
\"cluster-image-registry-operator-dc59b4c8b-b6pdb\" (UID: \"52170468-2209-45d0-84f3-d223b1052bf9\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-b6pdb" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.938813 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/b7abac51-adc5-42fa-9084-033e4e7e7acb-installation-pull-secrets\") pod \"image-registry-697d97f7c8-25gsf\" (UID: \"b7abac51-adc5-42fa-9084-033e4e7e7acb\") " pod="openshift-image-registry/image-registry-697d97f7c8-25gsf" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.938850 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/c79bfa07-4a71-4560-b706-ac6c81b10ddc-service-ca\") pod \"console-f9d7485db-2dwc9\" (UID: \"c79bfa07-4a71-4560-b706-ac6c81b10ddc\") " pod="openshift-console/console-f9d7485db-2dwc9" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.938872 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/c79bfa07-4a71-4560-b706-ac6c81b10ddc-oauth-serving-cert\") pod \"console-f9d7485db-2dwc9\" (UID: \"c79bfa07-4a71-4560-b706-ac6c81b10ddc\") " pod="openshift-console/console-f9d7485db-2dwc9" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.938888 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xztxr\" (UniqueName: \"kubernetes.io/projected/c79bfa07-4a71-4560-b706-ac6c81b10ddc-kube-api-access-xztxr\") pod \"console-f9d7485db-2dwc9\" (UID: \"c79bfa07-4a71-4560-b706-ac6c81b10ddc\") " pod="openshift-console/console-f9d7485db-2dwc9" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.938904 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c2f76bac-1abe-4877-8e25-768778bf5edd-config\") pod \"kube-storage-version-migrator-operator-b67b599dd-56x4k\" (UID: \"c2f76bac-1abe-4877-8e25-768778bf5edd\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-56x4k" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.938936 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/c79bfa07-4a71-4560-b706-ac6c81b10ddc-console-config\") pod \"console-f9d7485db-2dwc9\" (UID: \"c79bfa07-4a71-4560-b706-ac6c81b10ddc\") " pod="openshift-console/console-f9d7485db-2dwc9" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.938954 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/1d4846ae-8aeb-4940-adb4-ad6726532d8b-proxy-tls\") pod \"machine-config-controller-84d6567774-jmkwd\" (UID: \"1d4846ae-8aeb-4940-adb4-ad6726532d8b\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-jmkwd" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.938986 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/488b5c9e-23f8-47cb-ad7d-40af40abf207-kube-api-access\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-kc2sh\" (UID: 
\"488b5c9e-23f8-47cb-ad7d-40af40abf207\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-kc2sh" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.939015 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/6e560d85-bf0c-4604-9f52-d46fe96b6fe7-v4-0-config-system-serving-cert\") pod \"oauth-openshift-558db77b4-qllmj\" (UID: \"6e560d85-bf0c-4604-9f52-d46fe96b6fe7\") " pod="openshift-authentication/oauth-openshift-558db77b4-qllmj" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.939032 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6gfvl\" (UniqueName: \"kubernetes.io/projected/6e560d85-bf0c-4604-9f52-d46fe96b6fe7-kube-api-access-6gfvl\") pod \"oauth-openshift-558db77b4-qllmj\" (UID: \"6e560d85-bf0c-4604-9f52-d46fe96b6fe7\") " pod="openshift-authentication/oauth-openshift-558db77b4-qllmj" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.939096 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c2f76bac-1abe-4877-8e25-768778bf5edd-serving-cert\") pod \"kube-storage-version-migrator-operator-b67b599dd-56x4k\" (UID: \"c2f76bac-1abe-4877-8e25-768778bf5edd\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-56x4k" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.939117 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/6e560d85-bf0c-4604-9f52-d46fe96b6fe7-audit-policies\") pod \"oauth-openshift-558db77b4-qllmj\" (UID: \"6e560d85-bf0c-4604-9f52-d46fe96b6fe7\") " pod="openshift-authentication/oauth-openshift-558db77b4-qllmj" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.939170 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6e560d85-bf0c-4604-9f52-d46fe96b6fe7-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-558db77b4-qllmj\" (UID: \"6e560d85-bf0c-4604-9f52-d46fe96b6fe7\") " pod="openshift-authentication/oauth-openshift-558db77b4-qllmj" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.939219 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/6e560d85-bf0c-4604-9f52-d46fe96b6fe7-v4-0-config-user-template-login\") pod \"oauth-openshift-558db77b4-qllmj\" (UID: \"6e560d85-bf0c-4604-9f52-d46fe96b6fe7\") " pod="openshift-authentication/oauth-openshift-558db77b4-qllmj" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.939252 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/b7abac51-adc5-42fa-9084-033e4e7e7acb-registry-tls\") pod \"image-registry-697d97f7c8-25gsf\" (UID: \"b7abac51-adc5-42fa-9084-033e4e7e7acb\") " pod="openshift-image-registry/image-registry-697d97f7c8-25gsf" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.939274 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b7abac51-adc5-42fa-9084-033e4e7e7acb-trusted-ca\") pod 
\"image-registry-697d97f7c8-25gsf\" (UID: \"b7abac51-adc5-42fa-9084-033e4e7e7acb\") " pod="openshift-image-registry/image-registry-697d97f7c8-25gsf" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.939311 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/6e560d85-bf0c-4604-9f52-d46fe96b6fe7-v4-0-config-user-template-error\") pod \"oauth-openshift-558db77b4-qllmj\" (UID: \"6e560d85-bf0c-4604-9f52-d46fe96b6fe7\") " pod="openshift-authentication/oauth-openshift-558db77b4-qllmj" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.939330 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/c79bfa07-4a71-4560-b706-ac6c81b10ddc-console-oauth-config\") pod \"console-f9d7485db-2dwc9\" (UID: \"c79bfa07-4a71-4560-b706-ac6c81b10ddc\") " pod="openshift-console/console-f9d7485db-2dwc9" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.939355 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c79bfa07-4a71-4560-b706-ac6c81b10ddc-trusted-ca-bundle\") pod \"console-f9d7485db-2dwc9\" (UID: \"c79bfa07-4a71-4560-b706-ac6c81b10ddc\") " pod="openshift-console/console-f9d7485db-2dwc9" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.939382 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/1d4846ae-8aeb-4940-adb4-ad6726532d8b-mcc-auth-proxy-config\") pod \"machine-config-controller-84d6567774-jmkwd\" (UID: \"1d4846ae-8aeb-4940-adb4-ad6726532d8b\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-jmkwd" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.939427 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/6e560d85-bf0c-4604-9f52-d46fe96b6fe7-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-558db77b4-qllmj\" (UID: \"6e560d85-bf0c-4604-9f52-d46fe96b6fe7\") " pod="openshift-authentication/oauth-openshift-558db77b4-qllmj" Dec 10 10:47:42 crc kubenswrapper[4780]: E1210 10:47:42.939631 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-10 10:47:43.439603054 +0000 UTC m=+168.292996497 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.941185 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/b7abac51-adc5-42fa-9084-033e4e7e7acb-registry-certificates\") pod \"image-registry-697d97f7c8-25gsf\" (UID: \"b7abac51-adc5-42fa-9084-033e4e7e7acb\") " pod="openshift-image-registry/image-registry-697d97f7c8-25gsf" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.941900 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/b7abac51-adc5-42fa-9084-033e4e7e7acb-ca-trust-extracted\") pod \"image-registry-697d97f7c8-25gsf\" (UID: \"b7abac51-adc5-42fa-9084-033e4e7e7acb\") " pod="openshift-image-registry/image-registry-697d97f7c8-25gsf" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.954198 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-5694c8668f-2mb96" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.955848 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/b7abac51-adc5-42fa-9084-033e4e7e7acb-registry-tls\") pod \"image-registry-697d97f7c8-25gsf\" (UID: \"b7abac51-adc5-42fa-9084-033e4e7e7acb\") " pod="openshift-image-registry/image-registry-697d97f7c8-25gsf" Dec 10 10:47:42 crc kubenswrapper[4780]: I1210 10:47:42.957210 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/b7abac51-adc5-42fa-9084-033e4e7e7acb-installation-pull-secrets\") pod \"image-registry-697d97f7c8-25gsf\" (UID: \"b7abac51-adc5-42fa-9084-033e4e7e7acb\") " pod="openshift-image-registry/image-registry-697d97f7c8-25gsf" Dec 10 10:47:44 crc kubenswrapper[4780]: I1210 10:47:44.058885 4780 request.go:700] Waited for 1.208255693s due to client-side throttling, not priority and fairness, request: GET:https://api-int.crc.testing:6443/api/v1/namespaces/openshift-authentication/secrets?fieldSelector=metadata.name%3Dv4-0-config-system-serving-cert&limit=500&resourceVersion=0 Dec 10 10:47:44 crc kubenswrapper[4780]: I1210 10:47:44.067803 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b7abac51-adc5-42fa-9084-033e4e7e7acb-trusted-ca\") pod \"image-registry-697d97f7c8-25gsf\" (UID: \"b7abac51-adc5-42fa-9084-033e4e7e7acb\") " pod="openshift-image-registry/image-registry-697d97f7c8-25gsf" Dec 10 10:47:44 crc kubenswrapper[4780]: I1210 10:47:44.070344 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 10 10:47:44 crc kubenswrapper[4780]: I1210 10:47:44.126115 4780 kubelet.go:2421] "SyncLoop ADD" source="api" 
pods=["openshift-service-ca-operator/service-ca-operator-777779d784-skxsg"] Dec 10 10:47:44 crc kubenswrapper[4780]: I1210 10:47:44.128783 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-hx2j2"] Dec 10 10:47:44 crc kubenswrapper[4780]: I1210 10:47:44.149660 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-777779d784-skxsg" Dec 10 10:47:44 crc kubenswrapper[4780]: I1210 10:47:44.150602 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-hx2j2" Dec 10 10:47:44 crc kubenswrapper[4780]: I1210 10:47:44.158111 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/6e560d85-bf0c-4604-9f52-d46fe96b6fe7-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-558db77b4-qllmj\" (UID: \"6e560d85-bf0c-4604-9f52-d46fe96b6fe7\") " pod="openshift-authentication/oauth-openshift-558db77b4-qllmj" Dec 10 10:47:44 crc kubenswrapper[4780]: I1210 10:47:44.161714 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"serving-cert" Dec 10 10:47:44 crc kubenswrapper[4780]: I1210 10:47:44.161961 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"oauth-openshift-dockercfg-znhcc" Dec 10 10:47:44 crc kubenswrapper[4780]: I1210 10:47:44.162273 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-serving-cert" Dec 10 10:47:44 crc kubenswrapper[4780]: I1210 10:47:44.162398 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-metrics" Dec 10 10:47:44 crc kubenswrapper[4780]: I1210 10:47:44.162514 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"openshift-service-ca.crt" Dec 10 10:47:44 crc kubenswrapper[4780]: I1210 10:47:44.163032 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-idp-0-file-data" Dec 10 10:47:44 crc kubenswrapper[4780]: I1210 10:47:44.163811 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-dockercfg-qt55r" Dec 10 10:47:44 crc kubenswrapper[4780]: I1210 10:47:44.164142 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"kube-scheduler-operator-serving-cert" Dec 10 10:47:44 crc kubenswrapper[4780]: I1210 10:47:44.164470 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"kube-root-ca.crt" Dec 10 10:47:44 crc kubenswrapper[4780]: I1210 10:47:44.164463 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-tls" Dec 10 10:47:44 crc kubenswrapper[4780]: I1210 10:47:44.165414 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/52170468-2209-45d0-84f3-d223b1052bf9-trusted-ca\") pod \"cluster-image-registry-operator-dc59b4c8b-b6pdb\" (UID: \"52170468-2209-45d0-84f3-d223b1052bf9\") " 
pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-b6pdb" Dec 10 10:47:44 crc kubenswrapper[4780]: I1210 10:47:44.165536 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/90ac2cea-e1c2-479b-8de0-0917f3779a13-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-lrzpt\" (UID: \"90ac2cea-e1c2-479b-8de0-0917f3779a13\") " pod="openshift-marketplace/marketplace-operator-79b997595-lrzpt" Dec 10 10:47:44 crc kubenswrapper[4780]: I1210 10:47:44.165705 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/c79bfa07-4a71-4560-b706-ac6c81b10ddc-service-ca\") pod \"console-f9d7485db-2dwc9\" (UID: \"c79bfa07-4a71-4560-b706-ac6c81b10ddc\") " pod="openshift-console/console-f9d7485db-2dwc9" Dec 10 10:47:44 crc kubenswrapper[4780]: I1210 10:47:44.167645 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/c79bfa07-4a71-4560-b706-ac6c81b10ddc-service-ca\") pod \"console-f9d7485db-2dwc9\" (UID: \"c79bfa07-4a71-4560-b706-ac6c81b10ddc\") " pod="openshift-console/console-f9d7485db-2dwc9" Dec 10 10:47:44 crc kubenswrapper[4780]: I1210 10:47:44.170635 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2xphz\" (UniqueName: \"kubernetes.io/projected/fb9e3c6f-29bf-49b8-a0c0-c17447e36e99-kube-api-access-2xphz\") pod \"route-controller-manager-6576b87f9c-fw9bc\" (UID: \"fb9e3c6f-29bf-49b8-a0c0-c17447e36e99\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-fw9bc" Dec 10 10:47:44 crc kubenswrapper[4780]: I1210 10:47:44.171058 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/52170468-2209-45d0-84f3-d223b1052bf9-trusted-ca\") pod \"cluster-image-registry-operator-dc59b4c8b-b6pdb\" (UID: \"52170468-2209-45d0-84f3-d223b1052bf9\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-b6pdb" Dec 10 10:47:44 crc kubenswrapper[4780]: I1210 10:47:44.172090 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-session" Dec 10 10:47:44 crc kubenswrapper[4780]: I1210 10:47:44.173851 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"kube-root-ca.crt" Dec 10 10:47:44 crc kubenswrapper[4780]: I1210 10:47:44.175744 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"kube-root-ca.crt" Dec 10 10:47:44 crc kubenswrapper[4780]: I1210 10:47:44.176224 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"openshift-service-ca.crt" Dec 10 10:47:44 crc kubenswrapper[4780]: I1210 10:47:44.176546 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-router-certs" Dec 10 10:47:44 crc kubenswrapper[4780]: I1210 10:47:44.176865 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"openshift-service-ca.crt" Dec 10 10:47:44 crc kubenswrapper[4780]: I1210 10:47:44.177115 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sgrlg\" (UniqueName: \"kubernetes.io/projected/d8851f78-89d9-4d65-b8e3-cb9ad2f74469-kube-api-access-sgrlg\") pod 
\"machine-approver-56656f9798-wbpvc\" (UID: \"d8851f78-89d9-4d65-b8e3-cb9ad2f74469\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-wbpvc" Dec 10 10:47:44 crc kubenswrapper[4780]: I1210 10:47:44.177641 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"kube-root-ca.crt" Dec 10 10:47:44 crc kubenswrapper[4780]: I1210 10:47:44.178858 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-controller-dockercfg-c2lfx" Dec 10 10:47:44 crc kubenswrapper[4780]: I1210 10:47:44.179652 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"kube-storage-version-migrator-operator-dockercfg-2bh8d" Dec 10 10:47:44 crc kubenswrapper[4780]: I1210 10:47:44.180969 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"config" Dec 10 10:47:44 crc kubenswrapper[4780]: I1210 10:47:44.180970 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"audit" Dec 10 10:47:44 crc kubenswrapper[4780]: I1210 10:47:44.181156 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"openshift-service-ca.crt" Dec 10 10:47:44 crc kubenswrapper[4780]: I1210 10:47:44.181408 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-serving-cert" Dec 10 10:47:44 crc kubenswrapper[4780]: I1210 10:47:44.181604 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-dockercfg-gkqpw" Dec 10 10:47:44 crc kubenswrapper[4780]: I1210 10:47:44.183796 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-dockercfg-zdk86" Dec 10 10:47:44 crc kubenswrapper[4780]: I1210 10:47:44.184650 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-provider-selection" Dec 10 10:47:44 crc kubenswrapper[4780]: I1210 10:47:44.184906 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-stats-default" Dec 10 10:47:44 crc kubenswrapper[4780]: I1210 10:47:44.185330 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-error" Dec 10 10:47:44 crc kubenswrapper[4780]: I1210 10:47:44.186148 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5snbg\" (UniqueName: \"kubernetes.io/projected/041a0a8d-2984-4158-b873-13944248e6ff-kube-api-access-5snbg\") pod \"console-operator-58897d9998-52wql\" (UID: \"041a0a8d-2984-4158-b873-13944248e6ff\") " pod="openshift-console-operator/console-operator-58897d9998-52wql" Dec 10 10:47:44 crc kubenswrapper[4780]: I1210 10:47:44.188065 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"service-ca-bundle" Dec 10 10:47:44 crc kubenswrapper[4780]: I1210 10:47:44.188181 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-service-ca" Dec 10 10:47:44 crc kubenswrapper[4780]: I1210 10:47:44.188197 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-config" Dec 10 10:47:44 crc kubenswrapper[4780]: I1210 10:47:44.188362 
4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-cliconfig" Dec 10 10:47:44 crc kubenswrapper[4780]: I1210 10:47:44.189855 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"kube-root-ca.crt" Dec 10 10:47:44 crc kubenswrapper[4780]: I1210 10:47:44.190446 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-dockercfg-k9rxt" Dec 10 10:47:44 crc kubenswrapper[4780]: I1210 10:47:44.194381 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bgtjj\" (UniqueName: \"kubernetes.io/projected/ebdf6b64-3c9b-4207-a4d3-3fc75ef78d68-kube-api-access-bgtjj\") pod \"cluster-samples-operator-665b6dd947-c8dbv\" (UID: \"ebdf6b64-3c9b-4207-a4d3-3fc75ef78d68\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-c8dbv" Dec 10 10:47:44 crc kubenswrapper[4780]: I1210 10:47:44.195165 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mcc-proxy-tls" Dec 10 10:47:44 crc kubenswrapper[4780]: I1210 10:47:44.198078 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator"/"kube-storage-version-migrator-sa-dockercfg-5xfcg" Dec 10 10:47:44 crc kubenswrapper[4780]: E1210 10:47:44.200309 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-10 10:47:45.200235029 +0000 UTC m=+170.053628482 (durationBeforeRetry 1s). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:47:44 crc kubenswrapper[4780]: I1210 10:47:44.200787 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/1d4846ae-8aeb-4940-adb4-ad6726532d8b-proxy-tls\") pod \"machine-config-controller-84d6567774-jmkwd\" (UID: \"1d4846ae-8aeb-4940-adb4-ad6726532d8b\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-jmkwd" Dec 10 10:47:44 crc kubenswrapper[4780]: I1210 10:47:44.200805 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-dockercfg-5nsgg" Dec 10 10:47:44 crc kubenswrapper[4780]: I1210 10:47:44.201093 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"kube-root-ca.crt" Dec 10 10:47:44 crc kubenswrapper[4780]: I1210 10:47:44.201132 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ingress-canary/ingress-canary-2r7qq"] Dec 10 10:47:44 crc kubenswrapper[4780]: I1210 10:47:44.201395 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qh4dq\" (UniqueName: \"kubernetes.io/projected/027ecd1e-0802-4c3a-b42a-4e272ee3f6fc-kube-api-access-qh4dq\") pod \"openshift-config-operator-7777fb866f-q6q9q\" (UID: \"027ecd1e-0802-4c3a-b42a-4e272ee3f6fc\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-q6q9q" Dec 10 10:47:44 crc kubenswrapper[4780]: I1210 10:47:44.201752 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-config" Dec 10 10:47:44 crc kubenswrapper[4780]: I1210 10:47:44.201990 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver/apiserver-76f77b778f-9hgsw"] Dec 10 10:47:44 crc kubenswrapper[4780]: I1210 10:47:44.202017 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/machine-api-operator-5694c8668f-2mb96"] Dec 10 10:47:44 crc kubenswrapper[4780]: I1210 10:47:44.202031 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/downloads-7954f5f757-fxxzq"] Dec 10 10:47:44 crc kubenswrapper[4780]: I1210 10:47:44.202047 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-oauth-apiserver/apiserver-7bbb656c7d-dgfj7"] Dec 10 10:47:44 crc kubenswrapper[4780]: I1210 10:47:44.202059 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-njfgs"] Dec 10 10:47:44 crc kubenswrapper[4780]: I1210 10:47:44.202073 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-fw9bc"] Dec 10 10:47:44 crc kubenswrapper[4780]: I1210 10:47:44.202086 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-c8dbv"] Dec 10 10:47:44 crc kubenswrapper[4780]: I1210 10:47:44.202104 4780 kubelet.go:2421] "SyncLoop ADD" source="api" 
pods=["openshift-machine-config-operator/machine-config-server-vbvq9"] Dec 10 10:47:44 crc kubenswrapper[4780]: I1210 10:47:44.202162 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/488b5c9e-23f8-47cb-ad7d-40af40abf207-kube-api-access\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-kc2sh\" (UID: \"488b5c9e-23f8-47cb-ad7d-40af40abf207\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-kc2sh" Dec 10 10:47:44 crc kubenswrapper[4780]: I1210 10:47:44.202478 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/19f469f0-60aa-4251-88f4-96baafae3f21-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-78cbb6b69f-nmvlq\" (UID: \"19f469f0-60aa-4251-88f4-96baafae3f21\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-nmvlq" Dec 10 10:47:44 crc kubenswrapper[4780]: I1210 10:47:44.202609 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-controller-84d6567774-jmkwd"] Dec 10 10:47:44 crc kubenswrapper[4780]: I1210 10:47:44.202629 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-etcd-operator/etcd-operator-b45778765-lmmsq"] Dec 10 10:47:44 crc kubenswrapper[4780]: I1210 10:47:44.202643 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-b6pdb"] Dec 10 10:47:44 crc kubenswrapper[4780]: I1210 10:47:44.202637 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nm5mh\" (UniqueName: \"kubernetes.io/projected/19f469f0-60aa-4251-88f4-96baafae3f21-kube-api-access-nm5mh\") pod \"control-plane-machine-set-operator-78cbb6b69f-nmvlq\" (UID: \"19f469f0-60aa-4251-88f4-96baafae3f21\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-nmvlq" Dec 10 10:47:44 crc kubenswrapper[4780]: I1210 10:47:44.202680 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/1226614f-560f-40b9-81a2-595e79043653-stats-auth\") pod \"router-default-5444994796-qbmwm\" (UID: \"1226614f-560f-40b9-81a2-595e79043653\") " pod="openshift-ingress/router-default-5444994796-qbmwm" Dec 10 10:47:44 crc kubenswrapper[4780]: I1210 10:47:44.202656 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-operator/ingress-operator-5b745b69d9-dgx4k"] Dec 10 10:47:44 crc kubenswrapper[4780]: I1210 10:47:44.202759 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-server-vbvq9" Dec 10 10:47:44 crc kubenswrapper[4780]: I1210 10:47:44.202827 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/6e560d85-bf0c-4604-9f52-d46fe96b6fe7-v4-0-config-system-serving-cert\") pod \"oauth-openshift-558db77b4-qllmj\" (UID: \"6e560d85-bf0c-4604-9f52-d46fe96b6fe7\") " pod="openshift-authentication/oauth-openshift-558db77b4-qllmj" Dec 10 10:47:44 crc kubenswrapper[4780]: I1210 10:47:44.202883 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2r7qq" Dec 10 10:47:44 crc kubenswrapper[4780]: I1210 10:47:44.203113 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-metrics-certs-default" Dec 10 10:47:44 crc kubenswrapper[4780]: I1210 10:47:44.203367 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator/migrator-59844c95c7-x9cz8"] Dec 10 10:47:44 crc kubenswrapper[4780]: I1210 10:47:44.203400 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-config-operator/openshift-config-operator-7777fb866f-q6q9q"] Dec 10 10:47:44 crc kubenswrapper[4780]: I1210 10:47:44.203419 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-ns5px"] Dec 10 10:47:44 crc kubenswrapper[4780]: I1210 10:47:44.203684 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c2f76bac-1abe-4877-8e25-768778bf5edd-serving-cert\") pod \"kube-storage-version-migrator-operator-b67b599dd-56x4k\" (UID: \"c2f76bac-1abe-4877-8e25-768778bf5edd\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-56x4k" Dec 10 10:47:44 crc kubenswrapper[4780]: I1210 10:47:44.203760 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/6e560d85-bf0c-4604-9f52-d46fe96b6fe7-audit-policies\") pod \"oauth-openshift-558db77b4-qllmj\" (UID: \"6e560d85-bf0c-4604-9f52-d46fe96b6fe7\") " pod="openshift-authentication/oauth-openshift-558db77b4-qllmj" Dec 10 10:47:44 crc kubenswrapper[4780]: I1210 10:47:44.203796 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vtxkb\" (UniqueName: \"kubernetes.io/projected/0c750f90-4775-4f44-9edc-53dd41864462-kube-api-access-vtxkb\") pod \"service-ca-9c57cc56f-6z8d8\" (UID: \"0c750f90-4775-4f44-9edc-53dd41864462\") " pod="openshift-service-ca/service-ca-9c57cc56f-6z8d8" Dec 10 10:47:44 crc kubenswrapper[4780]: I1210 10:47:44.204109 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-25gsf"] Dec 10 10:47:44 crc kubenswrapper[4780]: I1210 10:47:44.204345 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6e560d85-bf0c-4604-9f52-d46fe96b6fe7-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-558db77b4-qllmj\" (UID: \"6e560d85-bf0c-4604-9f52-d46fe96b6fe7\") " pod="openshift-authentication/oauth-openshift-558db77b4-qllmj" Dec 10 10:47:44 crc kubenswrapper[4780]: I1210 10:47:44.204389 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9z2hv\" (UniqueName: \"kubernetes.io/projected/90ac2cea-e1c2-479b-8de0-0917f3779a13-kube-api-access-9z2hv\") pod \"marketplace-operator-79b997595-lrzpt\" (UID: \"90ac2cea-e1c2-479b-8de0-0917f3779a13\") " pod="openshift-marketplace/marketplace-operator-79b997595-lrzpt" Dec 10 10:47:44 crc kubenswrapper[4780]: I1210 10:47:44.204428 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/ee82ec7a-8f0c-4576-aad4-e7686fc8ca5b-images\") pod 
\"machine-config-operator-74547568cd-rlc8b\" (UID: \"ee82ec7a-8f0c-4576-aad4-e7686fc8ca5b\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-rlc8b" Dec 10 10:47:44 crc kubenswrapper[4780]: I1210 10:47:44.204484 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/6e560d85-bf0c-4604-9f52-d46fe96b6fe7-v4-0-config-user-template-login\") pod \"oauth-openshift-558db77b4-qllmj\" (UID: \"6e560d85-bf0c-4604-9f52-d46fe96b6fe7\") " pod="openshift-authentication/oauth-openshift-558db77b4-qllmj" Dec 10 10:47:44 crc kubenswrapper[4780]: I1210 10:47:44.204567 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/6e560d85-bf0c-4604-9f52-d46fe96b6fe7-v4-0-config-user-template-error\") pod \"oauth-openshift-558db77b4-qllmj\" (UID: \"6e560d85-bf0c-4604-9f52-d46fe96b6fe7\") " pod="openshift-authentication/oauth-openshift-558db77b4-qllmj" Dec 10 10:47:44 crc kubenswrapper[4780]: I1210 10:47:44.204618 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c79bfa07-4a71-4560-b706-ac6c81b10ddc-trusted-ca-bundle\") pod \"console-f9d7485db-2dwc9\" (UID: \"c79bfa07-4a71-4560-b706-ac6c81b10ddc\") " pod="openshift-console/console-f9d7485db-2dwc9" Dec 10 10:47:44 crc kubenswrapper[4780]: I1210 10:47:44.204665 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/6e560d85-bf0c-4604-9f52-d46fe96b6fe7-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-558db77b4-qllmj\" (UID: \"6e560d85-bf0c-4604-9f52-d46fe96b6fe7\") " pod="openshift-authentication/oauth-openshift-558db77b4-qllmj" Dec 10 10:47:44 crc kubenswrapper[4780]: I1210 10:47:44.204794 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/1226614f-560f-40b9-81a2-595e79043653-default-certificate\") pod \"router-default-5444994796-qbmwm\" (UID: \"1226614f-560f-40b9-81a2-595e79043653\") " pod="openshift-ingress/router-default-5444994796-qbmwm" Dec 10 10:47:44 crc kubenswrapper[4780]: I1210 10:47:44.204958 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/6e560d85-bf0c-4604-9f52-d46fe96b6fe7-v4-0-config-system-service-ca\") pod \"oauth-openshift-558db77b4-qllmj\" (UID: \"6e560d85-bf0c-4604-9f52-d46fe96b6fe7\") " pod="openshift-authentication/oauth-openshift-558db77b4-qllmj" Dec 10 10:47:44 crc kubenswrapper[4780]: I1210 10:47:44.205076 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4g7cj\" (UniqueName: \"kubernetes.io/projected/52170468-2209-45d0-84f3-d223b1052bf9-kube-api-access-4g7cj\") pod \"cluster-image-registry-operator-dc59b4c8b-b6pdb\" (UID: \"52170468-2209-45d0-84f3-d223b1052bf9\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-b6pdb" Dec 10 10:47:44 crc kubenswrapper[4780]: I1210 10:47:44.205108 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/6e560d85-bf0c-4604-9f52-d46fe96b6fe7-v4-0-config-user-idp-0-file-data\") pod 
\"oauth-openshift-558db77b4-qllmj\" (UID: \"6e560d85-bf0c-4604-9f52-d46fe96b6fe7\") " pod="openshift-authentication/oauth-openshift-558db77b4-qllmj" Dec 10 10:47:44 crc kubenswrapper[4780]: I1210 10:47:44.205148 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-25gsf\" (UID: \"b7abac51-adc5-42fa-9084-033e4e7e7acb\") " pod="openshift-image-registry/image-registry-697d97f7c8-25gsf" Dec 10 10:47:44 crc kubenswrapper[4780]: I1210 10:47:44.205207 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/6e560d85-bf0c-4604-9f52-d46fe96b6fe7-v4-0-config-system-router-certs\") pod \"oauth-openshift-558db77b4-qllmj\" (UID: \"6e560d85-bf0c-4604-9f52-d46fe96b6fe7\") " pod="openshift-authentication/oauth-openshift-558db77b4-qllmj" Dec 10 10:47:44 crc kubenswrapper[4780]: I1210 10:47:44.205238 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/0c750f90-4775-4f44-9edc-53dd41864462-signing-cabundle\") pod \"service-ca-9c57cc56f-6z8d8\" (UID: \"0c750f90-4775-4f44-9edc-53dd41864462\") " pod="openshift-service-ca/service-ca-9c57cc56f-6z8d8" Dec 10 10:47:44 crc kubenswrapper[4780]: I1210 10:47:44.205388 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/6e560d85-bf0c-4604-9f52-d46fe96b6fe7-audit-policies\") pod \"oauth-openshift-558db77b4-qllmj\" (UID: \"6e560d85-bf0c-4604-9f52-d46fe96b6fe7\") " pod="openshift-authentication/oauth-openshift-558db77b4-qllmj" Dec 10 10:47:44 crc kubenswrapper[4780]: I1210 10:47:44.205399 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/edec8e8c-042b-46c6-b136-32bf2d144f27-webhook-certs\") pod \"multus-admission-controller-857f4d67dd-v57bz\" (UID: \"edec8e8c-042b-46c6-b136-32bf2d144f27\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-v57bz" Dec 10 10:47:44 crc kubenswrapper[4780]: I1210 10:47:44.205509 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/6e560d85-bf0c-4604-9f52-d46fe96b6fe7-audit-dir\") pod \"oauth-openshift-558db77b4-qllmj\" (UID: \"6e560d85-bf0c-4604-9f52-d46fe96b6fe7\") " pod="openshift-authentication/oauth-openshift-558db77b4-qllmj" Dec 10 10:47:44 crc kubenswrapper[4780]: I1210 10:47:44.205544 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/ee82ec7a-8f0c-4576-aad4-e7686fc8ca5b-auth-proxy-config\") pod \"machine-config-operator-74547568cd-rlc8b\" (UID: \"ee82ec7a-8f0c-4576-aad4-e7686fc8ca5b\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-rlc8b" Dec 10 10:47:44 crc kubenswrapper[4780]: I1210 10:47:44.205576 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/488b5c9e-23f8-47cb-ad7d-40af40abf207-config\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-kc2sh\" (UID: \"488b5c9e-23f8-47cb-ad7d-40af40abf207\") " 
pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-kc2sh" Dec 10 10:47:44 crc kubenswrapper[4780]: I1210 10:47:44.205634 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/c79bfa07-4a71-4560-b706-ac6c81b10ddc-console-serving-cert\") pod \"console-f9d7485db-2dwc9\" (UID: \"c79bfa07-4a71-4560-b706-ac6c81b10ddc\") " pod="openshift-console/console-f9d7485db-2dwc9" Dec 10 10:47:44 crc kubenswrapper[4780]: I1210 10:47:44.205660 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/488b5c9e-23f8-47cb-ad7d-40af40abf207-serving-cert\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-kc2sh\" (UID: \"488b5c9e-23f8-47cb-ad7d-40af40abf207\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-kc2sh" Dec 10 10:47:44 crc kubenswrapper[4780]: I1210 10:47:44.205655 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca/service-ca-9c57cc56f-6z8d8"] Dec 10 10:47:44 crc kubenswrapper[4780]: I1210 10:47:44.205689 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/6e560d85-bf0c-4604-9f52-d46fe96b6fe7-v4-0-config-system-session\") pod \"oauth-openshift-558db77b4-qllmj\" (UID: \"6e560d85-bf0c-4604-9f52-d46fe96b6fe7\") " pod="openshift-authentication/oauth-openshift-558db77b4-qllmj" Dec 10 10:47:44 crc kubenswrapper[4780]: I1210 10:47:44.205832 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zdslk\" (UniqueName: \"kubernetes.io/projected/7753484d-d587-473d-8630-20cefaad3c7c-kube-api-access-zdslk\") pod \"catalog-operator-68c6474976-4sfhd\" (UID: \"7753484d-d587-473d-8630-20cefaad3c7c\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-4sfhd" Dec 10 10:47:44 crc kubenswrapper[4780]: I1210 10:47:44.205892 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/c79bfa07-4a71-4560-b706-ac6c81b10ddc-oauth-serving-cert\") pod \"console-f9d7485db-2dwc9\" (UID: \"c79bfa07-4a71-4560-b706-ac6c81b10ddc\") " pod="openshift-console/console-f9d7485db-2dwc9" Dec 10 10:47:44 crc kubenswrapper[4780]: I1210 10:47:44.205945 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xztxr\" (UniqueName: \"kubernetes.io/projected/c79bfa07-4a71-4560-b706-ac6c81b10ddc-kube-api-access-xztxr\") pod \"console-f9d7485db-2dwc9\" (UID: \"c79bfa07-4a71-4560-b706-ac6c81b10ddc\") " pod="openshift-console/console-f9d7485db-2dwc9" Dec 10 10:47:44 crc kubenswrapper[4780]: I1210 10:47:44.205978 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c2f76bac-1abe-4877-8e25-768778bf5edd-config\") pod \"kube-storage-version-migrator-operator-b67b599dd-56x4k\" (UID: \"c2f76bac-1abe-4877-8e25-768778bf5edd\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-56x4k" Dec 10 10:47:44 crc kubenswrapper[4780]: I1210 10:47:44.206008 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/c79bfa07-4a71-4560-b706-ac6c81b10ddc-console-config\") pod \"console-f9d7485db-2dwc9\" (UID: 
\"c79bfa07-4a71-4560-b706-ac6c81b10ddc\") " pod="openshift-console/console-f9d7485db-2dwc9" Dec 10 10:47:44 crc kubenswrapper[4780]: I1210 10:47:44.206057 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/7753484d-d587-473d-8630-20cefaad3c7c-profile-collector-cert\") pod \"catalog-operator-68c6474976-4sfhd\" (UID: \"7753484d-d587-473d-8630-20cefaad3c7c\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-4sfhd" Dec 10 10:47:44 crc kubenswrapper[4780]: I1210 10:47:44.206103 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6gfvl\" (UniqueName: \"kubernetes.io/projected/6e560d85-bf0c-4604-9f52-d46fe96b6fe7-kube-api-access-6gfvl\") pod \"oauth-openshift-558db77b4-qllmj\" (UID: \"6e560d85-bf0c-4604-9f52-d46fe96b6fe7\") " pod="openshift-authentication/oauth-openshift-558db77b4-qllmj" Dec 10 10:47:44 crc kubenswrapper[4780]: I1210 10:47:44.206129 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-v99cv\" (UniqueName: \"kubernetes.io/projected/1226614f-560f-40b9-81a2-595e79043653-kube-api-access-v99cv\") pod \"router-default-5444994796-qbmwm\" (UID: \"1226614f-560f-40b9-81a2-595e79043653\") " pod="openshift-ingress/router-default-5444994796-qbmwm" Dec 10 10:47:44 crc kubenswrapper[4780]: I1210 10:47:44.206167 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/7753484d-d587-473d-8630-20cefaad3c7c-srv-cert\") pod \"catalog-operator-68c6474976-4sfhd\" (UID: \"7753484d-d587-473d-8630-20cefaad3c7c\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-4sfhd" Dec 10 10:47:44 crc kubenswrapper[4780]: I1210 10:47:44.206212 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/1226614f-560f-40b9-81a2-595e79043653-service-ca-bundle\") pod \"router-default-5444994796-qbmwm\" (UID: \"1226614f-560f-40b9-81a2-595e79043653\") " pod="openshift-ingress/router-default-5444994796-qbmwm" Dec 10 10:47:44 crc kubenswrapper[4780]: I1210 10:47:44.206526 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/ee82ec7a-8f0c-4576-aad4-e7686fc8ca5b-proxy-tls\") pod \"machine-config-operator-74547568cd-rlc8b\" (UID: \"ee82ec7a-8f0c-4576-aad4-e7686fc8ca5b\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-rlc8b" Dec 10 10:47:44 crc kubenswrapper[4780]: I1210 10:47:44.206608 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2cjsv\" (UniqueName: \"kubernetes.io/projected/edec8e8c-042b-46c6-b136-32bf2d144f27-kube-api-access-2cjsv\") pod \"multus-admission-controller-857f4d67dd-v57bz\" (UID: \"edec8e8c-042b-46c6-b136-32bf2d144f27\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-v57bz" Dec 10 10:47:44 crc kubenswrapper[4780]: I1210 10:47:44.206627 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/488b5c9e-23f8-47cb-ad7d-40af40abf207-config\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-kc2sh\" (UID: \"488b5c9e-23f8-47cb-ad7d-40af40abf207\") " 
pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-kc2sh" Dec 10 10:47:44 crc kubenswrapper[4780]: I1210 10:47:44.206668 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/c79bfa07-4a71-4560-b706-ac6c81b10ddc-console-oauth-config\") pod \"console-f9d7485db-2dwc9\" (UID: \"c79bfa07-4a71-4560-b706-ac6c81b10ddc\") " pod="openshift-console/console-f9d7485db-2dwc9" Dec 10 10:47:44 crc kubenswrapper[4780]: I1210 10:47:44.206710 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/6e560d85-bf0c-4604-9f52-d46fe96b6fe7-audit-dir\") pod \"oauth-openshift-558db77b4-qllmj\" (UID: \"6e560d85-bf0c-4604-9f52-d46fe96b6fe7\") " pod="openshift-authentication/oauth-openshift-558db77b4-qllmj" Dec 10 10:47:44 crc kubenswrapper[4780]: I1210 10:47:44.206721 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/1d4846ae-8aeb-4940-adb4-ad6726532d8b-mcc-auth-proxy-config\") pod \"machine-config-controller-84d6567774-jmkwd\" (UID: \"1d4846ae-8aeb-4940-adb4-ad6726532d8b\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-jmkwd" Dec 10 10:47:44 crc kubenswrapper[4780]: I1210 10:47:44.206909 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/1226614f-560f-40b9-81a2-595e79043653-metrics-certs\") pod \"router-default-5444994796-qbmwm\" (UID: \"1226614f-560f-40b9-81a2-595e79043653\") " pod="openshift-ingress/router-default-5444994796-qbmwm" Dec 10 10:47:44 crc kubenswrapper[4780]: I1210 10:47:44.207600 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-login" Dec 10 10:47:44 crc kubenswrapper[4780]: I1210 10:47:44.208049 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-root-ca.crt" Dec 10 10:47:44 crc kubenswrapper[4780]: I1210 10:47:44.210092 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c79bfa07-4a71-4560-b706-ac6c81b10ddc-trusted-ca-bundle\") pod \"console-f9d7485db-2dwc9\" (UID: \"c79bfa07-4a71-4560-b706-ac6c81b10ddc\") " pod="openshift-console/console-f9d7485db-2dwc9" Dec 10 10:47:44 crc kubenswrapper[4780]: I1210 10:47:44.210513 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/0c750f90-4775-4f44-9edc-53dd41864462-signing-key\") pod \"service-ca-9c57cc56f-6z8d8\" (UID: \"0c750f90-4775-4f44-9edc-53dd41864462\") " pod="openshift-service-ca/service-ca-9c57cc56f-6z8d8" Dec 10 10:47:44 crc kubenswrapper[4780]: I1210 10:47:44.210638 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fllkn\" (UniqueName: \"kubernetes.io/projected/1d4846ae-8aeb-4940-adb4-ad6726532d8b-kube-api-access-fllkn\") pod \"machine-config-controller-84d6567774-jmkwd\" (UID: \"1d4846ae-8aeb-4940-adb4-ad6726532d8b\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-jmkwd" Dec 10 10:47:44 crc kubenswrapper[4780]: I1210 10:47:44.210713 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"kube-api-access-tg442\" (UniqueName: \"kubernetes.io/projected/ee82ec7a-8f0c-4576-aad4-e7686fc8ca5b-kube-api-access-tg442\") pod \"machine-config-operator-74547568cd-rlc8b\" (UID: \"ee82ec7a-8f0c-4576-aad4-e7686fc8ca5b\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-rlc8b" Dec 10 10:47:44 crc kubenswrapper[4780]: I1210 10:47:44.210765 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/6e560d85-bf0c-4604-9f52-d46fe96b6fe7-v4-0-config-system-cliconfig\") pod \"oauth-openshift-558db77b4-qllmj\" (UID: \"6e560d85-bf0c-4604-9f52-d46fe96b6fe7\") " pod="openshift-authentication/oauth-openshift-558db77b4-qllmj" Dec 10 10:47:44 crc kubenswrapper[4780]: I1210 10:47:44.210877 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/52170468-2209-45d0-84f3-d223b1052bf9-image-registry-operator-tls\") pod \"cluster-image-registry-operator-dc59b4c8b-b6pdb\" (UID: \"52170468-2209-45d0-84f3-d223b1052bf9\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-b6pdb" Dec 10 10:47:44 crc kubenswrapper[4780]: I1210 10:47:44.210951 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/52170468-2209-45d0-84f3-d223b1052bf9-bound-sa-token\") pod \"cluster-image-registry-operator-dc59b4c8b-b6pdb\" (UID: \"52170468-2209-45d0-84f3-d223b1052bf9\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-b6pdb" Dec 10 10:47:44 crc kubenswrapper[4780]: I1210 10:47:44.210998 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/90ac2cea-e1c2-479b-8de0-0917f3779a13-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-lrzpt\" (UID: \"90ac2cea-e1c2-479b-8de0-0917f3779a13\") " pod="openshift-marketplace/marketplace-operator-79b997595-lrzpt" Dec 10 10:47:44 crc kubenswrapper[4780]: I1210 10:47:44.211056 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bmjht\" (UniqueName: \"kubernetes.io/projected/c2f76bac-1abe-4877-8e25-768778bf5edd-kube-api-access-bmjht\") pod \"kube-storage-version-migrator-operator-b67b599dd-56x4k\" (UID: \"c2f76bac-1abe-4877-8e25-768778bf5edd\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-56x4k" Dec 10 10:47:44 crc kubenswrapper[4780]: I1210 10:47:44.214076 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/c79bfa07-4a71-4560-b706-ac6c81b10ddc-oauth-serving-cert\") pod \"console-f9d7485db-2dwc9\" (UID: \"c79bfa07-4a71-4560-b706-ac6c81b10ddc\") " pod="openshift-console/console-f9d7485db-2dwc9" Dec 10 10:47:44 crc kubenswrapper[4780]: I1210 10:47:44.214215 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8nddf\" (UniqueName: \"kubernetes.io/projected/135e9ec7-c882-4894-a24e-669b09be3f5b-kube-api-access-8nddf\") pod \"apiserver-7bbb656c7d-dgfj7\" (UID: \"135e9ec7-c882-4894-a24e-669b09be3f5b\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-dgfj7" Dec 10 10:47:44 crc kubenswrapper[4780]: I1210 10:47:44.215258 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-wbpvc" Dec 10 10:47:44 crc kubenswrapper[4780]: I1210 10:47:44.216348 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c2f76bac-1abe-4877-8e25-768778bf5edd-config\") pod \"kube-storage-version-migrator-operator-b67b599dd-56x4k\" (UID: \"c2f76bac-1abe-4877-8e25-768778bf5edd\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-56x4k" Dec 10 10:47:44 crc kubenswrapper[4780]: I1210 10:47:44.216835 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/6e560d85-bf0c-4604-9f52-d46fe96b6fe7-v4-0-config-system-serving-cert\") pod \"oauth-openshift-558db77b4-qllmj\" (UID: \"6e560d85-bf0c-4604-9f52-d46fe96b6fe7\") " pod="openshift-authentication/oauth-openshift-558db77b4-qllmj" Dec 10 10:47:44 crc kubenswrapper[4780]: I1210 10:47:44.217551 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/cc6463b6-4c8f-48d2-8aca-74626aa632eb-kube-api-access\") pod \"kube-apiserver-operator-766d6c64bb-nfr2m\" (UID: \"cc6463b6-4c8f-48d2-8aca-74626aa632eb\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-nfr2m" Dec 10 10:47:44 crc kubenswrapper[4780]: I1210 10:47:44.217706 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/c79bfa07-4a71-4560-b706-ac6c81b10ddc-console-config\") pod \"console-f9d7485db-2dwc9\" (UID: \"c79bfa07-4a71-4560-b706-ac6c81b10ddc\") " pod="openshift-console/console-f9d7485db-2dwc9" Dec 10 10:47:44 crc kubenswrapper[4780]: I1210 10:47:44.223767 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/dc66eb07-7e9e-4093-9d7c-643164e2c8b7-bound-sa-token\") pod \"ingress-operator-5b745b69d9-dgx4k\" (UID: \"dc66eb07-7e9e-4093-9d7c-643164e2c8b7\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-dgx4k" Dec 10 10:47:44 crc kubenswrapper[4780]: I1210 10:47:44.226038 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/488b5c9e-23f8-47cb-ad7d-40af40abf207-serving-cert\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-kc2sh\" (UID: \"488b5c9e-23f8-47cb-ad7d-40af40abf207\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-kc2sh" Dec 10 10:47:44 crc kubenswrapper[4780]: I1210 10:47:44.226766 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kgxw4\" (UniqueName: \"kubernetes.io/projected/b431cc6c-6110-4578-8bd2-55f39d1cbe63-kube-api-access-kgxw4\") pod \"dns-operator-744455d44c-6tqmn\" (UID: \"b431cc6c-6110-4578-8bd2-55f39d1cbe63\") " pod="openshift-dns-operator/dns-operator-744455d44c-6tqmn" Dec 10 10:47:44 crc kubenswrapper[4780]: I1210 10:47:44.226594 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-thnb8\" (UniqueName: \"kubernetes.io/projected/09fe7cda-3948-484d-bcd9-e83d1ac0610a-kube-api-access-thnb8\") pod \"downloads-7954f5f757-fxxzq\" (UID: \"09fe7cda-3948-484d-bcd9-e83d1ac0610a\") " pod="openshift-console/downloads-7954f5f757-fxxzq" Dec 10 10:47:44 crc kubenswrapper[4780]: I1210 10:47:44.227070 4780 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/c79bfa07-4a71-4560-b706-ac6c81b10ddc-console-oauth-config\") pod \"console-f9d7485db-2dwc9\" (UID: \"c79bfa07-4a71-4560-b706-ac6c81b10ddc\") " pod="openshift-console/console-f9d7485db-2dwc9" Dec 10 10:47:44 crc kubenswrapper[4780]: I1210 10:47:44.227296 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/6e560d85-bf0c-4604-9f52-d46fe96b6fe7-v4-0-config-system-session\") pod \"oauth-openshift-558db77b4-qllmj\" (UID: \"6e560d85-bf0c-4604-9f52-d46fe96b6fe7\") " pod="openshift-authentication/oauth-openshift-558db77b4-qllmj" Dec 10 10:47:44 crc kubenswrapper[4780]: E1210 10:47:44.227499 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-10 10:47:44.727477918 +0000 UTC m=+169.580871361 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-25gsf" (UID: "b7abac51-adc5-42fa-9084-033e4e7e7acb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:47:44 crc kubenswrapper[4780]: I1210 10:47:44.227838 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/6e560d85-bf0c-4604-9f52-d46fe96b6fe7-v4-0-config-user-template-error\") pod \"oauth-openshift-558db77b4-qllmj\" (UID: \"6e560d85-bf0c-4604-9f52-d46fe96b6fe7\") " pod="openshift-authentication/oauth-openshift-558db77b4-qllmj" Dec 10 10:47:44 crc kubenswrapper[4780]: I1210 10:47:44.227945 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/6e560d85-bf0c-4604-9f52-d46fe96b6fe7-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-558db77b4-qllmj\" (UID: \"6e560d85-bf0c-4604-9f52-d46fe96b6fe7\") " pod="openshift-authentication/oauth-openshift-558db77b4-qllmj" Dec 10 10:47:44 crc kubenswrapper[4780]: I1210 10:47:44.227244 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5rwft\" (UniqueName: \"kubernetes.io/projected/40ab5014-1713-4cec-8577-58a4573025e8-kube-api-access-5rwft\") pod \"authentication-operator-69f744f599-zvngr\" (UID: \"40ab5014-1713-4cec-8577-58a4573025e8\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-zvngr" Dec 10 10:47:44 crc kubenswrapper[4780]: I1210 10:47:44.228617 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/1d4846ae-8aeb-4940-adb4-ad6726532d8b-mcc-auth-proxy-config\") pod \"machine-config-controller-84d6567774-jmkwd\" (UID: \"1d4846ae-8aeb-4940-adb4-ad6726532d8b\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-jmkwd" Dec 10 10:47:44 crc kubenswrapper[4780]: I1210 10:47:44.229218 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-cliconfig\" (UniqueName: 
\"kubernetes.io/configmap/6e560d85-bf0c-4604-9f52-d46fe96b6fe7-v4-0-config-system-cliconfig\") pod \"oauth-openshift-558db77b4-qllmj\" (UID: \"6e560d85-bf0c-4604-9f52-d46fe96b6fe7\") " pod="openshift-authentication/oauth-openshift-558db77b4-qllmj" Dec 10 10:47:44 crc kubenswrapper[4780]: I1210 10:47:44.230575 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hnrs6\" (UniqueName: \"kubernetes.io/projected/af48657d-340c-40dd-8c8c-9e4f7250337b-kube-api-access-hnrs6\") pod \"openshift-apiserver-operator-796bbdcf4f-hgp6g\" (UID: \"af48657d-340c-40dd-8c8c-9e4f7250337b\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-hgp6g" Dec 10 10:47:44 crc kubenswrapper[4780]: I1210 10:47:44.231144 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/6e560d85-bf0c-4604-9f52-d46fe96b6fe7-v4-0-config-system-service-ca\") pod \"oauth-openshift-558db77b4-qllmj\" (UID: \"6e560d85-bf0c-4604-9f52-d46fe96b6fe7\") " pod="openshift-authentication/oauth-openshift-558db77b4-qllmj" Dec 10 10:47:44 crc kubenswrapper[4780]: I1210 10:47:44.232693 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c2f76bac-1abe-4877-8e25-768778bf5edd-serving-cert\") pod \"kube-storage-version-migrator-operator-b67b599dd-56x4k\" (UID: \"c2f76bac-1abe-4877-8e25-768778bf5edd\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-56x4k" Dec 10 10:47:44 crc kubenswrapper[4780]: I1210 10:47:44.235796 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/6e560d85-bf0c-4604-9f52-d46fe96b6fe7-v4-0-config-system-router-certs\") pod \"oauth-openshift-558db77b4-qllmj\" (UID: \"6e560d85-bf0c-4604-9f52-d46fe96b6fe7\") " pod="openshift-authentication/oauth-openshift-558db77b4-qllmj" Dec 10 10:47:44 crc kubenswrapper[4780]: I1210 10:47:44.236234 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-hgp6g" Dec 10 10:47:44 crc kubenswrapper[4780]: I1210 10:47:44.236941 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/52170468-2209-45d0-84f3-d223b1052bf9-image-registry-operator-tls\") pod \"cluster-image-registry-operator-dc59b4c8b-b6pdb\" (UID: \"52170468-2209-45d0-84f3-d223b1052bf9\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-b6pdb" Dec 10 10:47:44 crc kubenswrapper[4780]: I1210 10:47:44.237180 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/6e560d85-bf0c-4604-9f52-d46fe96b6fe7-v4-0-config-user-template-login\") pod \"oauth-openshift-558db77b4-qllmj\" (UID: \"6e560d85-bf0c-4604-9f52-d46fe96b6fe7\") " pod="openshift-authentication/oauth-openshift-558db77b4-qllmj" Dec 10 10:47:44 crc kubenswrapper[4780]: I1210 10:47:44.250217 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/6e560d85-bf0c-4604-9f52-d46fe96b6fe7-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-558db77b4-qllmj\" (UID: \"6e560d85-bf0c-4604-9f52-d46fe96b6fe7\") " pod="openshift-authentication/oauth-openshift-558db77b4-qllmj" Dec 10 10:47:44 crc kubenswrapper[4780]: I1210 10:47:44.251555 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7f6v7\" (UniqueName: \"kubernetes.io/projected/0d2560a1-1eb3-4fad-89c4-100985ef6455-kube-api-access-7f6v7\") pod \"controller-manager-879f6c89f-njfgs\" (UID: \"0d2560a1-1eb3-4fad-89c4-100985ef6455\") " pod="openshift-controller-manager/controller-manager-879f6c89f-njfgs" Dec 10 10:47:44 crc kubenswrapper[4780]: I1210 10:47:44.251977 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/c79bfa07-4a71-4560-b706-ac6c81b10ddc-console-serving-cert\") pod \"console-f9d7485db-2dwc9\" (UID: \"c79bfa07-4a71-4560-b706-ac6c81b10ddc\") " pod="openshift-console/console-f9d7485db-2dwc9" Dec 10 10:47:44 crc kubenswrapper[4780]: I1210 10:47:44.252991 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-certs-default" Dec 10 10:47:44 crc kubenswrapper[4780]: I1210 10:47:44.254519 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"openshift-service-ca.crt" Dec 10 10:47:44 crc kubenswrapper[4780]: I1210 10:47:44.255567 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"machine-config-operator-images" Dec 10 10:47:44 crc kubenswrapper[4780]: I1210 10:47:44.257122 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rhn9q\" (UniqueName: \"kubernetes.io/projected/d4217565-e1f8-497f-a961-f7823901afb6-kube-api-access-rhn9q\") pod \"openshift-controller-manager-operator-756b6f6bc6-ns5px\" (UID: \"d4217565-e1f8-497f-a961-f7823901afb6\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-ns5px" Dec 10 10:47:44 crc kubenswrapper[4780]: I1210 10:47:44.257687 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7rmfh\" (UniqueName: 
\"kubernetes.io/projected/bee2f9e8-49ac-4ead-91c3-2ac4138eb042-kube-api-access-7rmfh\") pod \"etcd-operator-b45778765-lmmsq\" (UID: \"bee2f9e8-49ac-4ead-91c3-2ac4138eb042\") " pod="openshift-etcd-operator/etcd-operator-b45778765-lmmsq" Dec 10 10:47:44 crc kubenswrapper[4780]: I1210 10:47:44.260460 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hfgbg\" (UniqueName: \"kubernetes.io/projected/dc66eb07-7e9e-4093-9d7c-643164e2c8b7-kube-api-access-hfgbg\") pod \"ingress-operator-5b745b69d9-dgx4k\" (UID: \"dc66eb07-7e9e-4093-9d7c-643164e2c8b7\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-dgx4k" Dec 10 10:47:44 crc kubenswrapper[4780]: I1210 10:47:44.261097 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-kc2sh"] Dec 10 10:47:44 crc kubenswrapper[4780]: I1210 10:47:44.261408 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-operator-dockercfg-98p87" Dec 10 10:47:44 crc kubenswrapper[4780]: I1210 10:47:44.266175 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-nfr2m"] Dec 10 10:47:44 crc kubenswrapper[4780]: I1210 10:47:44.267532 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns-operator/dns-operator-744455d44c-6tqmn"] Dec 10 10:47:45 crc kubenswrapper[4780]: I1210 10:47:45.098170 4780 request.go:700] Waited for 2.171002328s due to client-side throttling, not priority and fairness, request: GET:https://api-int.crc.testing:6443/api/v1/namespaces/openshift-service-ca/configmaps?fieldSelector=metadata.name%3Dkube-root-ca.crt&limit=500&resourceVersion=0 Dec 10 10:47:45 crc kubenswrapper[4780]: I1210 10:47:45.098976 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-ns5px" Dec 10 10:47:45 crc kubenswrapper[4780]: I1210 10:47:45.099222 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-dgx4k" Dec 10 10:47:45 crc kubenswrapper[4780]: I1210 10:47:45.100173 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-69f744f599-zvngr" Dec 10 10:47:45 crc kubenswrapper[4780]: I1210 10:47:45.100286 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"marketplace-trusted-ca" Dec 10 10:47:45 crc kubenswrapper[4780]: I1210 10:47:45.100511 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-console-operator/console-operator-58897d9998-52wql" Dec 10 10:47:45 crc kubenswrapper[4780]: I1210 10:47:45.129741 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console-operator/console-operator-58897d9998-52wql"] Dec 10 10:47:45 crc kubenswrapper[4780]: I1210 10:47:45.139736 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"kube-root-ca.crt" Dec 10 10:47:45 crc kubenswrapper[4780]: I1210 10:47:45.141062 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mco-proxy-tls" Dec 10 10:47:45 crc kubenswrapper[4780]: I1210 10:47:45.142176 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"signing-key" Dec 10 10:47:45 crc kubenswrapper[4780]: I1210 10:47:45.142510 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"openshift-service-ca.crt" Dec 10 10:47:45 crc kubenswrapper[4780]: I1210 10:47:45.142878 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"pprof-cert" Dec 10 10:47:45 crc kubenswrapper[4780]: I1210 10:47:45.143310 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"kube-root-ca.crt" Dec 10 10:47:45 crc kubenswrapper[4780]: I1210 10:47:45.143710 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"service-ca-operator-dockercfg-rg9jl" Dec 10 10:47:45 crc kubenswrapper[4780]: I1210 10:47:45.144285 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"service-ca-dockercfg-pn86c" Dec 10 10:47:45 crc kubenswrapper[4780]: I1210 10:47:45.150278 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/1d4846ae-8aeb-4940-adb4-ad6726532d8b-proxy-tls\") pod \"machine-config-controller-84d6567774-jmkwd\" (UID: \"1d4846ae-8aeb-4940-adb4-ad6726532d8b\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-jmkwd" Dec 10 10:47:45 crc kubenswrapper[4780]: I1210 10:47:45.150550 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 10 10:47:45 crc kubenswrapper[4780]: I1210 10:47:45.150950 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"service-ca-operator-config" Dec 10 10:47:45 crc kubenswrapper[4780]: I1210 10:47:45.151967 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-dgfj7" Dec 10 10:47:45 crc kubenswrapper[4780]: I1210 10:47:45.154494 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-config-operator/openshift-config-operator-7777fb866f-q6q9q" Dec 10 10:47:45 crc kubenswrapper[4780]: I1210 10:47:45.155440 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication-operator/authentication-operator-69f744f599-zvngr"] Dec 10 10:47:45 crc kubenswrapper[4780]: I1210 10:47:45.156805 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serviceaccount-dockercfg-rq7zk" Dec 10 10:47:45 crc kubenswrapper[4780]: I1210 10:47:45.156866 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Dec 10 10:47:45 crc kubenswrapper[4780]: I1210 10:47:45.157619 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"signing-cabundle" Dec 10 10:47:45 crc kubenswrapper[4780]: E1210 10:47:45.172234 4780 secret.go:188] Couldn't get secret openshift-authentication/v4-0-config-system-ocp-branding-template: failed to sync secret cache: timed out waiting for the condition Dec 10 10:47:45 crc kubenswrapper[4780]: I1210 10:47:45.172475 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-njfgs" Dec 10 10:47:45 crc kubenswrapper[4780]: I1210 10:47:45.181041 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"package-server-manager-serving-cert" Dec 10 10:47:45 crc kubenswrapper[4780]: I1210 10:47:45.192214 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-dockercfg-qx5rd" Dec 10 10:47:45 crc kubenswrapper[4780]: I1210 10:47:45.192651 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-7954f5f757-fxxzq" Dec 10 10:47:45 crc kubenswrapper[4780]: I1210 10:47:45.192941 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-c8dbv" Dec 10 10:47:45 crc kubenswrapper[4780]: E1210 10:47:45.193077 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/6e560d85-bf0c-4604-9f52-d46fe96b6fe7-v4-0-config-system-ocp-branding-template podName:6e560d85-bf0c-4604-9f52-d46fe96b6fe7 nodeName:}" failed. No retries permitted until 2025-12-10 10:47:45.693037046 +0000 UTC m=+170.546430489 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "v4-0-config-system-ocp-branding-template" (UniqueName: "kubernetes.io/secret/6e560d85-bf0c-4604-9f52-d46fe96b6fe7-v4-0-config-system-ocp-branding-template") pod "oauth-openshift-558db77b4-qllmj" (UID: "6e560d85-bf0c-4604-9f52-d46fe96b6fe7") : failed to sync secret cache: timed out waiting for the condition Dec 10 10:47:45 crc kubenswrapper[4780]: E1210 10:47:45.194176 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-10 10:47:46.194165506 +0000 UTC m=+171.047558949 (durationBeforeRetry 1s). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:47:45 crc kubenswrapper[4780]: I1210 10:47:45.194849 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-b45778765-lmmsq" Dec 10 10:47:45 crc kubenswrapper[4780]: I1210 10:47:45.197796 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vtxkb\" (UniqueName: \"kubernetes.io/projected/0c750f90-4775-4f44-9edc-53dd41864462-kube-api-access-vtxkb\") pod \"service-ca-9c57cc56f-6z8d8\" (UID: \"0c750f90-4775-4f44-9edc-53dd41864462\") " pod="openshift-service-ca/service-ca-9c57cc56f-6z8d8" Dec 10 10:47:45 crc kubenswrapper[4780]: I1210 10:47:45.197854 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/75086894-fa83-41dd-ad4e-cca8d61f0869-tmpfs\") pod \"packageserver-d55dfcdfc-csjzj\" (UID: \"75086894-fa83-41dd-ad4e-cca8d61f0869\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-csjzj" Dec 10 10:47:45 crc kubenswrapper[4780]: I1210 10:47:45.197878 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-brm9t\" (UniqueName: \"kubernetes.io/projected/51f53e78-be26-49e0-b4ad-32de3129ddeb-kube-api-access-brm9t\") pod \"machine-config-server-vbvq9\" (UID: \"51f53e78-be26-49e0-b4ad-32de3129ddeb\") " pod="openshift-machine-config-operator/machine-config-server-vbvq9" Dec 10 10:47:45 crc kubenswrapper[4780]: I1210 10:47:45.197934 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9z2hv\" (UniqueName: \"kubernetes.io/projected/90ac2cea-e1c2-479b-8de0-0917f3779a13-kube-api-access-9z2hv\") pod \"marketplace-operator-79b997595-lrzpt\" (UID: \"90ac2cea-e1c2-479b-8de0-0917f3779a13\") " pod="openshift-marketplace/marketplace-operator-79b997595-lrzpt" Dec 10 10:47:45 crc kubenswrapper[4780]: I1210 10:47:45.198419 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/ee82ec7a-8f0c-4576-aad4-e7686fc8ca5b-images\") pod \"machine-config-operator-74547568cd-rlc8b\" (UID: \"ee82ec7a-8f0c-4576-aad4-e7686fc8ca5b\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-rlc8b" Dec 10 10:47:45 crc kubenswrapper[4780]: I1210 10:47:45.198468 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/1226614f-560f-40b9-81a2-595e79043653-default-certificate\") pod \"router-default-5444994796-qbmwm\" (UID: \"1226614f-560f-40b9-81a2-595e79043653\") " pod="openshift-ingress/router-default-5444994796-qbmwm" Dec 10 10:47:45 crc kubenswrapper[4780]: I1210 10:47:45.198787 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/75086894-fa83-41dd-ad4e-cca8d61f0869-webhook-cert\") pod \"packageserver-d55dfcdfc-csjzj\" (UID: \"75086894-fa83-41dd-ad4e-cca8d61f0869\") " 
pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-csjzj" Dec 10 10:47:45 crc kubenswrapper[4780]: I1210 10:47:45.198903 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f7dws\" (UniqueName: \"kubernetes.io/projected/536a1b3e-4cda-4ed7-985a-595c13968356-kube-api-access-f7dws\") pod \"collect-profiles-29422725-lvhr4\" (UID: \"536a1b3e-4cda-4ed7-985a-595c13968356\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29422725-lvhr4" Dec 10 10:47:45 crc kubenswrapper[4780]: I1210 10:47:45.198990 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/75086894-fa83-41dd-ad4e-cca8d61f0869-apiservice-cert\") pod \"packageserver-d55dfcdfc-csjzj\" (UID: \"75086894-fa83-41dd-ad4e-cca8d61f0869\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-csjzj" Dec 10 10:47:45 crc kubenswrapper[4780]: I1210 10:47:45.199278 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/0c750f90-4775-4f44-9edc-53dd41864462-signing-cabundle\") pod \"service-ca-9c57cc56f-6z8d8\" (UID: \"0c750f90-4775-4f44-9edc-53dd41864462\") " pod="openshift-service-ca/service-ca-9c57cc56f-6z8d8" Dec 10 10:47:45 crc kubenswrapper[4780]: I1210 10:47:45.199330 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/edec8e8c-042b-46c6-b136-32bf2d144f27-webhook-certs\") pod \"multus-admission-controller-857f4d67dd-v57bz\" (UID: \"edec8e8c-042b-46c6-b136-32bf2d144f27\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-v57bz" Dec 10 10:47:45 crc kubenswrapper[4780]: I1210 10:47:45.199357 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/ee82ec7a-8f0c-4576-aad4-e7686fc8ca5b-auth-proxy-config\") pod \"machine-config-operator-74547568cd-rlc8b\" (UID: \"ee82ec7a-8f0c-4576-aad4-e7686fc8ca5b\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-rlc8b" Dec 10 10:47:45 crc kubenswrapper[4780]: I1210 10:47:45.200445 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"images\" (UniqueName: \"kubernetes.io/configmap/ee82ec7a-8f0c-4576-aad4-e7686fc8ca5b-images\") pod \"machine-config-operator-74547568cd-rlc8b\" (UID: \"ee82ec7a-8f0c-4576-aad4-e7686fc8ca5b\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-rlc8b" Dec 10 10:47:45 crc kubenswrapper[4780]: I1210 10:47:45.200498 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/51f53e78-be26-49e0-b4ad-32de3129ddeb-node-bootstrap-token\") pod \"machine-config-server-vbvq9\" (UID: \"51f53e78-be26-49e0-b4ad-32de3129ddeb\") " pod="openshift-machine-config-operator/machine-config-server-vbvq9" Dec 10 10:47:45 crc kubenswrapper[4780]: I1210 10:47:45.201365 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zdslk\" (UniqueName: \"kubernetes.io/projected/7753484d-d587-473d-8630-20cefaad3c7c-kube-api-access-zdslk\") pod \"catalog-operator-68c6474976-4sfhd\" (UID: \"7753484d-d587-473d-8630-20cefaad3c7c\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-4sfhd" Dec 10 10:47:45 crc 
kubenswrapper[4780]: I1210 10:47:45.201440 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/536a1b3e-4cda-4ed7-985a-595c13968356-secret-volume\") pod \"collect-profiles-29422725-lvhr4\" (UID: \"536a1b3e-4cda-4ed7-985a-595c13968356\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29422725-lvhr4" Dec 10 10:47:45 crc kubenswrapper[4780]: I1210 10:47:45.201470 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/7753484d-d587-473d-8630-20cefaad3c7c-profile-collector-cert\") pod \"catalog-operator-68c6474976-4sfhd\" (UID: \"7753484d-d587-473d-8630-20cefaad3c7c\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-4sfhd" Dec 10 10:47:45 crc kubenswrapper[4780]: I1210 10:47:45.201501 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-v99cv\" (UniqueName: \"kubernetes.io/projected/1226614f-560f-40b9-81a2-595e79043653-kube-api-access-v99cv\") pod \"router-default-5444994796-qbmwm\" (UID: \"1226614f-560f-40b9-81a2-595e79043653\") " pod="openshift-ingress/router-default-5444994796-qbmwm" Dec 10 10:47:45 crc kubenswrapper[4780]: I1210 10:47:45.201537 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/7753484d-d587-473d-8630-20cefaad3c7c-srv-cert\") pod \"catalog-operator-68c6474976-4sfhd\" (UID: \"7753484d-d587-473d-8630-20cefaad3c7c\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-4sfhd" Dec 10 10:47:45 crc kubenswrapper[4780]: I1210 10:47:45.201604 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/1226614f-560f-40b9-81a2-595e79043653-service-ca-bundle\") pod \"router-default-5444994796-qbmwm\" (UID: \"1226614f-560f-40b9-81a2-595e79043653\") " pod="openshift-ingress/router-default-5444994796-qbmwm" Dec 10 10:47:45 crc kubenswrapper[4780]: I1210 10:47:45.201673 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2cjsv\" (UniqueName: \"kubernetes.io/projected/edec8e8c-042b-46c6-b136-32bf2d144f27-kube-api-access-2cjsv\") pod \"multus-admission-controller-857f4d67dd-v57bz\" (UID: \"edec8e8c-042b-46c6-b136-32bf2d144f27\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-v57bz" Dec 10 10:47:45 crc kubenswrapper[4780]: I1210 10:47:45.201716 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/ee82ec7a-8f0c-4576-aad4-e7686fc8ca5b-proxy-tls\") pod \"machine-config-operator-74547568cd-rlc8b\" (UID: \"ee82ec7a-8f0c-4576-aad4-e7686fc8ca5b\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-rlc8b" Dec 10 10:47:45 crc kubenswrapper[4780]: I1210 10:47:45.201741 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5d5f57c6-f7e1-4bda-82b0-0a6f1eb7e4ac-config\") pod \"kube-controller-manager-operator-78b949d7b-6ffpq\" (UID: \"5d5f57c6-f7e1-4bda-82b0-0a6f1eb7e4ac\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-6ffpq" Dec 10 10:47:45 crc kubenswrapper[4780]: I1210 10:47:45.201774 4780 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/5d5f57c6-f7e1-4bda-82b0-0a6f1eb7e4ac-kube-api-access\") pod \"kube-controller-manager-operator-78b949d7b-6ffpq\" (UID: \"5d5f57c6-f7e1-4bda-82b0-0a6f1eb7e4ac\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-6ffpq" Dec 10 10:47:45 crc kubenswrapper[4780]: I1210 10:47:45.201808 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/1226614f-560f-40b9-81a2-595e79043653-metrics-certs\") pod \"router-default-5444994796-qbmwm\" (UID: \"1226614f-560f-40b9-81a2-595e79043653\") " pod="openshift-ingress/router-default-5444994796-qbmwm" Dec 10 10:47:45 crc kubenswrapper[4780]: I1210 10:47:45.201829 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/0c750f90-4775-4f44-9edc-53dd41864462-signing-key\") pod \"service-ca-9c57cc56f-6z8d8\" (UID: \"0c750f90-4775-4f44-9edc-53dd41864462\") " pod="openshift-service-ca/service-ca-9c57cc56f-6z8d8" Dec 10 10:47:45 crc kubenswrapper[4780]: I1210 10:47:45.201880 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tg442\" (UniqueName: \"kubernetes.io/projected/ee82ec7a-8f0c-4576-aad4-e7686fc8ca5b-kube-api-access-tg442\") pod \"machine-config-operator-74547568cd-rlc8b\" (UID: \"ee82ec7a-8f0c-4576-aad4-e7686fc8ca5b\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-rlc8b" Dec 10 10:47:45 crc kubenswrapper[4780]: I1210 10:47:45.202169 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-r9sbp\" (UniqueName: \"kubernetes.io/projected/75086894-fa83-41dd-ad4e-cca8d61f0869-kube-api-access-r9sbp\") pod \"packageserver-d55dfcdfc-csjzj\" (UID: \"75086894-fa83-41dd-ad4e-cca8d61f0869\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-csjzj" Dec 10 10:47:45 crc kubenswrapper[4780]: I1210 10:47:45.202219 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/90ac2cea-e1c2-479b-8de0-0917f3779a13-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-lrzpt\" (UID: \"90ac2cea-e1c2-479b-8de0-0917f3779a13\") " pod="openshift-marketplace/marketplace-operator-79b997595-lrzpt" Dec 10 10:47:45 crc kubenswrapper[4780]: I1210 10:47:45.202255 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qtp9q\" (UniqueName: \"kubernetes.io/projected/007684b0-7499-47ad-b2d4-27051578f4cc-kube-api-access-qtp9q\") pod \"olm-operator-6b444d44fb-st68f\" (UID: \"007684b0-7499-47ad-b2d4-27051578f4cc\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-st68f" Dec 10 10:47:45 crc kubenswrapper[4780]: I1210 10:47:45.202280 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-s2mww\" (UniqueName: \"kubernetes.io/projected/a3427a27-b233-4e0f-a9ae-25a0d67c6b65-kube-api-access-s2mww\") pod \"migrator-59844c95c7-x9cz8\" (UID: \"a3427a27-b233-4e0f-a9ae-25a0d67c6b65\") " pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-x9cz8" Dec 10 10:47:45 crc kubenswrapper[4780]: I1210 10:47:45.202329 4780 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5d5f57c6-f7e1-4bda-82b0-0a6f1eb7e4ac-serving-cert\") pod \"kube-controller-manager-operator-78b949d7b-6ffpq\" (UID: \"5d5f57c6-f7e1-4bda-82b0-0a6f1eb7e4ac\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-6ffpq" Dec 10 10:47:45 crc kubenswrapper[4780]: I1210 10:47:45.202371 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/90ac2cea-e1c2-479b-8de0-0917f3779a13-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-lrzpt\" (UID: \"90ac2cea-e1c2-479b-8de0-0917f3779a13\") " pod="openshift-marketplace/marketplace-operator-79b997595-lrzpt" Dec 10 10:47:45 crc kubenswrapper[4780]: I1210 10:47:45.202404 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/007684b0-7499-47ad-b2d4-27051578f4cc-profile-collector-cert\") pod \"olm-operator-6b444d44fb-st68f\" (UID: \"007684b0-7499-47ad-b2d4-27051578f4cc\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-st68f" Dec 10 10:47:45 crc kubenswrapper[4780]: I1210 10:47:45.202424 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/007684b0-7499-47ad-b2d4-27051578f4cc-srv-cert\") pod \"olm-operator-6b444d44fb-st68f\" (UID: \"007684b0-7499-47ad-b2d4-27051578f4cc\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-st68f" Dec 10 10:47:45 crc kubenswrapper[4780]: I1210 10:47:45.202459 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/536a1b3e-4cda-4ed7-985a-595c13968356-config-volume\") pod \"collect-profiles-29422725-lvhr4\" (UID: \"536a1b3e-4cda-4ed7-985a-595c13968356\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29422725-lvhr4" Dec 10 10:47:45 crc kubenswrapper[4780]: I1210 10:47:45.202476 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/51f53e78-be26-49e0-b4ad-32de3129ddeb-certs\") pod \"machine-config-server-vbvq9\" (UID: \"51f53e78-be26-49e0-b4ad-32de3129ddeb\") " pod="openshift-machine-config-operator/machine-config-server-vbvq9" Dec 10 10:47:45 crc kubenswrapper[4780]: I1210 10:47:45.202494 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/1226614f-560f-40b9-81a2-595e79043653-stats-auth\") pod \"router-default-5444994796-qbmwm\" (UID: \"1226614f-560f-40b9-81a2-595e79043653\") " pod="openshift-ingress/router-default-5444994796-qbmwm" Dec 10 10:47:45 crc kubenswrapper[4780]: I1210 10:47:45.202532 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/19f469f0-60aa-4251-88f4-96baafae3f21-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-78cbb6b69f-nmvlq\" (UID: \"19f469f0-60aa-4251-88f4-96baafae3f21\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-nmvlq" Dec 10 10:47:45 crc kubenswrapper[4780]: I1210 10:47:45.202558 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for 
volume \"kube-api-access-nm5mh\" (UniqueName: \"kubernetes.io/projected/19f469f0-60aa-4251-88f4-96baafae3f21-kube-api-access-nm5mh\") pod \"control-plane-machine-set-operator-78cbb6b69f-nmvlq\" (UID: \"19f469f0-60aa-4251-88f4-96baafae3f21\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-nmvlq" Dec 10 10:47:45 crc kubenswrapper[4780]: I1210 10:47:45.202546 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/ee82ec7a-8f0c-4576-aad4-e7686fc8ca5b-auth-proxy-config\") pod \"machine-config-operator-74547568cd-rlc8b\" (UID: \"ee82ec7a-8f0c-4576-aad4-e7686fc8ca5b\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-rlc8b" Dec 10 10:47:45 crc kubenswrapper[4780]: I1210 10:47:45.202635 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/0c750f90-4775-4f44-9edc-53dd41864462-signing-cabundle\") pod \"service-ca-9c57cc56f-6z8d8\" (UID: \"0c750f90-4775-4f44-9edc-53dd41864462\") " pod="openshift-service-ca/service-ca-9c57cc56f-6z8d8" Dec 10 10:47:45 crc kubenswrapper[4780]: I1210 10:47:45.201529 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-744455d44c-6tqmn" Dec 10 10:47:45 crc kubenswrapper[4780]: I1210 10:47:45.204280 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-ocp-branding-template" Dec 10 10:47:45 crc kubenswrapper[4780]: I1210 10:47:45.204370 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"catalog-operator-serving-cert" Dec 10 10:47:45 crc kubenswrapper[4780]: E1210 10:47:45.204620 4780 configmap.go:193] Couldn't get configMap openshift-authentication/v4-0-config-system-trusted-ca-bundle: failed to sync configmap cache: timed out waiting for the condition Dec 10 10:47:45 crc kubenswrapper[4780]: I1210 10:47:45.204738 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"openshift-service-ca.crt" Dec 10 10:47:45 crc kubenswrapper[4780]: I1210 10:47:45.207236 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-trusted-ca-bundle" Dec 10 10:47:45 crc kubenswrapper[4780]: I1210 10:47:45.208228 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-tls" Dec 10 10:47:45 crc kubenswrapper[4780]: I1210 10:47:45.209473 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/multus-admission-controller-857f4d67dd-v57bz"] Dec 10 10:47:45 crc kubenswrapper[4780]: I1210 10:47:45.210748 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6gfvl\" (UniqueName: \"kubernetes.io/projected/6e560d85-bf0c-4604-9f52-d46fe96b6fe7-kube-api-access-6gfvl\") pod \"oauth-openshift-558db77b4-qllmj\" (UID: \"6e560d85-bf0c-4604-9f52-d46fe96b6fe7\") " pod="openshift-authentication/oauth-openshift-558db77b4-qllmj" Dec 10 10:47:45 crc kubenswrapper[4780]: I1210 10:47:45.212562 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" Dec 10 10:47:45 crc kubenswrapper[4780]: I1210 10:47:45.213085 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"canary-serving-cert" Dec 
10 10:47:45 crc kubenswrapper[4780]: I1210 10:47:45.213832 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/488b5c9e-23f8-47cb-ad7d-40af40abf207-kube-api-access\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-kc2sh\" (UID: \"488b5c9e-23f8-47cb-ad7d-40af40abf207\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-kc2sh" Dec 10 10:47:45 crc kubenswrapper[4780]: I1210 10:47:45.214752 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"node-bootstrapper-token" Dec 10 10:47:45 crc kubenswrapper[4780]: I1210 10:47:45.215004 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serving-cert" Dec 10 10:47:45 crc kubenswrapper[4780]: I1210 10:47:45.215215 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"serving-cert" Dec 10 10:47:45 crc kubenswrapper[4780]: I1210 10:47:45.215424 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2" Dec 10 10:47:45 crc kubenswrapper[4780]: I1210 10:47:45.216673 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"default-dockercfg-2llfx" Dec 10 10:47:45 crc kubenswrapper[4780]: I1210 10:47:45.217064 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/catalog-operator-68c6474976-4sfhd"] Dec 10 10:47:45 crc kubenswrapper[4780]: I1210 10:47:45.217075 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Dec 10 10:47:45 crc kubenswrapper[4780]: I1210 10:47:45.217192 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"kube-root-ca.crt" Dec 10 10:47:45 crc kubenswrapper[4780]: I1210 10:47:45.218425 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-56x4k"] Dec 10 10:47:45 crc kubenswrapper[4780]: I1210 10:47:45.220188 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-6ffpq"] Dec 10 10:47:45 crc kubenswrapper[4780]: I1210 10:47:45.228243 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-dns/dns-default-snzm9"] Dec 10 10:47:45 crc kubenswrapper[4780]: I1210 10:47:45.229634 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-csjzj"] Dec 10 10:47:45 crc kubenswrapper[4780]: I1210 10:47:45.229665 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29422725-lvhr4"] Dec 10 10:47:45 crc kubenswrapper[4780]: I1210 10:47:45.229786 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-dns/dns-default-snzm9" Dec 10 10:47:45 crc kubenswrapper[4780]: I1210 10:47:45.234033 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"packageserver-service-cert" Dec 10 10:47:45 crc kubenswrapper[4780]: I1210 10:47:45.234637 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ac-dockercfg-9lkdf" Dec 10 10:47:45 crc kubenswrapper[4780]: I1210 10:47:45.234901 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"openshift-service-ca.crt" Dec 10 10:47:45 crc kubenswrapper[4780]: I1210 10:47:45.235171 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"kube-root-ca.crt" Dec 10 10:47:45 crc kubenswrapper[4780]: I1210 10:47:45.235311 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/52170468-2209-45d0-84f3-d223b1052bf9-bound-sa-token\") pod \"cluster-image-registry-operator-dc59b4c8b-b6pdb\" (UID: \"52170468-2209-45d0-84f3-d223b1052bf9\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-b6pdb" Dec 10 10:47:45 crc kubenswrapper[4780]: I1210 10:47:45.235361 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-admission-controller-secret" Dec 10 10:47:45 crc kubenswrapper[4780]: I1210 10:47:45.235618 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/7753484d-d587-473d-8630-20cefaad3c7c-profile-collector-cert\") pod \"catalog-operator-68c6474976-4sfhd\" (UID: \"7753484d-d587-473d-8630-20cefaad3c7c\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-4sfhd" Dec 10 10:47:45 crc kubenswrapper[4780]: I1210 10:47:45.238972 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/1226614f-560f-40b9-81a2-595e79043653-metrics-certs\") pod \"router-default-5444994796-qbmwm\" (UID: \"1226614f-560f-40b9-81a2-595e79043653\") " pod="openshift-ingress/router-default-5444994796-qbmwm" Dec 10 10:47:45 crc kubenswrapper[4780]: I1210 10:47:45.239477 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-st68f"] Dec 10 10:47:45 crc kubenswrapper[4780]: I1210 10:47:45.240407 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/1226614f-560f-40b9-81a2-595e79043653-default-certificate\") pod \"router-default-5444994796-qbmwm\" (UID: \"1226614f-560f-40b9-81a2-595e79043653\") " pod="openshift-ingress/router-default-5444994796-qbmwm" Dec 10 10:47:45 crc kubenswrapper[4780]: I1210 10:47:45.240510 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-fw9bc" Dec 10 10:47:45 crc kubenswrapper[4780]: I1210 10:47:45.240967 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-nmvlq"] Dec 10 10:47:45 crc kubenswrapper[4780]: I1210 10:47:45.240981 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fllkn\" (UniqueName: \"kubernetes.io/projected/1d4846ae-8aeb-4940-adb4-ad6726532d8b-kube-api-access-fllkn\") pod \"machine-config-controller-84d6567774-jmkwd\" (UID: \"1d4846ae-8aeb-4940-adb4-ad6726532d8b\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-jmkwd" Dec 10 10:47:45 crc kubenswrapper[4780]: I1210 10:47:45.242465 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-nfr2m" Dec 10 10:47:45 crc kubenswrapper[4780]: E1210 10:47:45.243033 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/6e560d85-bf0c-4604-9f52-d46fe96b6fe7-v4-0-config-system-trusted-ca-bundle podName:6e560d85-bf0c-4604-9f52-d46fe96b6fe7 nodeName:}" failed. No retries permitted until 2025-12-10 10:47:45.742979834 +0000 UTC m=+170.596373277 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "v4-0-config-system-trusted-ca-bundle" (UniqueName: "kubernetes.io/configmap/6e560d85-bf0c-4604-9f52-d46fe96b6fe7-v4-0-config-system-trusted-ca-bundle") pod "oauth-openshift-558db77b4-qllmj" (UID: "6e560d85-bf0c-4604-9f52-d46fe96b6fe7") : failed to sync configmap cache: timed out waiting for the condition Dec 10 10:47:45 crc kubenswrapper[4780]: I1210 10:47:45.243476 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-f9d7485db-2dwc9"] Dec 10 10:47:45 crc kubenswrapper[4780]: I1210 10:47:45.243584 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/90ac2cea-e1c2-479b-8de0-0917f3779a13-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-lrzpt\" (UID: \"90ac2cea-e1c2-479b-8de0-0917f3779a13\") " pod="openshift-marketplace/marketplace-operator-79b997595-lrzpt" Dec 10 10:47:45 crc kubenswrapper[4780]: I1210 10:47:45.245058 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/1226614f-560f-40b9-81a2-595e79043653-service-ca-bundle\") pod \"router-default-5444994796-qbmwm\" (UID: \"1226614f-560f-40b9-81a2-595e79043653\") " pod="openshift-ingress/router-default-5444994796-qbmwm" Dec 10 10:47:45 crc kubenswrapper[4780]: I1210 10:47:45.245110 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-operator-74547568cd-rlc8b"] Dec 10 10:47:45 crc kubenswrapper[4780]: I1210 10:47:45.245946 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca-operator/service-ca-operator-777779d784-skxsg"] Dec 10 10:47:45 crc kubenswrapper[4780]: I1210 10:47:45.247158 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-hgp6g"] Dec 10 10:47:45 crc kubenswrapper[4780]: I1210 10:47:45.248022 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/7753484d-d587-473d-8630-20cefaad3c7c-srv-cert\") pod 
\"catalog-operator-68c6474976-4sfhd\" (UID: \"7753484d-d587-473d-8630-20cefaad3c7c\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-4sfhd" Dec 10 10:47:45 crc kubenswrapper[4780]: I1210 10:47:45.256497 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tg442\" (UniqueName: \"kubernetes.io/projected/ee82ec7a-8f0c-4576-aad4-e7686fc8ca5b-kube-api-access-tg442\") pod \"machine-config-operator-74547568cd-rlc8b\" (UID: \"ee82ec7a-8f0c-4576-aad4-e7686fc8ca5b\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-rlc8b" Dec 10 10:47:45 crc kubenswrapper[4780]: I1210 10:47:45.257512 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/ee82ec7a-8f0c-4576-aad4-e7686fc8ca5b-proxy-tls\") pod \"machine-config-operator-74547568cd-rlc8b\" (UID: \"ee82ec7a-8f0c-4576-aad4-e7686fc8ca5b\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-rlc8b" Dec 10 10:47:45 crc kubenswrapper[4780]: I1210 10:47:45.258419 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4g7cj\" (UniqueName: \"kubernetes.io/projected/52170468-2209-45d0-84f3-d223b1052bf9-kube-api-access-4g7cj\") pod \"cluster-image-registry-operator-dc59b4c8b-b6pdb\" (UID: \"52170468-2209-45d0-84f3-d223b1052bf9\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-b6pdb" Dec 10 10:47:45 crc kubenswrapper[4780]: I1210 10:47:45.260818 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/0c750f90-4775-4f44-9edc-53dd41864462-signing-key\") pod \"service-ca-9c57cc56f-6z8d8\" (UID: \"0c750f90-4775-4f44-9edc-53dd41864462\") " pod="openshift-service-ca/service-ca-9c57cc56f-6z8d8" Dec 10 10:47:45 crc kubenswrapper[4780]: I1210 10:47:45.260818 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bmjht\" (UniqueName: \"kubernetes.io/projected/c2f76bac-1abe-4877-8e25-768778bf5edd-kube-api-access-bmjht\") pod \"kube-storage-version-migrator-operator-b67b599dd-56x4k\" (UID: \"c2f76bac-1abe-4877-8e25-768778bf5edd\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-56x4k" Dec 10 10:47:45 crc kubenswrapper[4780]: I1210 10:47:45.261433 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nm5mh\" (UniqueName: \"kubernetes.io/projected/19f469f0-60aa-4251-88f4-96baafae3f21-kube-api-access-nm5mh\") pod \"control-plane-machine-set-operator-78cbb6b69f-nmvlq\" (UID: \"19f469f0-60aa-4251-88f4-96baafae3f21\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-nmvlq" Dec 10 10:47:45 crc kubenswrapper[4780]: I1210 10:47:45.263747 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns/dns-default-snzm9"] Dec 10 10:47:45 crc kubenswrapper[4780]: I1210 10:47:45.263835 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vtxkb\" (UniqueName: \"kubernetes.io/projected/0c750f90-4775-4f44-9edc-53dd41864462-kube-api-access-vtxkb\") pod \"service-ca-9c57cc56f-6z8d8\" (UID: \"0c750f90-4775-4f44-9edc-53dd41864462\") " pod="openshift-service-ca/service-ca-9c57cc56f-6z8d8" Dec 10 10:47:45 crc kubenswrapper[4780]: I1210 10:47:45.270325 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-dockercfg-jwfmh" Dec 10 10:47:45 crc 
kubenswrapper[4780]: I1210 10:47:45.270324 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9z2hv\" (UniqueName: \"kubernetes.io/projected/90ac2cea-e1c2-479b-8de0-0917f3779a13-kube-api-access-9z2hv\") pod \"marketplace-operator-79b997595-lrzpt\" (UID: \"90ac2cea-e1c2-479b-8de0-0917f3779a13\") " pod="openshift-marketplace/marketplace-operator-79b997595-lrzpt" Dec 10 10:47:45 crc kubenswrapper[4780]: I1210 10:47:45.273174 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/19f469f0-60aa-4251-88f4-96baafae3f21-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-78cbb6b69f-nmvlq\" (UID: \"19f469f0-60aa-4251-88f4-96baafae3f21\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-nmvlq" Dec 10 10:47:45 crc kubenswrapper[4780]: I1210 10:47:45.276046 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-lrzpt"] Dec 10 10:47:45 crc kubenswrapper[4780]: I1210 10:47:45.276176 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-canary/ingress-canary-2r7qq"] Dec 10 10:47:45 crc kubenswrapper[4780]: I1210 10:47:45.276194 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["hostpath-provisioner/csi-hostpathplugin-7vv54"] Dec 10 10:47:45 crc kubenswrapper[4780]: I1210 10:47:45.278201 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-7vv54" Dec 10 10:47:45 crc kubenswrapper[4780]: I1210 10:47:45.286683 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-qllmj"] Dec 10 10:47:45 crc kubenswrapper[4780]: I1210 10:47:45.305942 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 10 10:47:45 crc kubenswrapper[4780]: I1210 10:47:45.306301 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kx6dw\" (UniqueName: \"kubernetes.io/projected/c725ccf2-5e48-4267-b43b-df62e400d0d5-kube-api-access-kx6dw\") pod \"ingress-canary-2r7qq\" (UID: \"c725ccf2-5e48-4267-b43b-df62e400d0d5\") " pod="openshift-ingress-canary/ingress-canary-2r7qq" Dec 10 10:47:45 crc kubenswrapper[4780]: I1210 10:47:45.306343 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/007684b0-7499-47ad-b2d4-27051578f4cc-profile-collector-cert\") pod \"olm-operator-6b444d44fb-st68f\" (UID: \"007684b0-7499-47ad-b2d4-27051578f4cc\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-st68f" Dec 10 10:47:45 crc kubenswrapper[4780]: I1210 10:47:45.306365 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/007684b0-7499-47ad-b2d4-27051578f4cc-srv-cert\") pod \"olm-operator-6b444d44fb-st68f\" (UID: \"007684b0-7499-47ad-b2d4-27051578f4cc\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-st68f" Dec 10 10:47:45 crc kubenswrapper[4780]: I1210 10:47:45.306405 4780 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/536a1b3e-4cda-4ed7-985a-595c13968356-config-volume\") pod \"collect-profiles-29422725-lvhr4\" (UID: \"536a1b3e-4cda-4ed7-985a-595c13968356\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29422725-lvhr4" Dec 10 10:47:45 crc kubenswrapper[4780]: I1210 10:47:45.306429 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/51f53e78-be26-49e0-b4ad-32de3129ddeb-certs\") pod \"machine-config-server-vbvq9\" (UID: \"51f53e78-be26-49e0-b4ad-32de3129ddeb\") " pod="openshift-machine-config-operator/machine-config-server-vbvq9" Dec 10 10:47:45 crc kubenswrapper[4780]: I1210 10:47:45.306512 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/c725ccf2-5e48-4267-b43b-df62e400d0d5-cert\") pod \"ingress-canary-2r7qq\" (UID: \"c725ccf2-5e48-4267-b43b-df62e400d0d5\") " pod="openshift-ingress-canary/ingress-canary-2r7qq" Dec 10 10:47:45 crc kubenswrapper[4780]: I1210 10:47:45.306543 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/75086894-fa83-41dd-ad4e-cca8d61f0869-tmpfs\") pod \"packageserver-d55dfcdfc-csjzj\" (UID: \"75086894-fa83-41dd-ad4e-cca8d61f0869\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-csjzj" Dec 10 10:47:45 crc kubenswrapper[4780]: I1210 10:47:45.306565 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-brm9t\" (UniqueName: \"kubernetes.io/projected/51f53e78-be26-49e0-b4ad-32de3129ddeb-kube-api-access-brm9t\") pod \"machine-config-server-vbvq9\" (UID: \"51f53e78-be26-49e0-b4ad-32de3129ddeb\") " pod="openshift-machine-config-operator/machine-config-server-vbvq9" Dec 10 10:47:45 crc kubenswrapper[4780]: I1210 10:47:45.306635 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1ef48cf1-e8b0-491d-bcd8-5e6ca70702d0-config\") pod \"service-ca-operator-777779d784-skxsg\" (UID: \"1ef48cf1-e8b0-491d-bcd8-5e6ca70702d0\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-skxsg" Dec 10 10:47:45 crc kubenswrapper[4780]: I1210 10:47:45.306693 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/75086894-fa83-41dd-ad4e-cca8d61f0869-webhook-cert\") pod \"packageserver-d55dfcdfc-csjzj\" (UID: \"75086894-fa83-41dd-ad4e-cca8d61f0869\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-csjzj" Dec 10 10:47:45 crc kubenswrapper[4780]: I1210 10:47:45.306719 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-f7dws\" (UniqueName: \"kubernetes.io/projected/536a1b3e-4cda-4ed7-985a-595c13968356-kube-api-access-f7dws\") pod \"collect-profiles-29422725-lvhr4\" (UID: \"536a1b3e-4cda-4ed7-985a-595c13968356\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29422725-lvhr4" Dec 10 10:47:45 crc kubenswrapper[4780]: I1210 10:47:45.306754 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/657131c5-4c67-4388-bd84-4efa0978219f-package-server-manager-serving-cert\") pod 
\"package-server-manager-789f6589d5-hx2j2\" (UID: \"657131c5-4c67-4388-bd84-4efa0978219f\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-hx2j2" Dec 10 10:47:45 crc kubenswrapper[4780]: I1210 10:47:45.306796 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/75086894-fa83-41dd-ad4e-cca8d61f0869-apiservice-cert\") pod \"packageserver-d55dfcdfc-csjzj\" (UID: \"75086894-fa83-41dd-ad4e-cca8d61f0869\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-csjzj" Dec 10 10:47:45 crc kubenswrapper[4780]: I1210 10:47:45.306855 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/51f53e78-be26-49e0-b4ad-32de3129ddeb-node-bootstrap-token\") pod \"machine-config-server-vbvq9\" (UID: \"51f53e78-be26-49e0-b4ad-32de3129ddeb\") " pod="openshift-machine-config-operator/machine-config-server-vbvq9" Dec 10 10:47:45 crc kubenswrapper[4780]: I1210 10:47:45.306950 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/536a1b3e-4cda-4ed7-985a-595c13968356-secret-volume\") pod \"collect-profiles-29422725-lvhr4\" (UID: \"536a1b3e-4cda-4ed7-985a-595c13968356\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29422725-lvhr4" Dec 10 10:47:45 crc kubenswrapper[4780]: I1210 10:47:45.307031 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5d5f57c6-f7e1-4bda-82b0-0a6f1eb7e4ac-config\") pod \"kube-controller-manager-operator-78b949d7b-6ffpq\" (UID: \"5d5f57c6-f7e1-4bda-82b0-0a6f1eb7e4ac\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-6ffpq" Dec 10 10:47:45 crc kubenswrapper[4780]: I1210 10:47:45.307077 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/5d5f57c6-f7e1-4bda-82b0-0a6f1eb7e4ac-kube-api-access\") pod \"kube-controller-manager-operator-78b949d7b-6ffpq\" (UID: \"5d5f57c6-f7e1-4bda-82b0-0a6f1eb7e4ac\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-6ffpq" Dec 10 10:47:45 crc kubenswrapper[4780]: I1210 10:47:45.307103 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5hvp9\" (UniqueName: \"kubernetes.io/projected/657131c5-4c67-4388-bd84-4efa0978219f-kube-api-access-5hvp9\") pod \"package-server-manager-789f6589d5-hx2j2\" (UID: \"657131c5-4c67-4388-bd84-4efa0978219f\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-hx2j2" Dec 10 10:47:45 crc kubenswrapper[4780]: I1210 10:47:45.307137 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jv4v4\" (UniqueName: \"kubernetes.io/projected/1ef48cf1-e8b0-491d-bcd8-5e6ca70702d0-kube-api-access-jv4v4\") pod \"service-ca-operator-777779d784-skxsg\" (UID: \"1ef48cf1-e8b0-491d-bcd8-5e6ca70702d0\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-skxsg" Dec 10 10:47:45 crc kubenswrapper[4780]: I1210 10:47:45.307160 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1ef48cf1-e8b0-491d-bcd8-5e6ca70702d0-serving-cert\") pod 
\"service-ca-operator-777779d784-skxsg\" (UID: \"1ef48cf1-e8b0-491d-bcd8-5e6ca70702d0\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-skxsg" Dec 10 10:47:45 crc kubenswrapper[4780]: I1210 10:47:45.307190 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-r9sbp\" (UniqueName: \"kubernetes.io/projected/75086894-fa83-41dd-ad4e-cca8d61f0869-kube-api-access-r9sbp\") pod \"packageserver-d55dfcdfc-csjzj\" (UID: \"75086894-fa83-41dd-ad4e-cca8d61f0869\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-csjzj" Dec 10 10:47:45 crc kubenswrapper[4780]: I1210 10:47:45.307223 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qtp9q\" (UniqueName: \"kubernetes.io/projected/007684b0-7499-47ad-b2d4-27051578f4cc-kube-api-access-qtp9q\") pod \"olm-operator-6b444d44fb-st68f\" (UID: \"007684b0-7499-47ad-b2d4-27051578f4cc\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-st68f" Dec 10 10:47:45 crc kubenswrapper[4780]: I1210 10:47:45.307245 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2mww\" (UniqueName: \"kubernetes.io/projected/a3427a27-b233-4e0f-a9ae-25a0d67c6b65-kube-api-access-s2mww\") pod \"migrator-59844c95c7-x9cz8\" (UID: \"a3427a27-b233-4e0f-a9ae-25a0d67c6b65\") " pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-x9cz8" Dec 10 10:47:45 crc kubenswrapper[4780]: I1210 10:47:45.307288 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5d5f57c6-f7e1-4bda-82b0-0a6f1eb7e4ac-serving-cert\") pod \"kube-controller-manager-operator-78b949d7b-6ffpq\" (UID: \"5d5f57c6-f7e1-4bda-82b0-0a6f1eb7e4ac\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-6ffpq" Dec 10 10:47:45 crc kubenswrapper[4780]: E1210 10:47:45.332965 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-10 10:47:45.832938468 +0000 UTC m=+170.686331911 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:47:45 crc kubenswrapper[4780]: I1210 10:47:45.334730 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/75086894-fa83-41dd-ad4e-cca8d61f0869-tmpfs\") pod \"packageserver-d55dfcdfc-csjzj\" (UID: \"75086894-fa83-41dd-ad4e-cca8d61f0869\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-csjzj" Dec 10 10:47:45 crc kubenswrapper[4780]: I1210 10:47:45.337709 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/536a1b3e-4cda-4ed7-985a-595c13968356-config-volume\") pod \"collect-profiles-29422725-lvhr4\" (UID: \"536a1b3e-4cda-4ed7-985a-595c13968356\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29422725-lvhr4" Dec 10 10:47:45 crc kubenswrapper[4780]: I1210 10:47:45.341552 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5d5f57c6-f7e1-4bda-82b0-0a6f1eb7e4ac-config\") pod \"kube-controller-manager-operator-78b949d7b-6ffpq\" (UID: \"5d5f57c6-f7e1-4bda-82b0-0a6f1eb7e4ac\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-6ffpq" Dec 10 10:47:45 crc kubenswrapper[4780]: I1210 10:47:45.372499 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zdslk\" (UniqueName: \"kubernetes.io/projected/7753484d-d587-473d-8630-20cefaad3c7c-kube-api-access-zdslk\") pod \"catalog-operator-68c6474976-4sfhd\" (UID: \"7753484d-d587-473d-8630-20cefaad3c7c\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-4sfhd" Dec 10 10:47:45 crc kubenswrapper[4780]: I1210 10:47:45.379114 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/1226614f-560f-40b9-81a2-595e79043653-stats-auth\") pod \"router-default-5444994796-qbmwm\" (UID: \"1226614f-560f-40b9-81a2-595e79043653\") " pod="openshift-ingress/router-default-5444994796-qbmwm" Dec 10 10:47:45 crc kubenswrapper[4780]: I1210 10:47:45.379235 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["hostpath-provisioner/csi-hostpathplugin-7vv54"] Dec 10 10:47:45 crc kubenswrapper[4780]: I1210 10:47:45.379684 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"openshift-service-ca.crt" Dec 10 10:47:45 crc kubenswrapper[4780]: I1210 10:47:45.379760 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5d5f57c6-f7e1-4bda-82b0-0a6f1eb7e4ac-serving-cert\") pod \"kube-controller-manager-operator-78b949d7b-6ffpq\" (UID: \"5d5f57c6-f7e1-4bda-82b0-0a6f1eb7e4ac\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-6ffpq" Dec 10 10:47:45 crc kubenswrapper[4780]: I1210 10:47:45.379773 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-hx2j2"] Dec 10 10:47:45 crc kubenswrapper[4780]: 
I1210 10:47:45.379784 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-default-metrics-tls" Dec 10 10:47:45 crc kubenswrapper[4780]: I1210 10:47:45.379982 4780 reflector.go:368] Caches populated for *v1.Secret from object-"hostpath-provisioner"/"csi-hostpath-provisioner-sa-dockercfg-qd74k" Dec 10 10:47:45 crc kubenswrapper[4780]: I1210 10:47:45.379854 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"dns-default" Dec 10 10:47:45 crc kubenswrapper[4780]: I1210 10:47:45.380456 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/75086894-fa83-41dd-ad4e-cca8d61f0869-apiservice-cert\") pod \"packageserver-d55dfcdfc-csjzj\" (UID: \"75086894-fa83-41dd-ad4e-cca8d61f0869\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-csjzj" Dec 10 10:47:45 crc kubenswrapper[4780]: I1210 10:47:45.382235 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/51f53e78-be26-49e0-b4ad-32de3129ddeb-node-bootstrap-token\") pod \"machine-config-server-vbvq9\" (UID: \"51f53e78-be26-49e0-b4ad-32de3129ddeb\") " pod="openshift-machine-config-operator/machine-config-server-vbvq9" Dec 10 10:47:45 crc kubenswrapper[4780]: I1210 10:47:45.386694 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/536a1b3e-4cda-4ed7-985a-595c13968356-secret-volume\") pod \"collect-profiles-29422725-lvhr4\" (UID: \"536a1b3e-4cda-4ed7-985a-595c13968356\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29422725-lvhr4" Dec 10 10:47:45 crc kubenswrapper[4780]: I1210 10:47:45.388166 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"kube-root-ca.crt" Dec 10 10:47:45 crc kubenswrapper[4780]: I1210 10:47:45.391738 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2cjsv\" (UniqueName: \"kubernetes.io/projected/edec8e8c-042b-46c6-b136-32bf2d144f27-kube-api-access-2cjsv\") pod \"multus-admission-controller-857f4d67dd-v57bz\" (UID: \"edec8e8c-042b-46c6-b136-32bf2d144f27\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-v57bz" Dec 10 10:47:45 crc kubenswrapper[4780]: I1210 10:47:45.392601 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/edec8e8c-042b-46c6-b136-32bf2d144f27-webhook-certs\") pod \"multus-admission-controller-857f4d67dd-v57bz\" (UID: \"edec8e8c-042b-46c6-b136-32bf2d144f27\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-v57bz" Dec 10 10:47:45 crc kubenswrapper[4780]: I1210 10:47:45.392947 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/90ac2cea-e1c2-479b-8de0-0917f3779a13-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-lrzpt\" (UID: \"90ac2cea-e1c2-479b-8de0-0917f3779a13\") " pod="openshift-marketplace/marketplace-operator-79b997595-lrzpt" Dec 10 10:47:45 crc kubenswrapper[4780]: I1210 10:47:45.393494 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xztxr\" (UniqueName: \"kubernetes.io/projected/c79bfa07-4a71-4560-b706-ac6c81b10ddc-kube-api-access-xztxr\") pod \"console-f9d7485db-2dwc9\" (UID: \"c79bfa07-4a71-4560-b706-ac6c81b10ddc\") " 
pod="openshift-console/console-f9d7485db-2dwc9" Dec 10 10:47:45 crc kubenswrapper[4780]: I1210 10:47:45.397236 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/b7abac51-adc5-42fa-9084-033e4e7e7acb-bound-sa-token\") pod \"image-registry-697d97f7c8-25gsf\" (UID: \"b7abac51-adc5-42fa-9084-033e4e7e7acb\") " pod="openshift-image-registry/image-registry-697d97f7c8-25gsf" Dec 10 10:47:45 crc kubenswrapper[4780]: I1210 10:47:45.397480 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/75086894-fa83-41dd-ad4e-cca8d61f0869-webhook-cert\") pod \"packageserver-d55dfcdfc-csjzj\" (UID: \"75086894-fa83-41dd-ad4e-cca8d61f0869\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-csjzj" Dec 10 10:47:45 crc kubenswrapper[4780]: I1210 10:47:45.397736 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/007684b0-7499-47ad-b2d4-27051578f4cc-srv-cert\") pod \"olm-operator-6b444d44fb-st68f\" (UID: \"007684b0-7499-47ad-b2d4-27051578f4cc\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-st68f" Dec 10 10:47:45 crc kubenswrapper[4780]: I1210 10:47:45.399962 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-v99cv\" (UniqueName: \"kubernetes.io/projected/1226614f-560f-40b9-81a2-595e79043653-kube-api-access-v99cv\") pod \"router-default-5444994796-qbmwm\" (UID: \"1226614f-560f-40b9-81a2-595e79043653\") " pod="openshift-ingress/router-default-5444994796-qbmwm" Dec 10 10:47:45 crc kubenswrapper[4780]: I1210 10:47:45.400748 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-smgl2\" (UniqueName: \"kubernetes.io/projected/b7abac51-adc5-42fa-9084-033e4e7e7acb-kube-api-access-smgl2\") pod \"image-registry-697d97f7c8-25gsf\" (UID: \"b7abac51-adc5-42fa-9084-033e4e7e7acb\") " pod="openshift-image-registry/image-registry-697d97f7c8-25gsf" Dec 10 10:47:45 crc kubenswrapper[4780]: I1210 10:47:45.400952 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/007684b0-7499-47ad-b2d4-27051578f4cc-profile-collector-cert\") pod \"olm-operator-6b444d44fb-st68f\" (UID: \"007684b0-7499-47ad-b2d4-27051578f4cc\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-st68f" Dec 10 10:47:45 crc kubenswrapper[4780]: I1210 10:47:45.400938 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"certs\" (UniqueName: \"kubernetes.io/secret/51f53e78-be26-49e0-b4ad-32de3129ddeb-certs\") pod \"machine-config-server-vbvq9\" (UID: \"51f53e78-be26-49e0-b4ad-32de3129ddeb\") " pod="openshift-machine-config-operator/machine-config-server-vbvq9" Dec 10 10:47:45 crc kubenswrapper[4780]: I1210 10:47:45.409946 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mountpoint-dir\" (UniqueName: \"kubernetes.io/host-path/57848ad9-ff93-4d90-9eb5-0825e149694d-mountpoint-dir\") pod \"csi-hostpathplugin-7vv54\" (UID: \"57848ad9-ff93-4d90-9eb5-0825e149694d\") " pod="hostpath-provisioner/csi-hostpathplugin-7vv54" Dec 10 10:47:45 crc kubenswrapper[4780]: I1210 10:47:45.410074 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/c725ccf2-5e48-4267-b43b-df62e400d0d5-cert\") pod \"ingress-canary-2r7qq\" 
(UID: \"c725ccf2-5e48-4267-b43b-df62e400d0d5\") " pod="openshift-ingress-canary/ingress-canary-2r7qq" Dec 10 10:47:45 crc kubenswrapper[4780]: I1210 10:47:45.410238 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/1e936221-208e-4223-a97b-ec48717552d0-config-volume\") pod \"dns-default-snzm9\" (UID: \"1e936221-208e-4223-a97b-ec48717552d0\") " pod="openshift-dns/dns-default-snzm9" Dec 10 10:47:45 crc kubenswrapper[4780]: I1210 10:47:45.410293 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1ef48cf1-e8b0-491d-bcd8-5e6ca70702d0-config\") pod \"service-ca-operator-777779d784-skxsg\" (UID: \"1ef48cf1-e8b0-491d-bcd8-5e6ca70702d0\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-skxsg" Dec 10 10:47:45 crc kubenswrapper[4780]: I1210 10:47:45.410386 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/657131c5-4c67-4388-bd84-4efa0978219f-package-server-manager-serving-cert\") pod \"package-server-manager-789f6589d5-hx2j2\" (UID: \"657131c5-4c67-4388-bd84-4efa0978219f\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-hx2j2" Dec 10 10:47:45 crc kubenswrapper[4780]: I1210 10:47:45.410461 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/1e936221-208e-4223-a97b-ec48717552d0-metrics-tls\") pod \"dns-default-snzm9\" (UID: \"1e936221-208e-4223-a97b-ec48717552d0\") " pod="openshift-dns/dns-default-snzm9" Dec 10 10:47:45 crc kubenswrapper[4780]: I1210 10:47:45.410634 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"csi-data-dir\" (UniqueName: \"kubernetes.io/host-path/57848ad9-ff93-4d90-9eb5-0825e149694d-csi-data-dir\") pod \"csi-hostpathplugin-7vv54\" (UID: \"57848ad9-ff93-4d90-9eb5-0825e149694d\") " pod="hostpath-provisioner/csi-hostpathplugin-7vv54" Dec 10 10:47:45 crc kubenswrapper[4780]: I1210 10:47:45.411337 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-25gsf\" (UID: \"b7abac51-adc5-42fa-9084-033e4e7e7acb\") " pod="openshift-image-registry/image-registry-697d97f7c8-25gsf" Dec 10 10:47:45 crc kubenswrapper[4780]: I1210 10:47:45.411457 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-dir\" (UniqueName: \"kubernetes.io/host-path/57848ad9-ff93-4d90-9eb5-0825e149694d-plugins-dir\") pod \"csi-hostpathplugin-7vv54\" (UID: \"57848ad9-ff93-4d90-9eb5-0825e149694d\") " pod="hostpath-provisioner/csi-hostpathplugin-7vv54" Dec 10 10:47:45 crc kubenswrapper[4780]: I1210 10:47:45.411582 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registration-dir\" (UniqueName: \"kubernetes.io/host-path/57848ad9-ff93-4d90-9eb5-0825e149694d-registration-dir\") pod \"csi-hostpathplugin-7vv54\" (UID: \"57848ad9-ff93-4d90-9eb5-0825e149694d\") " pod="hostpath-provisioner/csi-hostpathplugin-7vv54" Dec 10 10:47:45 crc kubenswrapper[4780]: I1210 10:47:45.411688 4780 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wh27w\" (UniqueName: \"kubernetes.io/projected/1e936221-208e-4223-a97b-ec48717552d0-kube-api-access-wh27w\") pod \"dns-default-snzm9\" (UID: \"1e936221-208e-4223-a97b-ec48717552d0\") " pod="openshift-dns/dns-default-snzm9" Dec 10 10:47:45 crc kubenswrapper[4780]: I1210 10:47:45.411746 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"socket-dir\" (UniqueName: \"kubernetes.io/host-path/57848ad9-ff93-4d90-9eb5-0825e149694d-socket-dir\") pod \"csi-hostpathplugin-7vv54\" (UID: \"57848ad9-ff93-4d90-9eb5-0825e149694d\") " pod="hostpath-provisioner/csi-hostpathplugin-7vv54" Dec 10 10:47:45 crc kubenswrapper[4780]: I1210 10:47:45.411835 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5hvp9\" (UniqueName: \"kubernetes.io/projected/657131c5-4c67-4388-bd84-4efa0978219f-kube-api-access-5hvp9\") pod \"package-server-manager-789f6589d5-hx2j2\" (UID: \"657131c5-4c67-4388-bd84-4efa0978219f\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-hx2j2" Dec 10 10:47:45 crc kubenswrapper[4780]: I1210 10:47:45.411944 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jv4v4\" (UniqueName: \"kubernetes.io/projected/1ef48cf1-e8b0-491d-bcd8-5e6ca70702d0-kube-api-access-jv4v4\") pod \"service-ca-operator-777779d784-skxsg\" (UID: \"1ef48cf1-e8b0-491d-bcd8-5e6ca70702d0\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-skxsg" Dec 10 10:47:45 crc kubenswrapper[4780]: I1210 10:47:45.412038 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1ef48cf1-e8b0-491d-bcd8-5e6ca70702d0-serving-cert\") pod \"service-ca-operator-777779d784-skxsg\" (UID: \"1ef48cf1-e8b0-491d-bcd8-5e6ca70702d0\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-skxsg" Dec 10 10:47:45 crc kubenswrapper[4780]: I1210 10:47:45.412110 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jnsxz\" (UniqueName: \"kubernetes.io/projected/57848ad9-ff93-4d90-9eb5-0825e149694d-kube-api-access-jnsxz\") pod \"csi-hostpathplugin-7vv54\" (UID: \"57848ad9-ff93-4d90-9eb5-0825e149694d\") " pod="hostpath-provisioner/csi-hostpathplugin-7vv54" Dec 10 10:47:45 crc kubenswrapper[4780]: I1210 10:47:45.412293 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kx6dw\" (UniqueName: \"kubernetes.io/projected/c725ccf2-5e48-4267-b43b-df62e400d0d5-kube-api-access-kx6dw\") pod \"ingress-canary-2r7qq\" (UID: \"c725ccf2-5e48-4267-b43b-df62e400d0d5\") " pod="openshift-ingress-canary/ingress-canary-2r7qq" Dec 10 10:47:45 crc kubenswrapper[4780]: I1210 10:47:45.417484 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1ef48cf1-e8b0-491d-bcd8-5e6ca70702d0-config\") pod \"service-ca-operator-777779d784-skxsg\" (UID: \"1ef48cf1-e8b0-491d-bcd8-5e6ca70702d0\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-skxsg" Dec 10 10:47:45 crc kubenswrapper[4780]: E1210 10:47:45.489235 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. 
No retries permitted until 2025-12-10 10:47:45.989196181 +0000 UTC m=+170.842589624 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-25gsf" (UID: "b7abac51-adc5-42fa-9084-033e4e7e7acb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:47:45 crc kubenswrapper[4780]: I1210 10:47:45.497555 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/c725ccf2-5e48-4267-b43b-df62e400d0d5-cert\") pod \"ingress-canary-2r7qq\" (UID: \"c725ccf2-5e48-4267-b43b-df62e400d0d5\") " pod="openshift-ingress-canary/ingress-canary-2r7qq" Dec 10 10:47:45 crc kubenswrapper[4780]: I1210 10:47:45.698601 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 10 10:47:45 crc kubenswrapper[4780]: I1210 10:47:45.699404 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jnsxz\" (UniqueName: \"kubernetes.io/projected/57848ad9-ff93-4d90-9eb5-0825e149694d-kube-api-access-jnsxz\") pod \"csi-hostpathplugin-7vv54\" (UID: \"57848ad9-ff93-4d90-9eb5-0825e149694d\") " pod="hostpath-provisioner/csi-hostpathplugin-7vv54" Dec 10 10:47:45 crc kubenswrapper[4780]: I1210 10:47:45.699516 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/6e560d85-bf0c-4604-9f52-d46fe96b6fe7-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-558db77b4-qllmj\" (UID: \"6e560d85-bf0c-4604-9f52-d46fe96b6fe7\") " pod="openshift-authentication/oauth-openshift-558db77b4-qllmj" Dec 10 10:47:45 crc kubenswrapper[4780]: I1210 10:47:45.699638 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"mountpoint-dir\" (UniqueName: \"kubernetes.io/host-path/57848ad9-ff93-4d90-9eb5-0825e149694d-mountpoint-dir\") pod \"csi-hostpathplugin-7vv54\" (UID: \"57848ad9-ff93-4d90-9eb5-0825e149694d\") " pod="hostpath-provisioner/csi-hostpathplugin-7vv54" Dec 10 10:47:45 crc kubenswrapper[4780]: I1210 10:47:45.699776 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/1e936221-208e-4223-a97b-ec48717552d0-config-volume\") pod \"dns-default-snzm9\" (UID: \"1e936221-208e-4223-a97b-ec48717552d0\") " pod="openshift-dns/dns-default-snzm9" Dec 10 10:47:45 crc kubenswrapper[4780]: I1210 10:47:45.701252 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/1e936221-208e-4223-a97b-ec48717552d0-metrics-tls\") pod \"dns-default-snzm9\" (UID: \"1e936221-208e-4223-a97b-ec48717552d0\") " pod="openshift-dns/dns-default-snzm9" Dec 10 10:47:45 crc kubenswrapper[4780]: I1210 10:47:45.701305 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"csi-data-dir\" (UniqueName: 
\"kubernetes.io/host-path/57848ad9-ff93-4d90-9eb5-0825e149694d-csi-data-dir\") pod \"csi-hostpathplugin-7vv54\" (UID: \"57848ad9-ff93-4d90-9eb5-0825e149694d\") " pod="hostpath-provisioner/csi-hostpathplugin-7vv54" Dec 10 10:47:45 crc kubenswrapper[4780]: I1210 10:47:45.701373 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-dir\" (UniqueName: \"kubernetes.io/host-path/57848ad9-ff93-4d90-9eb5-0825e149694d-plugins-dir\") pod \"csi-hostpathplugin-7vv54\" (UID: \"57848ad9-ff93-4d90-9eb5-0825e149694d\") " pod="hostpath-provisioner/csi-hostpathplugin-7vv54" Dec 10 10:47:45 crc kubenswrapper[4780]: I1210 10:47:45.701444 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registration-dir\" (UniqueName: \"kubernetes.io/host-path/57848ad9-ff93-4d90-9eb5-0825e149694d-registration-dir\") pod \"csi-hostpathplugin-7vv54\" (UID: \"57848ad9-ff93-4d90-9eb5-0825e149694d\") " pod="hostpath-provisioner/csi-hostpathplugin-7vv54" Dec 10 10:47:45 crc kubenswrapper[4780]: I1210 10:47:45.701496 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wh27w\" (UniqueName: \"kubernetes.io/projected/1e936221-208e-4223-a97b-ec48717552d0-kube-api-access-wh27w\") pod \"dns-default-snzm9\" (UID: \"1e936221-208e-4223-a97b-ec48717552d0\") " pod="openshift-dns/dns-default-snzm9" Dec 10 10:47:45 crc kubenswrapper[4780]: I1210 10:47:45.701524 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"socket-dir\" (UniqueName: \"kubernetes.io/host-path/57848ad9-ff93-4d90-9eb5-0825e149694d-socket-dir\") pod \"csi-hostpathplugin-7vv54\" (UID: \"57848ad9-ff93-4d90-9eb5-0825e149694d\") " pod="hostpath-provisioner/csi-hostpathplugin-7vv54" Dec 10 10:47:45 crc kubenswrapper[4780]: I1210 10:47:45.702107 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"socket-dir\" (UniqueName: \"kubernetes.io/host-path/57848ad9-ff93-4d90-9eb5-0825e149694d-socket-dir\") pod \"csi-hostpathplugin-7vv54\" (UID: \"57848ad9-ff93-4d90-9eb5-0825e149694d\") " pod="hostpath-provisioner/csi-hostpathplugin-7vv54" Dec 10 10:47:45 crc kubenswrapper[4780]: I1210 10:47:45.707956 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-dir\" (UniqueName: \"kubernetes.io/host-path/57848ad9-ff93-4d90-9eb5-0825e149694d-plugins-dir\") pod \"csi-hostpathplugin-7vv54\" (UID: \"57848ad9-ff93-4d90-9eb5-0825e149694d\") " pod="hostpath-provisioner/csi-hostpathplugin-7vv54" Dec 10 10:47:45 crc kubenswrapper[4780]: E1210 10:47:45.708971 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-10 10:47:46.208872698 +0000 UTC m=+171.062266141 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:47:45 crc kubenswrapper[4780]: I1210 10:47:45.709134 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registration-dir\" (UniqueName: \"kubernetes.io/host-path/57848ad9-ff93-4d90-9eb5-0825e149694d-registration-dir\") pod \"csi-hostpathplugin-7vv54\" (UID: \"57848ad9-ff93-4d90-9eb5-0825e149694d\") " pod="hostpath-provisioner/csi-hostpathplugin-7vv54" Dec 10 10:47:45 crc kubenswrapper[4780]: I1210 10:47:45.709265 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"csi-data-dir\" (UniqueName: \"kubernetes.io/host-path/57848ad9-ff93-4d90-9eb5-0825e149694d-csi-data-dir\") pod \"csi-hostpathplugin-7vv54\" (UID: \"57848ad9-ff93-4d90-9eb5-0825e149694d\") " pod="hostpath-provisioner/csi-hostpathplugin-7vv54" Dec 10 10:47:45 crc kubenswrapper[4780]: I1210 10:47:45.709741 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"mountpoint-dir\" (UniqueName: \"kubernetes.io/host-path/57848ad9-ff93-4d90-9eb5-0825e149694d-mountpoint-dir\") pod \"csi-hostpathplugin-7vv54\" (UID: \"57848ad9-ff93-4d90-9eb5-0825e149694d\") " pod="hostpath-provisioner/csi-hostpathplugin-7vv54" Dec 10 10:47:45 crc kubenswrapper[4780]: I1210 10:47:45.716776 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/1e936221-208e-4223-a97b-ec48717552d0-config-volume\") pod \"dns-default-snzm9\" (UID: \"1e936221-208e-4223-a97b-ec48717552d0\") " pod="openshift-dns/dns-default-snzm9" Dec 10 10:47:45 crc kubenswrapper[4780]: I1210 10:47:45.743453 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/1e936221-208e-4223-a97b-ec48717552d0-metrics-tls\") pod \"dns-default-snzm9\" (UID: \"1e936221-208e-4223-a97b-ec48717552d0\") " pod="openshift-dns/dns-default-snzm9" Dec 10 10:47:45 crc kubenswrapper[4780]: I1210 10:47:45.789331 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/6e560d85-bf0c-4604-9f52-d46fe96b6fe7-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-558db77b4-qllmj\" (UID: \"6e560d85-bf0c-4604-9f52-d46fe96b6fe7\") " pod="openshift-authentication/oauth-openshift-558db77b4-qllmj" Dec 10 10:47:45 crc kubenswrapper[4780]: I1210 10:47:45.789563 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1ef48cf1-e8b0-491d-bcd8-5e6ca70702d0-serving-cert\") pod \"service-ca-operator-777779d784-skxsg\" (UID: \"1ef48cf1-e8b0-491d-bcd8-5e6ca70702d0\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-skxsg" Dec 10 10:47:45 crc kubenswrapper[4780]: I1210 10:47:45.789622 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/657131c5-4c67-4388-bd84-4efa0978219f-package-server-manager-serving-cert\") pod \"package-server-manager-789f6589d5-hx2j2\" (UID: 
\"657131c5-4c67-4388-bd84-4efa0978219f\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-hx2j2" Dec 10 10:47:45 crc kubenswrapper[4780]: I1210 10:47:45.795973 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jv4v4\" (UniqueName: \"kubernetes.io/projected/1ef48cf1-e8b0-491d-bcd8-5e6ca70702d0-kube-api-access-jv4v4\") pod \"service-ca-operator-777779d784-skxsg\" (UID: \"1ef48cf1-e8b0-491d-bcd8-5e6ca70702d0\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-skxsg" Dec 10 10:47:45 crc kubenswrapper[4780]: I1210 10:47:45.797613 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qtp9q\" (UniqueName: \"kubernetes.io/projected/007684b0-7499-47ad-b2d4-27051578f4cc-kube-api-access-qtp9q\") pod \"olm-operator-6b444d44fb-st68f\" (UID: \"007684b0-7499-47ad-b2d4-27051578f4cc\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-st68f" Dec 10 10:47:45 crc kubenswrapper[4780]: I1210 10:47:45.798585 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/5d5f57c6-f7e1-4bda-82b0-0a6f1eb7e4ac-kube-api-access\") pod \"kube-controller-manager-operator-78b949d7b-6ffpq\" (UID: \"5d5f57c6-f7e1-4bda-82b0-0a6f1eb7e4ac\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-6ffpq" Dec 10 10:47:45 crc kubenswrapper[4780]: I1210 10:47:45.799657 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-brm9t\" (UniqueName: \"kubernetes.io/projected/51f53e78-be26-49e0-b4ad-32de3129ddeb-kube-api-access-brm9t\") pod \"machine-config-server-vbvq9\" (UID: \"51f53e78-be26-49e0-b4ad-32de3129ddeb\") " pod="openshift-machine-config-operator/machine-config-server-vbvq9" Dec 10 10:47:45 crc kubenswrapper[4780]: I1210 10:47:45.801602 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-f9d7485db-2dwc9" Dec 10 10:47:45 crc kubenswrapper[4780]: I1210 10:47:45.804265 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6e560d85-bf0c-4604-9f52-d46fe96b6fe7-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-558db77b4-qllmj\" (UID: \"6e560d85-bf0c-4604-9f52-d46fe96b6fe7\") " pod="openshift-authentication/oauth-openshift-558db77b4-qllmj" Dec 10 10:47:45 crc kubenswrapper[4780]: I1210 10:47:45.804481 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-25gsf\" (UID: \"b7abac51-adc5-42fa-9084-033e4e7e7acb\") " pod="openshift-image-registry/image-registry-697d97f7c8-25gsf" Dec 10 10:47:45 crc kubenswrapper[4780]: E1210 10:47:45.805406 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-10 10:47:46.305388905 +0000 UTC m=+171.158782348 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-25gsf" (UID: "b7abac51-adc5-42fa-9084-033e4e7e7acb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:47:45 crc kubenswrapper[4780]: I1210 10:47:45.807264 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6e560d85-bf0c-4604-9f52-d46fe96b6fe7-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-558db77b4-qllmj\" (UID: \"6e560d85-bf0c-4604-9f52-d46fe96b6fe7\") " pod="openshift-authentication/oauth-openshift-558db77b4-qllmj" Dec 10 10:47:45 crc kubenswrapper[4780]: I1210 10:47:45.807822 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jnsxz\" (UniqueName: \"kubernetes.io/projected/57848ad9-ff93-4d90-9eb5-0825e149694d-kube-api-access-jnsxz\") pod \"csi-hostpathplugin-7vv54\" (UID: \"57848ad9-ff93-4d90-9eb5-0825e149694d\") " pod="hostpath-provisioner/csi-hostpathplugin-7vv54" Dec 10 10:47:45 crc kubenswrapper[4780]: I1210 10:47:45.808308 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wh27w\" (UniqueName: \"kubernetes.io/projected/1e936221-208e-4223-a97b-ec48717552d0-kube-api-access-wh27w\") pod \"dns-default-snzm9\" (UID: \"1e936221-208e-4223-a97b-ec48717552d0\") " pod="openshift-dns/dns-default-snzm9" Dec 10 10:47:45 crc kubenswrapper[4780]: I1210 10:47:45.808322 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-b6pdb" Dec 10 10:47:45 crc kubenswrapper[4780]: I1210 10:47:45.809844 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-f7dws\" (UniqueName: \"kubernetes.io/projected/536a1b3e-4cda-4ed7-985a-595c13968356-kube-api-access-f7dws\") pod \"collect-profiles-29422725-lvhr4\" (UID: \"536a1b3e-4cda-4ed7-985a-595c13968356\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29422725-lvhr4" Dec 10 10:47:45 crc kubenswrapper[4780]: I1210 10:47:45.812785 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5hvp9\" (UniqueName: \"kubernetes.io/projected/657131c5-4c67-4388-bd84-4efa0978219f-kube-api-access-5hvp9\") pod \"package-server-manager-789f6589d5-hx2j2\" (UID: \"657131c5-4c67-4388-bd84-4efa0978219f\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-hx2j2" Dec 10 10:47:45 crc kubenswrapper[4780]: I1210 10:47:45.813874 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-r9sbp\" (UniqueName: \"kubernetes.io/projected/75086894-fa83-41dd-ad4e-cca8d61f0869-kube-api-access-r9sbp\") pod \"packageserver-d55dfcdfc-csjzj\" (UID: \"75086894-fa83-41dd-ad4e-cca8d61f0869\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-csjzj" Dec 10 10:47:45 crc kubenswrapper[4780]: I1210 10:47:45.817004 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s2mww\" (UniqueName: \"kubernetes.io/projected/a3427a27-b233-4e0f-a9ae-25a0d67c6b65-kube-api-access-s2mww\") pod \"migrator-59844c95c7-x9cz8\" (UID: \"a3427a27-b233-4e0f-a9ae-25a0d67c6b65\") " pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-x9cz8" Dec 10 10:47:45 crc kubenswrapper[4780]: I1210 10:47:45.817137 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kx6dw\" (UniqueName: \"kubernetes.io/projected/c725ccf2-5e48-4267-b43b-df62e400d0d5-kube-api-access-kx6dw\") pod \"ingress-canary-2r7qq\" (UID: \"c725ccf2-5e48-4267-b43b-df62e400d0d5\") " pod="openshift-ingress-canary/ingress-canary-2r7qq" Dec 10 10:47:45 crc kubenswrapper[4780]: I1210 10:47:45.828045 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-kc2sh" Dec 10 10:47:45 crc kubenswrapper[4780]: I1210 10:47:45.842860 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-qllmj" Dec 10 10:47:45 crc kubenswrapper[4780]: I1210 10:47:45.909029 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 10 10:47:45 crc kubenswrapper[4780]: E1210 10:47:45.910865 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-10 10:47:46.410822577 +0000 UTC m=+171.264216020 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:47:46 crc kubenswrapper[4780]: I1210 10:47:46.370204 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-25gsf\" (UID: \"b7abac51-adc5-42fa-9084-033e4e7e7acb\") " pod="openshift-image-registry/image-registry-697d97f7c8-25gsf" Dec 10 10:47:46 crc kubenswrapper[4780]: E1210 10:47:46.372547 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-10 10:47:46.872525899 +0000 UTC m=+171.725919352 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-25gsf" (UID: "b7abac51-adc5-42fa-9084-033e4e7e7acb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:47:46 crc kubenswrapper[4780]: I1210 10:47:46.444509 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-wbpvc" event={"ID":"d8851f78-89d9-4d65-b8e3-cb9ad2f74469","Type":"ContainerStarted","Data":"5776091b6d822908a984cd18e9045a0e635da7f0b52d691a4aca93f41236fdd7"} Dec 10 10:47:46 crc kubenswrapper[4780]: I1210 10:47:46.523708 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 10 10:47:46 crc kubenswrapper[4780]: E1210 10:47:46.524198 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-10 10:47:47.024181611 +0000 UTC m=+171.877575054 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:47:46 crc kubenswrapper[4780]: I1210 10:47:46.595430 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-56x4k" Dec 10 10:47:46 crc kubenswrapper[4780]: I1210 10:47:46.626639 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-25gsf\" (UID: \"b7abac51-adc5-42fa-9084-033e4e7e7acb\") " pod="openshift-image-registry/image-registry-697d97f7c8-25gsf" Dec 10 10:47:46 crc kubenswrapper[4780]: E1210 10:47:46.627628 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-10 10:47:47.12761022 +0000 UTC m=+171.981003663 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-25gsf" (UID: "b7abac51-adc5-42fa-9084-033e4e7e7acb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:47:46 crc kubenswrapper[4780]: I1210 10:47:46.629878 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-jmkwd" Dec 10 10:47:46 crc kubenswrapper[4780]: I1210 10:47:46.728272 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 10 10:47:46 crc kubenswrapper[4780]: E1210 10:47:46.729095 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-10 10:47:47.229074027 +0000 UTC m=+172.082467470 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:47:46 crc kubenswrapper[4780]: I1210 10:47:46.796798 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ingress/router-default-5444994796-qbmwm" Dec 10 10:47:46 crc kubenswrapper[4780]: I1210 10:47:46.831757 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-25gsf\" (UID: \"b7abac51-adc5-42fa-9084-033e4e7e7acb\") " pod="openshift-image-registry/image-registry-697d97f7c8-25gsf" Dec 10 10:47:46 crc kubenswrapper[4780]: E1210 10:47:46.833000 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-10 10:47:47.332898037 +0000 UTC m=+172.186291650 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-25gsf" (UID: "b7abac51-adc5-42fa-9084-033e4e7e7acb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:47:46 crc kubenswrapper[4780]: I1210 10:47:46.923280 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/machine-api-operator-5694c8668f-2mb96"] Dec 10 10:47:46 crc kubenswrapper[4780]: I1210 10:47:46.932504 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-nmvlq" Dec 10 10:47:46 crc kubenswrapper[4780]: I1210 10:47:46.933956 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 10 10:47:46 crc kubenswrapper[4780]: E1210 10:47:46.934226 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-10 10:47:47.43419655 +0000 UTC m=+172.287589993 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:47:46 crc kubenswrapper[4780]: I1210 10:47:46.934540 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-25gsf\" (UID: \"b7abac51-adc5-42fa-9084-033e4e7e7acb\") " pod="openshift-image-registry/image-registry-697d97f7c8-25gsf" Dec 10 10:47:46 crc kubenswrapper[4780]: E1210 10:47:46.935060 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-10 10:47:47.435051223 +0000 UTC m=+172.288444666 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-25gsf" (UID: "b7abac51-adc5-42fa-9084-033e4e7e7acb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:47:46 crc kubenswrapper[4780]: I1210 10:47:46.941970 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver/apiserver-76f77b778f-9hgsw"] Dec 10 10:47:46 crc kubenswrapper[4780]: I1210 10:47:46.951369 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-6ffpq" Dec 10 10:47:46 crc kubenswrapper[4780]: I1210 10:47:46.967794 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-x9cz8" Dec 10 10:47:46 crc kubenswrapper[4780]: I1210 10:47:46.980697 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-lrzpt" Dec 10 10:47:47 crc kubenswrapper[4780]: I1210 10:47:46.998698 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-rlc8b" Dec 10 10:47:47 crc kubenswrapper[4780]: I1210 10:47:47.014735 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-4sfhd" Dec 10 10:47:47 crc kubenswrapper[4780]: I1210 10:47:47.022890 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29422725-lvhr4" Dec 10 10:47:47 crc kubenswrapper[4780]: I1210 10:47:47.036100 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 10 10:47:47 crc kubenswrapper[4780]: E1210 10:47:47.045358 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-10 10:47:47.545321472 +0000 UTC m=+172.398714915 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:47:47 crc kubenswrapper[4780]: I1210 10:47:47.939838 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-wbpvc" event={"ID":"d8851f78-89d9-4d65-b8e3-cb9ad2f74469","Type":"ContainerStarted","Data":"cc6e15e7e9eb19d8ac2cfde98c82cf9293c58b24c87f884ad0ca128d8c407db5"} Dec 10 10:47:47 crc kubenswrapper[4780]: I1210 10:47:47.941490 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/machine-api-operator-5694c8668f-2mb96" event={"ID":"428a3826-00fc-4452-8f22-61d02857b761","Type":"ContainerStarted","Data":"2f9e78d63c8af32cd2a38606bec40e426aa20a649b5fd8f4d4db78fb95819101"} Dec 10 10:47:47 crc kubenswrapper[4780]: I1210 10:47:47.942517 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress/router-default-5444994796-qbmwm" event={"ID":"1226614f-560f-40b9-81a2-595e79043653","Type":"ContainerStarted","Data":"d5d8b6ac4c99ae5a779e7393a55635dc1056b150bf4391a19de405d9bfe5679a"} Dec 10 10:47:48 crc kubenswrapper[4780]: I1210 10:47:48.061604 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 10 10:47:48 crc kubenswrapper[4780]: E1210 10:47:48.063370 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-10 10:47:49.063341305 +0000 UTC m=+173.916734748 (durationBeforeRetry 1s). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:47:48 crc kubenswrapper[4780]: I1210 10:47:48.168501 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-25gsf\" (UID: \"b7abac51-adc5-42fa-9084-033e4e7e7acb\") " pod="openshift-image-registry/image-registry-697d97f7c8-25gsf" Dec 10 10:47:48 crc kubenswrapper[4780]: E1210 10:47:48.169466 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-10 10:47:48.669449355 +0000 UTC m=+173.522842798 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-25gsf" (UID: "b7abac51-adc5-42fa-9084-033e4e7e7acb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:47:48 crc kubenswrapper[4780]: I1210 10:47:48.216120 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-st68f" Dec 10 10:47:48 crc kubenswrapper[4780]: I1210 10:47:48.274530 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 10 10:47:48 crc kubenswrapper[4780]: E1210 10:47:48.275690 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-10 10:47:48.775639707 +0000 UTC m=+173.629033150 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:47:48 crc kubenswrapper[4780]: I1210 10:47:48.395345 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-25gsf\" (UID: \"b7abac51-adc5-42fa-9084-033e4e7e7acb\") " pod="openshift-image-registry/image-registry-697d97f7c8-25gsf" Dec 10 10:47:48 crc kubenswrapper[4780]: I1210 10:47:48.395617 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/24187953-1dc5-48d7-b00c-1e5876604b6b-metrics-certs\") pod \"network-metrics-daemon-46s5p\" (UID: \"24187953-1dc5-48d7-b00c-1e5876604b6b\") " pod="openshift-multus/network-metrics-daemon-46s5p" Dec 10 10:47:48 crc kubenswrapper[4780]: E1210 10:47:48.396350 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-10 10:47:48.896322602 +0000 UTC m=+173.749716225 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-25gsf" (UID: "b7abac51-adc5-42fa-9084-033e4e7e7acb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:47:48 crc kubenswrapper[4780]: I1210 10:47:48.421839 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/24187953-1dc5-48d7-b00c-1e5876604b6b-metrics-certs\") pod \"network-metrics-daemon-46s5p\" (UID: \"24187953-1dc5-48d7-b00c-1e5876604b6b\") " pod="openshift-multus/network-metrics-daemon-46s5p" Dec 10 10:47:48 crc kubenswrapper[4780]: I1210 10:47:48.500358 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 10 10:47:48 crc kubenswrapper[4780]: E1210 10:47:48.501129 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-10 10:47:49.001106487 +0000 UTC m=+173.854499930 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:47:48 crc kubenswrapper[4780]: I1210 10:47:48.602465 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-25gsf\" (UID: \"b7abac51-adc5-42fa-9084-033e4e7e7acb\") " pod="openshift-image-registry/image-registry-697d97f7c8-25gsf" Dec 10 10:47:48 crc kubenswrapper[4780]: E1210 10:47:48.603137 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-10 10:47:49.103113639 +0000 UTC m=+173.956507082 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-25gsf" (UID: "b7abac51-adc5-42fa-9084-033e4e7e7acb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:47:48 crc kubenswrapper[4780]: I1210 10:47:48.680848 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-857f4d67dd-v57bz" Dec 10 10:47:48 crc kubenswrapper[4780]: I1210 10:47:48.722218 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 10 10:47:48 crc kubenswrapper[4780]: I1210 10:47:48.724242 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-46s5p" Dec 10 10:47:48 crc kubenswrapper[4780]: E1210 10:47:48.725275 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-10 10:47:49.225248922 +0000 UTC m=+174.078642365 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:47:48 crc kubenswrapper[4780]: I1210 10:47:48.725389 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-25gsf\" (UID: \"b7abac51-adc5-42fa-9084-033e4e7e7acb\") " pod="openshift-image-registry/image-registry-697d97f7c8-25gsf" Dec 10 10:47:48 crc kubenswrapper[4780]: E1210 10:47:48.726993 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-10 10:47:49.226976877 +0000 UTC m=+174.080370320 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-25gsf" (UID: "b7abac51-adc5-42fa-9084-033e4e7e7acb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:47:48 crc kubenswrapper[4780]: I1210 10:47:48.763077 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-csjzj" Dec 10 10:47:48 crc kubenswrapper[4780]: I1210 10:47:48.827222 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 10 10:47:48 crc kubenswrapper[4780]: E1210 10:47:48.827377 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-10 10:47:49.327350106 +0000 UTC m=+174.180743549 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:47:48 crc kubenswrapper[4780]: I1210 10:47:48.829867 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-25gsf\" (UID: \"b7abac51-adc5-42fa-9084-033e4e7e7acb\") " pod="openshift-image-registry/image-registry-697d97f7c8-25gsf" Dec 10 10:47:48 crc kubenswrapper[4780]: E1210 10:47:48.831027 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-10 10:47:49.331002802 +0000 UTC m=+174.184396245 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-25gsf" (UID: "b7abac51-adc5-42fa-9084-033e4e7e7acb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:47:48 crc kubenswrapper[4780]: I1210 10:47:48.964226 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 10 10:47:48 crc kubenswrapper[4780]: E1210 10:47:48.964502 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-10 10:47:49.464472234 +0000 UTC m=+174.317865677 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:47:48 crc kubenswrapper[4780]: I1210 10:47:48.964707 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-25gsf\" (UID: \"b7abac51-adc5-42fa-9084-033e4e7e7acb\") " pod="openshift-image-registry/image-registry-697d97f7c8-25gsf" Dec 10 10:47:48 crc kubenswrapper[4780]: E1210 10:47:48.965231 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-10 10:47:49.465210863 +0000 UTC m=+174.318604306 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-25gsf" (UID: "b7abac51-adc5-42fa-9084-033e4e7e7acb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:47:48 crc kubenswrapper[4780]: I1210 10:47:48.975945 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-9hgsw" event={"ID":"5464f7ae-9634-4208-a5f5-3e6299f72639","Type":"ContainerStarted","Data":"817c4db450c955f5323005cf2a152588fd6c1120c0324a3a02b0b74bc5d91ba0"} Dec 10 10:47:49 crc kubenswrapper[4780]: I1210 10:47:49.129034 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 10 10:47:49 crc kubenswrapper[4780]: E1210 10:47:49.129319 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-10 10:47:49.629269343 +0000 UTC m=+174.482662786 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:47:49 crc kubenswrapper[4780]: I1210 10:47:49.129500 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-25gsf\" (UID: \"b7abac51-adc5-42fa-9084-033e4e7e7acb\") " pod="openshift-image-registry/image-registry-697d97f7c8-25gsf" Dec 10 10:47:49 crc kubenswrapper[4780]: E1210 10:47:49.130153 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-10 10:47:49.630140616 +0000 UTC m=+174.483534059 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-25gsf" (UID: "b7abac51-adc5-42fa-9084-033e4e7e7acb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:47:49 crc kubenswrapper[4780]: I1210 10:47:49.133336 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console-operator/console-operator-58897d9998-52wql"] Dec 10 10:47:49 crc kubenswrapper[4780]: I1210 10:47:49.669621 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 10 10:47:49 crc kubenswrapper[4780]: E1210 10:47:49.671111 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-10 10:47:50.671090039 +0000 UTC m=+175.524483482 (durationBeforeRetry 1s). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:47:50 crc kubenswrapper[4780]: I1210 10:47:50.350998 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 10 10:47:50 crc kubenswrapper[4780]: I1210 10:47:50.355025 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-9c57cc56f-6z8d8" Dec 10 10:47:50 crc kubenswrapper[4780]: E1210 10:47:50.361779 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-10 10:47:50.861735693 +0000 UTC m=+175.715129136 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:47:50 crc kubenswrapper[4780]: I1210 10:47:50.457287 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-777779d784-skxsg" Dec 10 10:47:50 crc kubenswrapper[4780]: I1210 10:47:50.461994 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-25gsf\" (UID: \"b7abac51-adc5-42fa-9084-033e4e7e7acb\") " pod="openshift-image-registry/image-registry-697d97f7c8-25gsf" Dec 10 10:47:50 crc kubenswrapper[4780]: E1210 10:47:50.488893 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-10 10:47:50.988837477 +0000 UTC m=+175.842230920 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-25gsf" (UID: "b7abac51-adc5-42fa-9084-033e4e7e7acb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:47:50 crc kubenswrapper[4780]: I1210 10:47:50.610968 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 10 10:47:50 crc kubenswrapper[4780]: E1210 10:47:50.611577 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-10 10:47:51.111549225 +0000 UTC m=+175.964942668 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:47:50 crc kubenswrapper[4780]: I1210 10:47:50.613114 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-25gsf\" (UID: \"b7abac51-adc5-42fa-9084-033e4e7e7acb\") " pod="openshift-image-registry/image-registry-697d97f7c8-25gsf" Dec 10 10:47:50 crc kubenswrapper[4780]: E1210 10:47:50.620949 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-10 10:47:51.120934723 +0000 UTC m=+175.974328156 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-25gsf" (UID: "b7abac51-adc5-42fa-9084-033e4e7e7acb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:47:50 crc kubenswrapper[4780]: I1210 10:47:50.629629 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console-operator/console-operator-58897d9998-52wql" event={"ID":"041a0a8d-2984-4158-b873-13944248e6ff","Type":"ContainerStarted","Data":"82789c802d0f9ef9d5a52bc5a49d3d678266ef2096e1c793dde9c508f72af562"} Dec 10 10:47:50 crc kubenswrapper[4780]: I1210 10:47:50.801383 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 10 10:47:50 crc kubenswrapper[4780]: E1210 10:47:50.802206 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-10 10:47:51.302128764 +0000 UTC m=+176.155522197 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:47:50 crc kubenswrapper[4780]: I1210 10:47:50.969464 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-25gsf\" (UID: \"b7abac51-adc5-42fa-9084-033e4e7e7acb\") " pod="openshift-image-registry/image-registry-697d97f7c8-25gsf" Dec 10 10:47:50 crc kubenswrapper[4780]: E1210 10:47:50.970067 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-10 10:47:51.470043185 +0000 UTC m=+176.323436628 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-25gsf" (UID: "b7abac51-adc5-42fa-9084-033e4e7e7acb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:47:51 crc kubenswrapper[4780]: I1210 10:47:51.096089 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 10 10:47:51 crc kubenswrapper[4780]: E1210 10:47:51.097449 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-10 10:47:51.597403196 +0000 UTC m=+176.450796639 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:47:51 crc kubenswrapper[4780]: I1210 10:47:51.253735 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-25gsf\" (UID: \"b7abac51-adc5-42fa-9084-033e4e7e7acb\") " pod="openshift-image-registry/image-registry-697d97f7c8-25gsf" Dec 10 10:47:51 crc kubenswrapper[4780]: E1210 10:47:51.254298 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-10 10:47:51.754279552 +0000 UTC m=+176.607672995 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-25gsf" (UID: "b7abac51-adc5-42fa-9084-033e4e7e7acb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:47:51 crc kubenswrapper[4780]: I1210 10:47:51.360995 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 10 10:47:51 crc kubenswrapper[4780]: E1210 10:47:51.361398 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-10 10:47:51.861356513 +0000 UTC m=+176.714750106 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:47:51 crc kubenswrapper[4780]: I1210 10:47:51.361487 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-25gsf\" (UID: \"b7abac51-adc5-42fa-9084-033e4e7e7acb\") " pod="openshift-image-registry/image-registry-697d97f7c8-25gsf" Dec 10 10:47:51 crc kubenswrapper[4780]: E1210 10:47:51.362249 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-10 10:47:51.862218336 +0000 UTC m=+176.715611779 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-25gsf" (UID: "b7abac51-adc5-42fa-9084-033e4e7e7acb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:47:51 crc kubenswrapper[4780]: I1210 10:47:51.469713 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 10 10:47:51 crc kubenswrapper[4780]: E1210 10:47:51.470314 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-10 10:47:51.970290005 +0000 UTC m=+176.823683448 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:47:51 crc kubenswrapper[4780]: I1210 10:47:51.491255 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-hx2j2" Dec 10 10:47:51 crc kubenswrapper[4780]: I1210 10:47:51.571531 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-25gsf\" (UID: \"b7abac51-adc5-42fa-9084-033e4e7e7acb\") " pod="openshift-image-registry/image-registry-697d97f7c8-25gsf" Dec 10 10:47:51 crc kubenswrapper[4780]: E1210 10:47:51.572489 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-10 10:47:52.072462432 +0000 UTC m=+176.925855875 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-25gsf" (UID: "b7abac51-adc5-42fa-9084-033e4e7e7acb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:47:51 crc kubenswrapper[4780]: I1210 10:47:51.672822 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 10 10:47:51 crc kubenswrapper[4780]: E1210 10:47:51.673267 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-10 10:47:52.17319368 +0000 UTC m=+177.026587123 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:47:51 crc kubenswrapper[4780]: I1210 10:47:51.674085 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-25gsf\" (UID: \"b7abac51-adc5-42fa-9084-033e4e7e7acb\") " pod="openshift-image-registry/image-registry-697d97f7c8-25gsf" Dec 10 10:47:51 crc kubenswrapper[4780]: E1210 10:47:51.675226 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-10 10:47:52.175214233 +0000 UTC m=+177.028607676 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-25gsf" (UID: "b7abac51-adc5-42fa-9084-033e4e7e7acb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:47:51 crc kubenswrapper[4780]: I1210 10:47:51.772720 4780 generic.go:334] "Generic (PLEG): container finished" podID="5464f7ae-9634-4208-a5f5-3e6299f72639" containerID="922d32265dc4fb67a89b8362d86b71e7c5c36d55dbaa829f657c0e2bd638c46f" exitCode=0 Dec 10 10:47:51 crc kubenswrapper[4780]: I1210 10:47:51.772904 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-9hgsw" event={"ID":"5464f7ae-9634-4208-a5f5-3e6299f72639","Type":"ContainerDied","Data":"922d32265dc4fb67a89b8362d86b71e7c5c36d55dbaa829f657c0e2bd638c46f"} Dec 10 10:47:51 crc kubenswrapper[4780]: I1210 10:47:51.775852 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 10 10:47:51 crc kubenswrapper[4780]: E1210 10:47:51.776276 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-10 10:47:52.276224748 +0000 UTC m=+177.129618191 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:47:51 crc kubenswrapper[4780]: I1210 10:47:51.776451 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-25gsf\" (UID: \"b7abac51-adc5-42fa-9084-033e4e7e7acb\") " pod="openshift-image-registry/image-registry-697d97f7c8-25gsf" Dec 10 10:47:51 crc kubenswrapper[4780]: E1210 10:47:51.777497 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-10 10:47:52.27744201 +0000 UTC m=+177.130835453 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-25gsf" (UID: "b7abac51-adc5-42fa-9084-033e4e7e7acb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:47:51 crc kubenswrapper[4780]: I1210 10:47:51.777725 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-wbpvc" event={"ID":"d8851f78-89d9-4d65-b8e3-cb9ad2f74469","Type":"ContainerStarted","Data":"f3a492d7a4c8ff27a0995a32cd90c41f88b8670658feac6d311ba53d30b2ab10"} Dec 10 10:47:51 crc kubenswrapper[4780]: I1210 10:47:51.779535 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress/router-default-5444994796-qbmwm" event={"ID":"1226614f-560f-40b9-81a2-595e79043653","Type":"ContainerStarted","Data":"86cbc3807756197f9a55c8e9c4ade7dff2383ee0dc47f89124d44891e8e1c21c"} Dec 10 10:47:51 crc kubenswrapper[4780]: I1210 10:47:51.782718 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/machine-api-operator-5694c8668f-2mb96" event={"ID":"428a3826-00fc-4452-8f22-61d02857b761","Type":"ContainerStarted","Data":"6a4165d9a4769b41d1a4f61b1e240d47c0a7269e9bb9bc22171badf27837735a"} Dec 10 10:47:51 crc kubenswrapper[4780]: I1210 10:47:51.798259 4780 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-ingress/router-default-5444994796-qbmwm" Dec 10 10:47:51 crc kubenswrapper[4780]: I1210 10:47:51.831323 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-wbpvc" podStartSLOduration=149.831296612 podStartE2EDuration="2m29.831296612s" podCreationTimestamp="2025-12-10 10:45:22 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 10:47:51.824620635 +0000 UTC m=+176.678014078" watchObservedRunningTime="2025-12-10 10:47:51.831296612 +0000 UTC m=+176.684690045" Dec 10 10:47:51 crc kubenswrapper[4780]: I1210 10:47:51.850333 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ingress/router-default-5444994796-qbmwm" podStartSLOduration=148.850299123 podStartE2EDuration="2m28.850299123s" podCreationTimestamp="2025-12-10 10:45:23 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 10:47:51.850037396 +0000 UTC m=+176.703430849" watchObservedRunningTime="2025-12-10 10:47:51.850299123 +0000 UTC m=+176.703692566" Dec 10 10:47:51 crc kubenswrapper[4780]: I1210 10:47:51.873014 4780 patch_prober.go:28] interesting pod/router-default-5444994796-qbmwm container/router namespace/openshift-ingress: Startup probe status=failure output="Get \"http://localhost:1936/healthz/ready\": dial tcp [::1]:1936: connect: connection refused" start-of-body= Dec 10 10:47:51 crc kubenswrapper[4780]: I1210 10:47:51.873199 4780 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-qbmwm" podUID="1226614f-560f-40b9-81a2-595e79043653" containerName="router" probeResult="failure" output="Get \"http://localhost:1936/healthz/ready\": dial tcp [::1]:1936: connect: connection refused" Dec 10 
10:47:51 crc kubenswrapper[4780]: I1210 10:47:51.877145 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 10 10:47:51 crc kubenswrapper[4780]: E1210 10:47:51.878399 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-10 10:47:52.378376544 +0000 UTC m=+177.231769987 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:47:51 crc kubenswrapper[4780]: I1210 10:47:51.893572 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-server-vbvq9" Dec 10 10:47:51 crc kubenswrapper[4780]: I1210 10:47:51.898259 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2r7qq" Dec 10 10:47:51 crc kubenswrapper[4780]: I1210 10:47:51.959852 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-7vv54" Dec 10 10:47:51 crc kubenswrapper[4780]: I1210 10:47:51.962294 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-snzm9" Dec 10 10:47:51 crc kubenswrapper[4780]: I1210 10:47:51.979170 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-25gsf\" (UID: \"b7abac51-adc5-42fa-9084-033e4e7e7acb\") " pod="openshift-image-registry/image-registry-697d97f7c8-25gsf" Dec 10 10:47:51 crc kubenswrapper[4780]: E1210 10:47:51.979703 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-10 10:47:52.479684317 +0000 UTC m=+177.333077760 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-25gsf" (UID: "b7abac51-adc5-42fa-9084-033e4e7e7acb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:47:52 crc kubenswrapper[4780]: I1210 10:47:52.005638 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-ns5px"] Dec 10 10:47:52 crc kubenswrapper[4780]: I1210 10:47:52.082080 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 10 10:47:52 crc kubenswrapper[4780]: E1210 10:47:52.085117 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-10 10:47:52.585082528 +0000 UTC m=+177.438476151 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:47:52 crc kubenswrapper[4780]: I1210 10:47:52.104191 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-operator/ingress-operator-5b745b69d9-dgx4k"] Dec 10 10:47:52 crc kubenswrapper[4780]: I1210 10:47:52.115827 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-njfgs"] Dec 10 10:47:52 crc kubenswrapper[4780]: I1210 10:47:52.123442 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns-operator/dns-operator-744455d44c-6tqmn"] Dec 10 10:47:52 crc kubenswrapper[4780]: I1210 10:47:52.163327 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-hgp6g"] Dec 10 10:47:52 crc kubenswrapper[4780]: I1210 10:47:52.442844 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-25gsf\" (UID: \"b7abac51-adc5-42fa-9084-033e4e7e7acb\") " pod="openshift-image-registry/image-registry-697d97f7c8-25gsf" Dec 10 10:47:52 crc kubenswrapper[4780]: E1210 10:47:52.443468 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-10 10:47:52.943449385 +0000 UTC m=+177.796842828 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-25gsf" (UID: "b7abac51-adc5-42fa-9084-033e4e7e7acb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:47:52 crc kubenswrapper[4780]: I1210 10:47:52.458159 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication-operator/authentication-operator-69f744f599-zvngr"] Dec 10 10:47:52 crc kubenswrapper[4780]: W1210 10:47:52.464439 4780 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod51f53e78_be26_49e0_b4ad_32de3129ddeb.slice/crio-3cdf228503b78b59a94daf1108d87cebe66dfa916af3449d43a083106e4e597d WatchSource:0}: Error finding container 3cdf228503b78b59a94daf1108d87cebe66dfa916af3449d43a083106e4e597d: Status 404 returned error can't find the container with id 3cdf228503b78b59a94daf1108d87cebe66dfa916af3449d43a083106e4e597d Dec 10 10:47:52 crc kubenswrapper[4780]: I1210 10:47:52.473188 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-fw9bc"] Dec 10 10:47:52 crc kubenswrapper[4780]: I1210 10:47:52.490079 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-oauth-apiserver/apiserver-7bbb656c7d-dgfj7"] Dec 10 10:47:52 crc kubenswrapper[4780]: I1210 10:47:52.511722 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-config-operator/openshift-config-operator-7777fb866f-q6q9q"] Dec 10 10:47:52 crc kubenswrapper[4780]: W1210 10:47:52.513793 4780 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd4217565_e1f8_497f_a961_f7823901afb6.slice/crio-bac4225797b25e38a85e1ac9e5009938a51517171d2defd2f9106be8c3a6f9aa WatchSource:0}: Error finding container bac4225797b25e38a85e1ac9e5009938a51517171d2defd2f9106be8c3a6f9aa: Status 404 returned error can't find the container with id bac4225797b25e38a85e1ac9e5009938a51517171d2defd2f9106be8c3a6f9aa Dec 10 10:47:52 crc kubenswrapper[4780]: I1210 10:47:52.513940 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/downloads-7954f5f757-fxxzq"] Dec 10 10:47:52 crc kubenswrapper[4780]: I1210 10:47:52.523806 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-etcd-operator/etcd-operator-b45778765-lmmsq"] Dec 10 10:47:52 crc kubenswrapper[4780]: I1210 10:47:52.527157 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-nfr2m"] Dec 10 10:47:52 crc kubenswrapper[4780]: I1210 10:47:52.550388 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-c8dbv"] Dec 10 10:47:52 crc kubenswrapper[4780]: I1210 10:47:52.552416 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 10 10:47:52 crc kubenswrapper[4780]: E1210 10:47:52.558163 4780 nestedpendingoperations.go:348] 
Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-10 10:47:53.058120771 +0000 UTC m=+177.911514214 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:47:52 crc kubenswrapper[4780]: I1210 10:47:52.568166 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-controller-84d6567774-jmkwd"] Dec 10 10:47:52 crc kubenswrapper[4780]: I1210 10:47:52.571535 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-f9d7485db-2dwc9"] Dec 10 10:47:52 crc kubenswrapper[4780]: I1210 10:47:52.590820 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-b6pdb"] Dec 10 10:47:52 crc kubenswrapper[4780]: I1210 10:47:52.595085 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-nmvlq"] Dec 10 10:47:52 crc kubenswrapper[4780]: I1210 10:47:52.600880 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-qllmj"] Dec 10 10:47:52 crc kubenswrapper[4780]: I1210 10:47:52.608715 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator/migrator-59844c95c7-x9cz8"] Dec 10 10:47:52 crc kubenswrapper[4780]: W1210 10:47:52.648014 4780 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod1d4846ae_8aeb_4940_adb4_ad6726532d8b.slice/crio-3b82e66601733cd87dc0a58269c73d547746467ef3a9b39d5cbbecaf7c849753 WatchSource:0}: Error finding container 3b82e66601733cd87dc0a58269c73d547746467ef3a9b39d5cbbecaf7c849753: Status 404 returned error can't find the container with id 3b82e66601733cd87dc0a58269c73d547746467ef3a9b39d5cbbecaf7c849753 Dec 10 10:47:52 crc kubenswrapper[4780]: I1210 10:47:52.657448 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-25gsf\" (UID: \"b7abac51-adc5-42fa-9084-033e4e7e7acb\") " pod="openshift-image-registry/image-registry-697d97f7c8-25gsf" Dec 10 10:47:52 crc kubenswrapper[4780]: E1210 10:47:52.657951 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-10 10:47:53.157912794 +0000 UTC m=+178.011306237 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-25gsf" (UID: "b7abac51-adc5-42fa-9084-033e4e7e7acb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:47:52 crc kubenswrapper[4780]: I1210 10:47:52.682609 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca-operator/service-ca-operator-777779d784-skxsg"] Dec 10 10:47:52 crc kubenswrapper[4780]: I1210 10:47:52.720851 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29422725-lvhr4"] Dec 10 10:47:52 crc kubenswrapper[4780]: I1210 10:47:52.755237 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/network-metrics-daemon-46s5p"] Dec 10 10:47:52 crc kubenswrapper[4780]: I1210 10:47:52.758516 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 10 10:47:52 crc kubenswrapper[4780]: E1210 10:47:52.759021 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-10 10:47:53.258996451 +0000 UTC m=+178.112389894 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:47:52 crc kubenswrapper[4780]: W1210 10:47:52.760491 4780 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poda3427a27_b233_4e0f_a9ae_25a0d67c6b65.slice/crio-5280f3742d817c3f6a84ce92fecd733d1c69c6c4effd5a65c6336ab29b1ed236 WatchSource:0}: Error finding container 5280f3742d817c3f6a84ce92fecd733d1c69c6c4effd5a65c6336ab29b1ed236: Status 404 returned error can't find the container with id 5280f3742d817c3f6a84ce92fecd733d1c69c6c4effd5a65c6336ab29b1ed236 Dec 10 10:47:52 crc kubenswrapper[4780]: I1210 10:47:52.767334 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-56x4k"] Dec 10 10:47:52 crc kubenswrapper[4780]: W1210 10:47:52.769213 4780 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod6e560d85_bf0c_4604_9f52_d46fe96b6fe7.slice/crio-758d20cb39318e970e5705358100b24f05aedc609cc5eae03c2cef346be887bc WatchSource:0}: Error finding container 758d20cb39318e970e5705358100b24f05aedc609cc5eae03c2cef346be887bc: Status 404 returned error can't find the container with id 758d20cb39318e970e5705358100b24f05aedc609cc5eae03c2cef346be887bc Dec 10 10:47:52 crc kubenswrapper[4780]: I1210 10:47:52.809483 4780 patch_prober.go:28] interesting pod/router-default-5444994796-qbmwm container/router namespace/openshift-ingress: Startup probe status=failure output="Get \"http://localhost:1936/healthz/ready\": dial tcp [::1]:1936: connect: connection refused" start-of-body= Dec 10 10:47:52 crc kubenswrapper[4780]: I1210 10:47:52.809595 4780 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-qbmwm" podUID="1226614f-560f-40b9-81a2-595e79043653" containerName="router" probeResult="failure" output="Get \"http://localhost:1936/healthz/ready\": dial tcp [::1]:1936: connect: connection refused" Dec 10 10:47:52 crc kubenswrapper[4780]: I1210 10:47:52.809975 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/multus-admission-controller-857f4d67dd-v57bz"] Dec 10 10:47:52 crc kubenswrapper[4780]: I1210 10:47:52.813561 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-6ffpq"] Dec 10 10:47:52 crc kubenswrapper[4780]: I1210 10:47:52.824637 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-server-vbvq9" event={"ID":"51f53e78-be26-49e0-b4ad-32de3129ddeb","Type":"ContainerStarted","Data":"3cdf228503b78b59a94daf1108d87cebe66dfa916af3449d43a083106e4e597d"} Dec 10 10:47:52 crc kubenswrapper[4780]: I1210 10:47:52.918611 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-25gsf\" (UID: 
\"b7abac51-adc5-42fa-9084-033e4e7e7acb\") " pod="openshift-image-registry/image-registry-697d97f7c8-25gsf" Dec 10 10:47:52 crc kubenswrapper[4780]: E1210 10:47:52.919541 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-10 10:47:53.419523387 +0000 UTC m=+178.272916830 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-25gsf" (UID: "b7abac51-adc5-42fa-9084-033e4e7e7acb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:47:52 crc kubenswrapper[4780]: I1210 10:47:52.921306 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-st68f"] Dec 10 10:47:52 crc kubenswrapper[4780]: I1210 10:47:52.922928 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-operator-74547568cd-rlc8b"] Dec 10 10:47:52 crc kubenswrapper[4780]: I1210 10:47:52.928235 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-ns5px" event={"ID":"d4217565-e1f8-497f-a961-f7823901afb6","Type":"ContainerStarted","Data":"bac4225797b25e38a85e1ac9e5009938a51517171d2defd2f9106be8c3a6f9aa"} Dec 10 10:47:52 crc kubenswrapper[4780]: I1210 10:47:52.933566 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-csjzj"] Dec 10 10:47:52 crc kubenswrapper[4780]: I1210 10:47:52.939901 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca/service-ca-9c57cc56f-6z8d8"] Dec 10 10:47:52 crc kubenswrapper[4780]: I1210 10:47:52.944422 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-lrzpt"] Dec 10 10:47:52 crc kubenswrapper[4780]: I1210 10:47:52.946145 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-kc2sh"] Dec 10 10:47:52 crc kubenswrapper[4780]: I1210 10:47:52.946292 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-qllmj" event={"ID":"6e560d85-bf0c-4604-9f52-d46fe96b6fe7","Type":"ContainerStarted","Data":"758d20cb39318e970e5705358100b24f05aedc609cc5eae03c2cef346be887bc"} Dec 10 10:47:52 crc kubenswrapper[4780]: I1210 10:47:52.950523 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/catalog-operator-68c6474976-4sfhd"] Dec 10 10:47:52 crc kubenswrapper[4780]: I1210 10:47:52.958513 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-jmkwd" event={"ID":"1d4846ae-8aeb-4940-adb4-ad6726532d8b","Type":"ContainerStarted","Data":"3b82e66601733cd87dc0a58269c73d547746467ef3a9b39d5cbbecaf7c849753"} Dec 10 10:47:52 crc kubenswrapper[4780]: I1210 10:47:52.966525 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd-operator/etcd-operator-b45778765-lmmsq" 
event={"ID":"bee2f9e8-49ac-4ead-91c3-2ac4138eb042","Type":"ContainerStarted","Data":"faec365345842a3890881446f70c8ab5181a6f4f20b64d381a2685aecbd5d78e"} Dec 10 10:47:52 crc kubenswrapper[4780]: I1210 10:47:52.979485 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console-operator/console-operator-58897d9998-52wql" event={"ID":"041a0a8d-2984-4158-b873-13944248e6ff","Type":"ContainerStarted","Data":"2e2c7780873d4563154471de4ded78747eacb8f0884c33b2b0f1a20d2ebdd456"} Dec 10 10:47:52 crc kubenswrapper[4780]: I1210 10:47:52.980020 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console-operator/console-operator-58897d9998-52wql" Dec 10 10:47:52 crc kubenswrapper[4780]: I1210 10:47:52.984463 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-dgfj7" event={"ID":"135e9ec7-c882-4894-a24e-669b09be3f5b","Type":"ContainerStarted","Data":"da995b8a6d0882baa66c7311c2ea8285c4c92662463afd3caeb27a783a8160c2"} Dec 10 10:47:52 crc kubenswrapper[4780]: I1210 10:47:52.992754 4780 patch_prober.go:28] interesting pod/console-operator-58897d9998-52wql container/console-operator namespace/openshift-console-operator: Readiness probe status=failure output="Get \"https://10.217.0.23:8443/readyz\": dial tcp 10.217.0.23:8443: connect: connection refused" start-of-body= Dec 10 10:47:52 crc kubenswrapper[4780]: I1210 10:47:52.992870 4780 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console-operator/console-operator-58897d9998-52wql" podUID="041a0a8d-2984-4158-b873-13944248e6ff" containerName="console-operator" probeResult="failure" output="Get \"https://10.217.0.23:8443/readyz\": dial tcp 10.217.0.23:8443: connect: connection refused" Dec 10 10:47:52 crc kubenswrapper[4780]: I1210 10:47:52.997840 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console-operator/console-operator-58897d9998-52wql" podStartSLOduration=149.997815303 podStartE2EDuration="2m29.997815303s" podCreationTimestamp="2025-12-10 10:45:23 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 10:47:52.997104744 +0000 UTC m=+177.850498187" watchObservedRunningTime="2025-12-10 10:47:52.997815303 +0000 UTC m=+177.851208746" Dec 10 10:47:52 crc kubenswrapper[4780]: I1210 10:47:52.999446 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns-operator/dns-operator-744455d44c-6tqmn" event={"ID":"b431cc6c-6110-4578-8bd2-55f39d1cbe63","Type":"ContainerStarted","Data":"ddc64d69e8187864b53bf100a21f5e19a9700dd0ae40a21269ecfa1785a65204"} Dec 10 10:47:53 crc kubenswrapper[4780]: I1210 10:47:53.006387 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["hostpath-provisioner/csi-hostpathplugin-7vv54"] Dec 10 10:47:53 crc kubenswrapper[4780]: I1210 10:47:53.011806 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication-operator/authentication-operator-69f744f599-zvngr" event={"ID":"40ab5014-1713-4cec-8577-58a4573025e8","Type":"ContainerStarted","Data":"7bd45543c40f8fcb59e9be21c50e0abf16436c3c58b4c4636d94df88a078a122"} Dec 10 10:47:53 crc kubenswrapper[4780]: I1210 10:47:53.020238 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod 
\"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 10 10:47:53 crc kubenswrapper[4780]: E1210 10:47:53.021257 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-10 10:47:53.521232551 +0000 UTC m=+178.374625994 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:47:53 crc kubenswrapper[4780]: I1210 10:47:53.021485 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-25gsf\" (UID: \"b7abac51-adc5-42fa-9084-033e4e7e7acb\") " pod="openshift-image-registry/image-registry-697d97f7c8-25gsf" Dec 10 10:47:53 crc kubenswrapper[4780]: E1210 10:47:53.021868 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-10 10:47:53.521859127 +0000 UTC m=+178.375252570 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-25gsf" (UID: "b7abac51-adc5-42fa-9084-033e4e7e7acb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:47:53 crc kubenswrapper[4780]: I1210 10:47:53.022318 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-nfr2m" event={"ID":"cc6463b6-4c8f-48d2-8aca-74626aa632eb","Type":"ContainerStarted","Data":"119c2d11dc9ee6ad58e90d71dc4df128b83362f9691f2ddf498fa027b7531f94"} Dec 10 10:47:53 crc kubenswrapper[4780]: I1210 10:47:53.024559 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-njfgs" event={"ID":"0d2560a1-1eb3-4fad-89c4-100985ef6455","Type":"ContainerStarted","Data":"fb09483c59dcac39f6ab7a83b2c2facf82c0a2e3a41354a0ea0a894ccc9121f6"} Dec 10 10:47:53 crc kubenswrapper[4780]: I1210 10:47:53.026545 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-7954f5f757-fxxzq" event={"ID":"09fe7cda-3948-484d-bcd9-e83d1ac0610a","Type":"ContainerStarted","Data":"f3fe7064a09359d257a811844ecac11462977e96766c7b9fa3dde7a42d7257ae"} Dec 10 10:47:53 crc kubenswrapper[4780]: I1210 10:47:53.033562 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-hgp6g" 
event={"ID":"af48657d-340c-40dd-8c8c-9e4f7250337b","Type":"ContainerStarted","Data":"c28fd5ba3ea0abe475b7c29203695d49e9634ed3815523c6277fd40ccae27fad"} Dec 10 10:47:53 crc kubenswrapper[4780]: W1210 10:47:53.053985 4780 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podedec8e8c_042b_46c6_b136_32bf2d144f27.slice/crio-ed8f9424b63c43a52fc45be00bc896f5b561bde688bd3083c303d4477ce7d95f WatchSource:0}: Error finding container ed8f9424b63c43a52fc45be00bc896f5b561bde688bd3083c303d4477ce7d95f: Status 404 returned error can't find the container with id ed8f9424b63c43a52fc45be00bc896f5b561bde688bd3083c303d4477ce7d95f Dec 10 10:47:53 crc kubenswrapper[4780]: I1210 10:47:53.054894 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/machine-api-operator-5694c8668f-2mb96" event={"ID":"428a3826-00fc-4452-8f22-61d02857b761","Type":"ContainerStarted","Data":"0c9ccec8780bcbde90b405a44ac1dfd888cb324d9343a658b1bbcf31efd84da9"} Dec 10 10:47:53 crc kubenswrapper[4780]: W1210 10:47:53.057081 4780 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podc2f76bac_1abe_4877_8e25_768778bf5edd.slice/crio-08995149f4dc9dd4bfd222588d7de79c2e6482a4183acf3a68276982959dc31c WatchSource:0}: Error finding container 08995149f4dc9dd4bfd222588d7de79c2e6482a4183acf3a68276982959dc31c: Status 404 returned error can't find the container with id 08995149f4dc9dd4bfd222588d7de79c2e6482a4183acf3a68276982959dc31c Dec 10 10:47:53 crc kubenswrapper[4780]: I1210 10:47:53.097321 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-9hgsw" event={"ID":"5464f7ae-9634-4208-a5f5-3e6299f72639","Type":"ContainerStarted","Data":"77ed95b4af4f91d2f7aa472ee4cbc9405eb84d74771839ec3f70642ac8aaa53d"} Dec 10 10:47:53 crc kubenswrapper[4780]: I1210 10:47:53.100416 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-api/machine-api-operator-5694c8668f-2mb96" podStartSLOduration=149.10038383 podStartE2EDuration="2m29.10038383s" podCreationTimestamp="2025-12-10 10:45:24 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 10:47:53.094886524 +0000 UTC m=+177.948279967" watchObservedRunningTime="2025-12-10 10:47:53.10038383 +0000 UTC m=+177.953777283" Dec 10 10:47:53 crc kubenswrapper[4780]: I1210 10:47:53.100843 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-hx2j2"] Dec 10 10:47:53 crc kubenswrapper[4780]: I1210 10:47:53.113565 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-2dwc9" event={"ID":"c79bfa07-4a71-4560-b706-ac6c81b10ddc","Type":"ContainerStarted","Data":"a6c3b1e9ea662359b1d06586394b675351713263c106da4b91d12fa42f9905fb"} Dec 10 10:47:53 crc kubenswrapper[4780]: I1210 10:47:53.124653 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 10 10:47:53 crc kubenswrapper[4780]: E1210 10:47:53.125621 4780 nestedpendingoperations.go:348] Operation for 
"{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-10 10:47:53.625497691 +0000 UTC m=+178.478891134 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:47:53 crc kubenswrapper[4780]: I1210 10:47:53.125834 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-25gsf\" (UID: \"b7abac51-adc5-42fa-9084-033e4e7e7acb\") " pod="openshift-image-registry/image-registry-697d97f7c8-25gsf" Dec 10 10:47:53 crc kubenswrapper[4780]: E1210 10:47:53.127256 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-10 10:47:53.627235587 +0000 UTC m=+178.480629020 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-25gsf" (UID: "b7abac51-adc5-42fa-9084-033e4e7e7acb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:47:53 crc kubenswrapper[4780]: I1210 10:47:53.131500 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-q6q9q" event={"ID":"027ecd1e-0802-4c3a-b42a-4e272ee3f6fc","Type":"ContainerStarted","Data":"0ae2549869e20efa0aa08fc9515777424290bbab77bd5c37cbed943ac24d94c3"} Dec 10 10:47:53 crc kubenswrapper[4780]: I1210 10:47:53.145722 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-fw9bc" event={"ID":"fb9e3c6f-29bf-49b8-a0c0-c17447e36e99","Type":"ContainerStarted","Data":"25f7b33a716dcc16081da556eddd09bad433ec8ef3a9c1cebbfc73ef3a697e6f"} Dec 10 10:47:53 crc kubenswrapper[4780]: I1210 10:47:53.147561 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-b6pdb" event={"ID":"52170468-2209-45d0-84f3-d223b1052bf9","Type":"ContainerStarted","Data":"7b4c5341b8852845db04c75b9bb83b4c8240e063ec99a633498c81e7de0e9832"} Dec 10 10:47:53 crc kubenswrapper[4780]: I1210 10:47:53.162641 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-canary/ingress-canary-2r7qq"] Dec 10 10:47:53 crc kubenswrapper[4780]: I1210 10:47:53.165621 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-nmvlq" 
event={"ID":"19f469f0-60aa-4251-88f4-96baafae3f21","Type":"ContainerStarted","Data":"6a8aa777c70e4ca1e71d440846b0437968edec4168a2bbac6a9cadc856c56dfc"} Dec 10 10:47:53 crc kubenswrapper[4780]: I1210 10:47:53.178442 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca-operator/service-ca-operator-777779d784-skxsg" event={"ID":"1ef48cf1-e8b0-491d-bcd8-5e6ca70702d0","Type":"ContainerStarted","Data":"a5ac1fa28e97299bd290bd224b4ec443221d0f286e9dea5e2e1d426beec61c79"} Dec 10 10:47:53 crc kubenswrapper[4780]: W1210 10:47:53.181959 4780 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod90ac2cea_e1c2_479b_8de0_0917f3779a13.slice/crio-d6375a6fa3a106ef34570a653848d6279f16e8038af1421443f222792e68a32d WatchSource:0}: Error finding container d6375a6fa3a106ef34570a653848d6279f16e8038af1421443f222792e68a32d: Status 404 returned error can't find the container with id d6375a6fa3a106ef34570a653848d6279f16e8038af1421443f222792e68a32d Dec 10 10:47:53 crc kubenswrapper[4780]: I1210 10:47:53.182816 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-dgx4k" event={"ID":"dc66eb07-7e9e-4093-9d7c-643164e2c8b7","Type":"ContainerStarted","Data":"61458ebc8c0c461572d01927f56f4792239ea67c61b72663e576a5d58439ca97"} Dec 10 10:47:53 crc kubenswrapper[4780]: W1210 10:47:53.184862 4780 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod007684b0_7499_47ad_b2d4_27051578f4cc.slice/crio-33ca21921b8bc8e0dcfb608ea99dbea557999c23f33b420a13f09cd0d52f00c4 WatchSource:0}: Error finding container 33ca21921b8bc8e0dcfb608ea99dbea557999c23f33b420a13f09cd0d52f00c4: Status 404 returned error can't find the container with id 33ca21921b8bc8e0dcfb608ea99dbea557999c23f33b420a13f09cd0d52f00c4 Dec 10 10:47:53 crc kubenswrapper[4780]: W1210 10:47:53.220497 4780 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod488b5c9e_23f8_47cb_ad7d_40af40abf207.slice/crio-842f4bc20d51f0572619fe62f9259f730f114f750beb53ab8a46e5933b7da01f WatchSource:0}: Error finding container 842f4bc20d51f0572619fe62f9259f730f114f750beb53ab8a46e5933b7da01f: Status 404 returned error can't find the container with id 842f4bc20d51f0572619fe62f9259f730f114f750beb53ab8a46e5933b7da01f Dec 10 10:47:53 crc kubenswrapper[4780]: I1210 10:47:53.221880 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns/dns-default-snzm9"] Dec 10 10:47:53 crc kubenswrapper[4780]: I1210 10:47:53.229160 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 10 10:47:53 crc kubenswrapper[4780]: E1210 10:47:53.229715 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-10 10:47:53.729461075 +0000 UTC m=+178.582854518 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:47:53 crc kubenswrapper[4780]: I1210 10:47:53.230116 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-25gsf\" (UID: \"b7abac51-adc5-42fa-9084-033e4e7e7acb\") " pod="openshift-image-registry/image-registry-697d97f7c8-25gsf" Dec 10 10:47:53 crc kubenswrapper[4780]: E1210 10:47:53.230502 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-10 10:47:53.730483802 +0000 UTC m=+178.583877245 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-25gsf" (UID: "b7abac51-adc5-42fa-9084-033e4e7e7acb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:47:53 crc kubenswrapper[4780]: W1210 10:47:53.254141 4780 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod657131c5_4c67_4388_bd84_4efa0978219f.slice/crio-adf11648291d7d8f0146671dfebc3f7738801b7ba98527ea9edc0f6737a49fd6 WatchSource:0}: Error finding container adf11648291d7d8f0146671dfebc3f7738801b7ba98527ea9edc0f6737a49fd6: Status 404 returned error can't find the container with id adf11648291d7d8f0146671dfebc3f7738801b7ba98527ea9edc0f6737a49fd6 Dec 10 10:47:53 crc kubenswrapper[4780]: W1210 10:47:53.271431 4780 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podc725ccf2_5e48_4267_b43b_df62e400d0d5.slice/crio-d7cc10b16b5c88c778d512890ab7a0ad087e0caa9e98d0b74fd797f55f7e73de WatchSource:0}: Error finding container d7cc10b16b5c88c778d512890ab7a0ad087e0caa9e98d0b74fd797f55f7e73de: Status 404 returned error can't find the container with id d7cc10b16b5c88c778d512890ab7a0ad087e0caa9e98d0b74fd797f55f7e73de Dec 10 10:47:53 crc kubenswrapper[4780]: I1210 10:47:53.331801 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 10 10:47:53 crc kubenswrapper[4780]: E1210 10:47:53.332028 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. 
No retries permitted until 2025-12-10 10:47:53.83199619 +0000 UTC m=+178.685389633 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:47:53 crc kubenswrapper[4780]: I1210 10:47:53.332140 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-25gsf\" (UID: \"b7abac51-adc5-42fa-9084-033e4e7e7acb\") " pod="openshift-image-registry/image-registry-697d97f7c8-25gsf" Dec 10 10:47:53 crc kubenswrapper[4780]: E1210 10:47:53.333407 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-10 10:47:53.833387617 +0000 UTC m=+178.686781060 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-25gsf" (UID: "b7abac51-adc5-42fa-9084-033e4e7e7acb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:47:53 crc kubenswrapper[4780]: I1210 10:47:53.433361 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 10 10:47:53 crc kubenswrapper[4780]: E1210 10:47:53.433666 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-10 10:47:53.933633412 +0000 UTC m=+178.787027025 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:47:53 crc kubenswrapper[4780]: I1210 10:47:53.434033 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-25gsf\" (UID: \"b7abac51-adc5-42fa-9084-033e4e7e7acb\") " pod="openshift-image-registry/image-registry-697d97f7c8-25gsf" Dec 10 10:47:53 crc kubenswrapper[4780]: E1210 10:47:53.434472 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-10 10:47:53.934454454 +0000 UTC m=+178.787847897 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-25gsf" (UID: "b7abac51-adc5-42fa-9084-033e4e7e7acb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:47:53 crc kubenswrapper[4780]: I1210 10:47:53.727871 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 10 10:47:53 crc kubenswrapper[4780]: E1210 10:47:53.729780 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-10 10:47:54.229758756 +0000 UTC m=+179.083152199 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:47:53 crc kubenswrapper[4780]: I1210 10:47:53.804337 4780 patch_prober.go:28] interesting pod/router-default-5444994796-qbmwm container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Dec 10 10:47:53 crc kubenswrapper[4780]: [-]has-synced failed: reason withheld Dec 10 10:47:53 crc kubenswrapper[4780]: [+]process-running ok Dec 10 10:47:53 crc kubenswrapper[4780]: healthz check failed Dec 10 10:47:53 crc kubenswrapper[4780]: I1210 10:47:53.804962 4780 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-qbmwm" podUID="1226614f-560f-40b9-81a2-595e79043653" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Dec 10 10:47:53 crc kubenswrapper[4780]: I1210 10:47:53.831020 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-25gsf\" (UID: \"b7abac51-adc5-42fa-9084-033e4e7e7acb\") " pod="openshift-image-registry/image-registry-697d97f7c8-25gsf" Dec 10 10:47:53 crc kubenswrapper[4780]: E1210 10:47:53.831544 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-10 10:47:54.331523212 +0000 UTC m=+179.184916655 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-25gsf" (UID: "b7abac51-adc5-42fa-9084-033e4e7e7acb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:47:53 crc kubenswrapper[4780]: I1210 10:47:53.932317 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 10 10:47:53 crc kubenswrapper[4780]: E1210 10:47:53.932806 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-10 10:47:54.432766633 +0000 UTC m=+179.286160106 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:47:54 crc kubenswrapper[4780]: I1210 10:47:54.034415 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-25gsf\" (UID: \"b7abac51-adc5-42fa-9084-033e4e7e7acb\") " pod="openshift-image-registry/image-registry-697d97f7c8-25gsf" Dec 10 10:47:54 crc kubenswrapper[4780]: E1210 10:47:54.035226 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-10 10:47:54.535181086 +0000 UTC m=+179.388578069 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-25gsf" (UID: "b7abac51-adc5-42fa-9084-033e4e7e7acb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:47:54 crc kubenswrapper[4780]: I1210 10:47:54.135474 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 10 10:47:54 crc kubenswrapper[4780]: E1210 10:47:54.135817 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-10 10:47:54.635800281 +0000 UTC m=+179.489193724 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:47:54 crc kubenswrapper[4780]: I1210 10:47:54.198025 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-snzm9" event={"ID":"1e936221-208e-4223-a97b-ec48717552d0","Type":"ContainerStarted","Data":"35a380a95e159b941c64b0f32249ef5283bda0f84ee6303ce16a01d6c4c9da9a"} Dec 10 10:47:54 crc kubenswrapper[4780]: I1210 10:47:54.201411 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/network-metrics-daemon-46s5p" event={"ID":"24187953-1dc5-48d7-b00c-1e5876604b6b","Type":"ContainerStarted","Data":"93dce924128552dbe18aae9dac50b16867e4c4e58f05d78af878d9b650559a08"} Dec 10 10:47:54 crc kubenswrapper[4780]: I1210 10:47:54.202856 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-7vv54" event={"ID":"57848ad9-ff93-4d90-9eb5-0825e149694d","Type":"ContainerStarted","Data":"9ca2f17ca0645d9c18f93ef918cfb19056172728d867a101fdb83666e7d7b22b"} Dec 10 10:47:54 crc kubenswrapper[4780]: I1210 10:47:54.204356 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29422725-lvhr4" event={"ID":"536a1b3e-4cda-4ed7-985a-595c13968356","Type":"ContainerStarted","Data":"f1136568a98c4a91b67688cc8e286f0c686f0976bce135589091655d8ac021be"} Dec 10 10:47:54 crc kubenswrapper[4780]: I1210 10:47:54.205669 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-c8dbv" event={"ID":"ebdf6b64-3c9b-4207-a4d3-3fc75ef78d68","Type":"ContainerStarted","Data":"2700b16f4da13465487c4374e1e6b659ae5a9ecfd8ae136de50e17b66631bb37"} Dec 10 10:47:54 crc kubenswrapper[4780]: I1210 10:47:54.208156 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-njfgs" event={"ID":"0d2560a1-1eb3-4fad-89c4-100985ef6455","Type":"ContainerStarted","Data":"858d13f121cb90f2ff79029e5d8b58e10b5ae8651f778357c993c1e2979dbfd2"} Dec 10 10:47:54 crc kubenswrapper[4780]: I1210 10:47:54.216061 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-csjzj" event={"ID":"75086894-fa83-41dd-ad4e-cca8d61f0869","Type":"ContainerStarted","Data":"6edfe1b962400d1036fc86d15aa4d54e962f227a970bec677a0975384a435d7d"} Dec 10 10:47:54 crc kubenswrapper[4780]: I1210 10:47:54.217493 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca/service-ca-9c57cc56f-6z8d8" event={"ID":"0c750f90-4775-4f44-9edc-53dd41864462","Type":"ContainerStarted","Data":"6b9e358af96582f61f1dd0241a14620b44731911e9cdcd0fb3efe75a42fb83fc"} Dec 10 10:47:54 crc kubenswrapper[4780]: I1210 10:47:54.221956 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-admission-controller-857f4d67dd-v57bz" event={"ID":"edec8e8c-042b-46c6-b136-32bf2d144f27","Type":"ContainerStarted","Data":"ed8f9424b63c43a52fc45be00bc896f5b561bde688bd3083c303d4477ce7d95f"} Dec 10 10:47:54 crc kubenswrapper[4780]: I1210 10:47:54.223400 4780 kubelet.go:2453] "SyncLoop (PLEG): event 
for pod" pod="openshift-marketplace/marketplace-operator-79b997595-lrzpt" event={"ID":"90ac2cea-e1c2-479b-8de0-0917f3779a13","Type":"ContainerStarted","Data":"d6375a6fa3a106ef34570a653848d6279f16e8038af1421443f222792e68a32d"} Dec 10 10:47:54 crc kubenswrapper[4780]: I1210 10:47:54.225578 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-rlc8b" event={"ID":"ee82ec7a-8f0c-4576-aad4-e7686fc8ca5b","Type":"ContainerStarted","Data":"11faeb7ace31d85b1d7bf524dbdf305612ee19bf700e076a57eb6e7e660f72fd"} Dec 10 10:47:54 crc kubenswrapper[4780]: I1210 10:47:54.230969 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-server-vbvq9" event={"ID":"51f53e78-be26-49e0-b4ad-32de3129ddeb","Type":"ContainerStarted","Data":"027099c0e9e86e04169e52751ca56972208ba80fc17909b4c54416edfb11dbd4"} Dec 10 10:47:54 crc kubenswrapper[4780]: I1210 10:47:54.234623 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-st68f" event={"ID":"007684b0-7499-47ad-b2d4-27051578f4cc","Type":"ContainerStarted","Data":"33ca21921b8bc8e0dcfb608ea99dbea557999c23f33b420a13f09cd0d52f00c4"} Dec 10 10:47:54 crc kubenswrapper[4780]: I1210 10:47:54.236515 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-25gsf\" (UID: \"b7abac51-adc5-42fa-9084-033e4e7e7acb\") " pod="openshift-image-registry/image-registry-697d97f7c8-25gsf" Dec 10 10:47:54 crc kubenswrapper[4780]: E1210 10:47:54.236830 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-10 10:47:54.736816656 +0000 UTC m=+179.590210099 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-25gsf" (UID: "b7abac51-adc5-42fa-9084-033e4e7e7acb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:47:54 crc kubenswrapper[4780]: I1210 10:47:54.239975 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-canary/ingress-canary-2r7qq" event={"ID":"c725ccf2-5e48-4267-b43b-df62e400d0d5","Type":"ContainerStarted","Data":"d7cc10b16b5c88c778d512890ab7a0ad087e0caa9e98d0b74fd797f55f7e73de"} Dec 10 10:47:54 crc kubenswrapper[4780]: I1210 10:47:54.241834 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-hx2j2" event={"ID":"657131c5-4c67-4388-bd84-4efa0978219f","Type":"ContainerStarted","Data":"adf11648291d7d8f0146671dfebc3f7738801b7ba98527ea9edc0f6737a49fd6"} Dec 10 10:47:54 crc kubenswrapper[4780]: I1210 10:47:54.244426 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-6ffpq" event={"ID":"5d5f57c6-f7e1-4bda-82b0-0a6f1eb7e4ac","Type":"ContainerStarted","Data":"b4fd45897dd294b7b66d96df7fb9a9f6279d8096cc8fbb28b6204e457c90f707"} Dec 10 10:47:54 crc kubenswrapper[4780]: I1210 10:47:54.249422 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-server-vbvq9" podStartSLOduration=12.249368997 podStartE2EDuration="12.249368997s" podCreationTimestamp="2025-12-10 10:47:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 10:47:54.248365841 +0000 UTC m=+179.101759314" watchObservedRunningTime="2025-12-10 10:47:54.249368997 +0000 UTC m=+179.102762480" Dec 10 10:47:54 crc kubenswrapper[4780]: I1210 10:47:54.252147 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-4sfhd" event={"ID":"7753484d-d587-473d-8630-20cefaad3c7c","Type":"ContainerStarted","Data":"c8dfa026d27decb0dba1a5de5d14004de73e581bdb24817621f17cd8a11a7eaa"} Dec 10 10:47:54 crc kubenswrapper[4780]: I1210 10:47:54.254896 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-56x4k" event={"ID":"c2f76bac-1abe-4877-8e25-768778bf5edd","Type":"ContainerStarted","Data":"08995149f4dc9dd4bfd222588d7de79c2e6482a4183acf3a68276982959dc31c"} Dec 10 10:47:54 crc kubenswrapper[4780]: I1210 10:47:54.259134 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-kc2sh" event={"ID":"488b5c9e-23f8-47cb-ad7d-40af40abf207","Type":"ContainerStarted","Data":"842f4bc20d51f0572619fe62f9259f730f114f750beb53ab8a46e5933b7da01f"} Dec 10 10:47:54 crc kubenswrapper[4780]: I1210 10:47:54.260575 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-ns5px" event={"ID":"d4217565-e1f8-497f-a961-f7823901afb6","Type":"ContainerStarted","Data":"f8ee22049cb1a0aded8f2919a63a839f58f515db7078b00727ab969ce9c9cdb2"} Dec 
10 10:47:54 crc kubenswrapper[4780]: I1210 10:47:54.262373 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-x9cz8" event={"ID":"a3427a27-b233-4e0f-a9ae-25a0d67c6b65","Type":"ContainerStarted","Data":"5280f3742d817c3f6a84ce92fecd733d1c69c6c4effd5a65c6336ab29b1ed236"} Dec 10 10:47:54 crc kubenswrapper[4780]: I1210 10:47:54.265460 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-hgp6g" event={"ID":"af48657d-340c-40dd-8c8c-9e4f7250337b","Type":"ContainerStarted","Data":"7aceca8b5b6169b99c514add56e4f5f3b4057c67b38b0091f3635eb0e77fe111"} Dec 10 10:47:54 crc kubenswrapper[4780]: I1210 10:47:54.268644 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-dgx4k" event={"ID":"dc66eb07-7e9e-4093-9d7c-643164e2c8b7","Type":"ContainerStarted","Data":"a611b43eab12646e750ffdaa438ef6e81fd33f6337a3e613bb4982d810fdb343"} Dec 10 10:47:54 crc kubenswrapper[4780]: I1210 10:47:54.270108 4780 patch_prober.go:28] interesting pod/console-operator-58897d9998-52wql container/console-operator namespace/openshift-console-operator: Readiness probe status=failure output="Get \"https://10.217.0.23:8443/readyz\": dial tcp 10.217.0.23:8443: connect: connection refused" start-of-body= Dec 10 10:47:54 crc kubenswrapper[4780]: I1210 10:47:54.270151 4780 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console-operator/console-operator-58897d9998-52wql" podUID="041a0a8d-2984-4158-b873-13944248e6ff" containerName="console-operator" probeResult="failure" output="Get \"https://10.217.0.23:8443/readyz\": dial tcp 10.217.0.23:8443: connect: connection refused" Dec 10 10:47:54 crc kubenswrapper[4780]: I1210 10:47:54.279846 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-ns5px" podStartSLOduration=151.279808691 podStartE2EDuration="2m31.279808691s" podCreationTimestamp="2025-12-10 10:45:23 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 10:47:54.277791667 +0000 UTC m=+179.131185140" watchObservedRunningTime="2025-12-10 10:47:54.279808691 +0000 UTC m=+179.133202154" Dec 10 10:47:54 crc kubenswrapper[4780]: I1210 10:47:54.337606 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 10 10:47:54 crc kubenswrapper[4780]: E1210 10:47:54.337787 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-10 10:47:54.83775315 +0000 UTC m=+179.691146593 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:47:54 crc kubenswrapper[4780]: I1210 10:47:54.338241 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-25gsf\" (UID: \"b7abac51-adc5-42fa-9084-033e4e7e7acb\") " pod="openshift-image-registry/image-registry-697d97f7c8-25gsf" Dec 10 10:47:54 crc kubenswrapper[4780]: E1210 10:47:54.339036 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-10 10:47:54.839015603 +0000 UTC m=+179.692409046 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-25gsf" (UID: "b7abac51-adc5-42fa-9084-033e4e7e7acb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:47:54 crc kubenswrapper[4780]: I1210 10:47:54.442055 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 10 10:47:54 crc kubenswrapper[4780]: E1210 10:47:54.442816 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-10 10:47:54.942771941 +0000 UTC m=+179.796165374 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:47:54 crc kubenswrapper[4780]: I1210 10:47:54.544076 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-25gsf\" (UID: \"b7abac51-adc5-42fa-9084-033e4e7e7acb\") " pod="openshift-image-registry/image-registry-697d97f7c8-25gsf" Dec 10 10:47:54 crc kubenswrapper[4780]: E1210 10:47:54.544589 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-10 10:47:55.044564157 +0000 UTC m=+179.897957710 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-25gsf" (UID: "b7abac51-adc5-42fa-9084-033e4e7e7acb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:47:54 crc kubenswrapper[4780]: I1210 10:47:54.645668 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 10 10:47:54 crc kubenswrapper[4780]: E1210 10:47:54.646043 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-10 10:47:55.146001374 +0000 UTC m=+179.999394817 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:47:54 crc kubenswrapper[4780]: I1210 10:47:54.646238 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-25gsf\" (UID: \"b7abac51-adc5-42fa-9084-033e4e7e7acb\") " pod="openshift-image-registry/image-registry-697d97f7c8-25gsf" Dec 10 10:47:54 crc kubenswrapper[4780]: E1210 10:47:54.646778 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-10 10:47:55.146765044 +0000 UTC m=+180.000158667 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-25gsf" (UID: "b7abac51-adc5-42fa-9084-033e4e7e7acb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:47:54 crc kubenswrapper[4780]: I1210 10:47:54.747865 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 10 10:47:54 crc kubenswrapper[4780]: E1210 10:47:54.748143 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-10 10:47:55.248081527 +0000 UTC m=+180.101474970 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:47:54 crc kubenswrapper[4780]: I1210 10:47:54.748299 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-25gsf\" (UID: \"b7abac51-adc5-42fa-9084-033e4e7e7acb\") " pod="openshift-image-registry/image-registry-697d97f7c8-25gsf" Dec 10 10:47:54 crc kubenswrapper[4780]: E1210 10:47:54.748842 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-10 10:47:55.248829027 +0000 UTC m=+180.102222620 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-25gsf" (UID: "b7abac51-adc5-42fa-9084-033e4e7e7acb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:47:54 crc kubenswrapper[4780]: I1210 10:47:54.816197 4780 patch_prober.go:28] interesting pod/router-default-5444994796-qbmwm container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Dec 10 10:47:54 crc kubenswrapper[4780]: [-]has-synced failed: reason withheld Dec 10 10:47:54 crc kubenswrapper[4780]: [+]process-running ok Dec 10 10:47:54 crc kubenswrapper[4780]: healthz check failed Dec 10 10:47:54 crc kubenswrapper[4780]: I1210 10:47:54.816276 4780 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-qbmwm" podUID="1226614f-560f-40b9-81a2-595e79043653" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Dec 10 10:47:54 crc kubenswrapper[4780]: I1210 10:47:54.849515 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 10 10:47:54 crc kubenswrapper[4780]: E1210 10:47:54.849878 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-10 10:47:55.349861263 +0000 UTC m=+180.203254706 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:47:54 crc kubenswrapper[4780]: I1210 10:47:54.950879 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-25gsf\" (UID: \"b7abac51-adc5-42fa-9084-033e4e7e7acb\") " pod="openshift-image-registry/image-registry-697d97f7c8-25gsf" Dec 10 10:47:54 crc kubenswrapper[4780]: E1210 10:47:54.952415 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-10 10:47:55.452392518 +0000 UTC m=+180.305785961 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-25gsf" (UID: "b7abac51-adc5-42fa-9084-033e4e7e7acb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:47:55 crc kubenswrapper[4780]: I1210 10:47:55.051822 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 10 10:47:55 crc kubenswrapper[4780]: E1210 10:47:55.052348 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-10 10:47:55.552327776 +0000 UTC m=+180.405721219 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:47:55 crc kubenswrapper[4780]: I1210 10:47:55.101690 4780 patch_prober.go:28] interesting pod/console-operator-58897d9998-52wql container/console-operator namespace/openshift-console-operator: Liveness probe status=failure output="Get \"https://10.217.0.23:8443/healthz\": dial tcp 10.217.0.23:8443: connect: connection refused" start-of-body= Dec 10 10:47:55 crc kubenswrapper[4780]: I1210 10:47:55.101761 4780 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console-operator/console-operator-58897d9998-52wql" podUID="041a0a8d-2984-4158-b873-13944248e6ff" containerName="console-operator" probeResult="failure" output="Get \"https://10.217.0.23:8443/healthz\": dial tcp 10.217.0.23:8443: connect: connection refused" Dec 10 10:47:55 crc kubenswrapper[4780]: I1210 10:47:55.102085 4780 patch_prober.go:28] interesting pod/console-operator-58897d9998-52wql container/console-operator namespace/openshift-console-operator: Readiness probe status=failure output="Get \"https://10.217.0.23:8443/readyz\": dial tcp 10.217.0.23:8443: connect: connection refused" start-of-body= Dec 10 10:47:55 crc kubenswrapper[4780]: I1210 10:47:55.102138 4780 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console-operator/console-operator-58897d9998-52wql" podUID="041a0a8d-2984-4158-b873-13944248e6ff" containerName="console-operator" probeResult="failure" output="Get \"https://10.217.0.23:8443/readyz\": dial tcp 10.217.0.23:8443: connect: connection refused" Dec 10 10:47:55 crc kubenswrapper[4780]: I1210 10:47:55.153721 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-25gsf\" (UID: \"b7abac51-adc5-42fa-9084-033e4e7e7acb\") " pod="openshift-image-registry/image-registry-697d97f7c8-25gsf" Dec 10 10:47:55 crc kubenswrapper[4780]: E1210 10:47:55.154262 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-10 10:47:55.654240375 +0000 UTC m=+180.507633808 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-25gsf" (UID: "b7abac51-adc5-42fa-9084-033e4e7e7acb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:47:55 crc kubenswrapper[4780]: I1210 10:47:55.256039 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 10 10:47:55 crc kubenswrapper[4780]: E1210 10:47:55.256285 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-10 10:47:55.756252107 +0000 UTC m=+180.609645550 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:47:55 crc kubenswrapper[4780]: I1210 10:47:55.256493 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-25gsf\" (UID: \"b7abac51-adc5-42fa-9084-033e4e7e7acb\") " pod="openshift-image-registry/image-registry-697d97f7c8-25gsf" Dec 10 10:47:55 crc kubenswrapper[4780]: E1210 10:47:55.257007 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-10 10:47:55.756986316 +0000 UTC m=+180.610379769 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-25gsf" (UID: "b7abac51-adc5-42fa-9084-033e4e7e7acb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:47:55 crc kubenswrapper[4780]: I1210 10:47:55.273758 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-jmkwd" event={"ID":"1d4846ae-8aeb-4940-adb4-ad6726532d8b","Type":"ContainerStarted","Data":"4a444905ab77c6de5a1dd1658dc430ec0bf6a61ac9ea262b28ca4fc983975a14"} Dec 10 10:47:55 crc kubenswrapper[4780]: I1210 10:47:55.275028 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-nfr2m" event={"ID":"cc6463b6-4c8f-48d2-8aca-74626aa632eb","Type":"ContainerStarted","Data":"d01182ca52ee33f8cc1a48dba593fa9024637507e2dec008d80a1b5fa2818d2f"} Dec 10 10:47:55 crc kubenswrapper[4780]: I1210 10:47:55.276040 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd-operator/etcd-operator-b45778765-lmmsq" event={"ID":"bee2f9e8-49ac-4ead-91c3-2ac4138eb042","Type":"ContainerStarted","Data":"307f4665e414c81d7071b9f633fb6a206c05283acd1243f6faae9168a22d1290"} Dec 10 10:47:55 crc kubenswrapper[4780]: I1210 10:47:55.277292 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication-operator/authentication-operator-69f744f599-zvngr" event={"ID":"40ab5014-1713-4cec-8577-58a4573025e8","Type":"ContainerStarted","Data":"1e75dd2c32d816d1d14add8a72b632abc0685bd7548e263d39467c52eacc72a7"} Dec 10 10:47:55 crc kubenswrapper[4780]: I1210 10:47:55.278614 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-b6pdb" event={"ID":"52170468-2209-45d0-84f3-d223b1052bf9","Type":"ContainerStarted","Data":"bacfc69bf36949cc2cc376fefa4bb2719473db78b250450b7a3a54d2f28864f9"} Dec 10 10:47:55 crc kubenswrapper[4780]: I1210 10:47:55.279898 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-fw9bc" event={"ID":"fb9e3c6f-29bf-49b8-a0c0-c17447e36e99","Type":"ContainerStarted","Data":"452275bf42979dd853254a2b1fa1a297c6e673d26d188431d6ff7a0e7393a91c"} Dec 10 10:47:55 crc kubenswrapper[4780]: I1210 10:47:55.281273 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-fw9bc" Dec 10 10:47:55 crc kubenswrapper[4780]: I1210 10:47:55.283031 4780 generic.go:334] "Generic (PLEG): container finished" podID="027ecd1e-0802-4c3a-b42a-4e272ee3f6fc" containerID="c217207c4885a93d80dc709c348c81f17698af1db535da141db5e33af64e1360" exitCode=0 Dec 10 10:47:55 crc kubenswrapper[4780]: I1210 10:47:55.283099 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-q6q9q" event={"ID":"027ecd1e-0802-4c3a-b42a-4e272ee3f6fc","Type":"ContainerDied","Data":"c217207c4885a93d80dc709c348c81f17698af1db535da141db5e33af64e1360"} Dec 10 10:47:55 crc kubenswrapper[4780]: I1210 10:47:55.283986 4780 patch_prober.go:28] interesting pod/route-controller-manager-6576b87f9c-fw9bc container/route-controller-manager 
namespace/openshift-route-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.7:8443/healthz\": dial tcp 10.217.0.7:8443: connect: connection refused" start-of-body= Dec 10 10:47:55 crc kubenswrapper[4780]: I1210 10:47:55.284141 4780 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-fw9bc" podUID="fb9e3c6f-29bf-49b8-a0c0-c17447e36e99" containerName="route-controller-manager" probeResult="failure" output="Get \"https://10.217.0.7:8443/healthz\": dial tcp 10.217.0.7:8443: connect: connection refused" Dec 10 10:47:55 crc kubenswrapper[4780]: I1210 10:47:55.286225 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-7954f5f757-fxxzq" event={"ID":"09fe7cda-3948-484d-bcd9-e83d1ac0610a","Type":"ContainerStarted","Data":"deb50716f11ade378405f94e5dfb23374756041d9247c96b96f862d026d8a63e"} Dec 10 10:47:55 crc kubenswrapper[4780]: I1210 10:47:55.287825 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns-operator/dns-operator-744455d44c-6tqmn" event={"ID":"b431cc6c-6110-4578-8bd2-55f39d1cbe63","Type":"ContainerStarted","Data":"4e1d641469ed55f74af9d0ea43520bf1480a57261dc7cb8a9a09d52aa1700244"} Dec 10 10:47:55 crc kubenswrapper[4780]: I1210 10:47:55.289560 4780 generic.go:334] "Generic (PLEG): container finished" podID="135e9ec7-c882-4894-a24e-669b09be3f5b" containerID="4de8616eb9564068faf145670acb16fb9eee7eaa9b647a7e3aca74bf5b5509db" exitCode=0 Dec 10 10:47:55 crc kubenswrapper[4780]: I1210 10:47:55.289833 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-dgfj7" event={"ID":"135e9ec7-c882-4894-a24e-669b09be3f5b","Type":"ContainerDied","Data":"4de8616eb9564068faf145670acb16fb9eee7eaa9b647a7e3aca74bf5b5509db"} Dec 10 10:47:55 crc kubenswrapper[4780]: I1210 10:47:55.290677 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-879f6c89f-njfgs" Dec 10 10:47:55 crc kubenswrapper[4780]: I1210 10:47:55.293454 4780 patch_prober.go:28] interesting pod/controller-manager-879f6c89f-njfgs container/controller-manager namespace/openshift-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.9:8443/healthz\": dial tcp 10.217.0.9:8443: connect: connection refused" start-of-body= Dec 10 10:47:55 crc kubenswrapper[4780]: I1210 10:47:55.293501 4780 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-controller-manager/controller-manager-879f6c89f-njfgs" podUID="0d2560a1-1eb3-4fad-89c4-100985ef6455" containerName="controller-manager" probeResult="failure" output="Get \"https://10.217.0.9:8443/healthz\": dial tcp 10.217.0.9:8443: connect: connection refused" Dec 10 10:47:55 crc kubenswrapper[4780]: I1210 10:47:55.311455 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-fw9bc" podStartSLOduration=151.311428302 podStartE2EDuration="2m31.311428302s" podCreationTimestamp="2025-12-10 10:45:24 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 10:47:55.30753962 +0000 UTC m=+180.160933073" watchObservedRunningTime="2025-12-10 10:47:55.311428302 +0000 UTC m=+180.164821745" Dec 10 10:47:55 crc kubenswrapper[4780]: I1210 10:47:55.329604 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" 
pod="openshift-controller-manager/controller-manager-879f6c89f-njfgs" podStartSLOduration=152.329578411 podStartE2EDuration="2m32.329578411s" podCreationTimestamp="2025-12-10 10:45:23 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 10:47:55.327044975 +0000 UTC m=+180.180438418" watchObservedRunningTime="2025-12-10 10:47:55.329578411 +0000 UTC m=+180.182971854" Dec 10 10:47:55 crc kubenswrapper[4780]: I1210 10:47:55.361760 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 10 10:47:55 crc kubenswrapper[4780]: E1210 10:47:55.362799 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-10 10:47:55.862746957 +0000 UTC m=+180.716140530 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:47:55 crc kubenswrapper[4780]: I1210 10:47:55.373298 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-hgp6g" podStartSLOduration=153.373267944 podStartE2EDuration="2m33.373267944s" podCreationTimestamp="2025-12-10 10:45:22 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 10:47:55.372630197 +0000 UTC m=+180.226023640" watchObservedRunningTime="2025-12-10 10:47:55.373267944 +0000 UTC m=+180.226661387" Dec 10 10:47:55 crc kubenswrapper[4780]: I1210 10:47:55.464154 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-25gsf\" (UID: \"b7abac51-adc5-42fa-9084-033e4e7e7acb\") " pod="openshift-image-registry/image-registry-697d97f7c8-25gsf" Dec 10 10:47:55 crc kubenswrapper[4780]: E1210 10:47:55.464594 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-10 10:47:55.964577224 +0000 UTC m=+180.817970667 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-25gsf" (UID: "b7abac51-adc5-42fa-9084-033e4e7e7acb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:47:55 crc kubenswrapper[4780]: I1210 10:47:55.857343 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 10 10:47:55 crc kubenswrapper[4780]: E1210 10:47:55.858223 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-10 10:47:56.3581844 +0000 UTC m=+181.211577843 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:47:55 crc kubenswrapper[4780]: I1210 10:47:55.870289 4780 patch_prober.go:28] interesting pod/router-default-5444994796-qbmwm container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Dec 10 10:47:55 crc kubenswrapper[4780]: [-]has-synced failed: reason withheld Dec 10 10:47:55 crc kubenswrapper[4780]: [+]process-running ok Dec 10 10:47:55 crc kubenswrapper[4780]: healthz check failed Dec 10 10:47:55 crc kubenswrapper[4780]: I1210 10:47:55.870379 4780 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-qbmwm" podUID="1226614f-560f-40b9-81a2-595e79043653" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Dec 10 10:47:56 crc kubenswrapper[4780]: I1210 10:47:56.051520 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-25gsf\" (UID: \"b7abac51-adc5-42fa-9084-033e4e7e7acb\") " pod="openshift-image-registry/image-registry-697d97f7c8-25gsf" Dec 10 10:47:56 crc kubenswrapper[4780]: E1210 10:47:56.052226 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-10 10:47:56.552208023 +0000 UTC m=+181.405601466 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-25gsf" (UID: "b7abac51-adc5-42fa-9084-033e4e7e7acb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:47:56 crc kubenswrapper[4780]: I1210 10:47:56.303413 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 10 10:47:56 crc kubenswrapper[4780]: E1210 10:47:56.313560 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-10 10:47:56.813527776 +0000 UTC m=+181.666921219 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:47:56 crc kubenswrapper[4780]: I1210 10:47:56.412182 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-25gsf\" (UID: \"b7abac51-adc5-42fa-9084-033e4e7e7acb\") " pod="openshift-image-registry/image-registry-697d97f7c8-25gsf" Dec 10 10:47:56 crc kubenswrapper[4780]: E1210 10:47:56.413597 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-10 10:47:56.913552085 +0000 UTC m=+181.766945528 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-25gsf" (UID: "b7abac51-adc5-42fa-9084-033e4e7e7acb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:47:56 crc kubenswrapper[4780]: I1210 10:47:56.477861 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-9hgsw" event={"ID":"5464f7ae-9634-4208-a5f5-3e6299f72639","Type":"ContainerStarted","Data":"810d13b5ab55e692162e9029644b0e6c43c642a5840a5d5d432ed921e3f026a9"} Dec 10 10:47:56 crc kubenswrapper[4780]: I1210 10:47:56.495101 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-hx2j2" event={"ID":"657131c5-4c67-4388-bd84-4efa0978219f","Type":"ContainerStarted","Data":"032daf029c0aaa08a0b605cc27c21976aad6fc5f7e36d505f2ebd91b3c99081c"} Dec 10 10:47:56 crc kubenswrapper[4780]: I1210 10:47:56.526299 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 10 10:47:56 crc kubenswrapper[4780]: E1210 10:47:56.527636 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-10 10:47:57.027614455 +0000 UTC m=+181.881007888 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:47:56 crc kubenswrapper[4780]: I1210 10:47:56.534630 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-6ffpq" event={"ID":"5d5f57c6-f7e1-4bda-82b0-0a6f1eb7e4ac","Type":"ContainerStarted","Data":"053aedc1f5924e24aea34ead722ad52df795f5129a3cda0baf874ea70a4e5533"} Dec 10 10:47:56 crc kubenswrapper[4780]: I1210 10:47:56.536015 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/network-metrics-daemon-46s5p" event={"ID":"24187953-1dc5-48d7-b00c-1e5876604b6b","Type":"ContainerStarted","Data":"3a5051c721e1f08e7ef01c2ac8e80351af2c28ec78065f77149afdf17777ae48"} Dec 10 10:47:56 crc kubenswrapper[4780]: I1210 10:47:56.539864 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-rlc8b" event={"ID":"ee82ec7a-8f0c-4576-aad4-e7686fc8ca5b","Type":"ContainerStarted","Data":"0b51030ef61c51d5b7cae61b2868aa365ebf7b53b5616329560b6cdb92863b83"} Dec 10 10:47:56 crc kubenswrapper[4780]: I1210 10:47:56.543366 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-dgx4k" event={"ID":"dc66eb07-7e9e-4093-9d7c-643164e2c8b7","Type":"ContainerStarted","Data":"a653c484efa4df87612d2c4a4412ad1c1dce5be5bbacafa05005ba13ef734879"} Dec 10 10:47:56 crc kubenswrapper[4780]: I1210 10:47:56.548824 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-canary/ingress-canary-2r7qq" event={"ID":"c725ccf2-5e48-4267-b43b-df62e400d0d5","Type":"ContainerStarted","Data":"f33e7be403df4aee64c924c40320202c9a72ca377ea3c85eac7e086ab029d00d"} Dec 10 10:47:56 crc kubenswrapper[4780]: I1210 10:47:56.553712 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-2dwc9" event={"ID":"c79bfa07-4a71-4560-b706-ac6c81b10ddc","Type":"ContainerStarted","Data":"5e17e2d0b7cd069de3f55f9447d591c50652d6787ef5f6718ef18da28ac0e4f8"} Dec 10 10:47:56 crc kubenswrapper[4780]: I1210 10:47:56.555965 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-c8dbv" event={"ID":"ebdf6b64-3c9b-4207-a4d3-3fc75ef78d68","Type":"ContainerStarted","Data":"c50d12670d80b840d0a35a979b0d4745c962ebd4196ec5a5a1bdd46e41acba4a"} Dec 10 10:47:56 crc kubenswrapper[4780]: I1210 10:47:56.557957 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-qllmj" event={"ID":"6e560d85-bf0c-4604-9f52-d46fe96b6fe7","Type":"ContainerStarted","Data":"c4baa2f445f3ead4e029b7dd021df8730d2bd5ca75018ef42bd3db67cff605f7"} Dec 10 10:47:56 crc kubenswrapper[4780]: I1210 10:47:56.558454 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-authentication/oauth-openshift-558db77b4-qllmj" Dec 10 10:47:56 crc kubenswrapper[4780]: I1210 10:47:56.561879 4780 patch_prober.go:28] interesting pod/oauth-openshift-558db77b4-qllmj container/oauth-openshift namespace/openshift-authentication: 
Readiness probe status=failure output="Get \"https://10.217.0.41:6443/healthz\": dial tcp 10.217.0.41:6443: connect: connection refused" start-of-body= Dec 10 10:47:56 crc kubenswrapper[4780]: I1210 10:47:56.561971 4780 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-authentication/oauth-openshift-558db77b4-qllmj" podUID="6e560d85-bf0c-4604-9f52-d46fe96b6fe7" containerName="oauth-openshift" probeResult="failure" output="Get \"https://10.217.0.41:6443/healthz\": dial tcp 10.217.0.41:6443: connect: connection refused" Dec 10 10:47:56 crc kubenswrapper[4780]: I1210 10:47:56.630366 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-lrzpt" event={"ID":"90ac2cea-e1c2-479b-8de0-0917f3779a13","Type":"ContainerStarted","Data":"e6e7b0ed6d645f4c93f46695bf63bb1ece776bfbdb34f2e6a769691fe1f2a252"} Dec 10 10:47:56 crc kubenswrapper[4780]: I1210 10:47:56.630661 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/marketplace-operator-79b997595-lrzpt" Dec 10 10:47:56 crc kubenswrapper[4780]: I1210 10:47:56.633646 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-25gsf\" (UID: \"b7abac51-adc5-42fa-9084-033e4e7e7acb\") " pod="openshift-image-registry/image-registry-697d97f7c8-25gsf" Dec 10 10:47:56 crc kubenswrapper[4780]: E1210 10:47:56.634376 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-10 10:47:57.134348291 +0000 UTC m=+181.987741734 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-25gsf" (UID: "b7abac51-adc5-42fa-9084-033e4e7e7acb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:47:56 crc kubenswrapper[4780]: I1210 10:47:56.635159 4780 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-lrzpt container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.38:8080/healthz\": dial tcp 10.217.0.38:8080: connect: connection refused" start-of-body= Dec 10 10:47:56 crc kubenswrapper[4780]: I1210 10:47:56.635243 4780 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-lrzpt" podUID="90ac2cea-e1c2-479b-8de0-0917f3779a13" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.38:8080/healthz\": dial tcp 10.217.0.38:8080: connect: connection refused" Dec 10 10:47:56 crc kubenswrapper[4780]: I1210 10:47:56.692681 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca-operator/service-ca-operator-777779d784-skxsg" event={"ID":"1ef48cf1-e8b0-491d-bcd8-5e6ca70702d0","Type":"ContainerStarted","Data":"efb7418abb07e825f7a684cc6745dc5ca273dab0b70f24e08584e2ab63e50306"} Dec 10 10:47:56 crc kubenswrapper[4780]: I1210 10:47:56.694885 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-st68f" event={"ID":"007684b0-7499-47ad-b2d4-27051578f4cc","Type":"ContainerStarted","Data":"54ee54268482cc1b9fbb1848f7e1b23615ac3da9ca225a93b5136212d2906568"} Dec 10 10:47:56 crc kubenswrapper[4780]: I1210 10:47:56.698088 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-st68f" Dec 10 10:47:56 crc kubenswrapper[4780]: I1210 10:47:56.698236 4780 patch_prober.go:28] interesting pod/olm-operator-6b444d44fb-st68f container/olm-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.34:8443/healthz\": dial tcp 10.217.0.34:8443: connect: connection refused" start-of-body= Dec 10 10:47:56 crc kubenswrapper[4780]: I1210 10:47:56.698288 4780 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-st68f" podUID="007684b0-7499-47ad-b2d4-27051578f4cc" containerName="olm-operator" probeResult="failure" output="Get \"https://10.217.0.34:8443/healthz\": dial tcp 10.217.0.34:8443: connect: connection refused" Dec 10 10:47:56 crc kubenswrapper[4780]: I1210 10:47:56.707384 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-4sfhd" event={"ID":"7753484d-d587-473d-8630-20cefaad3c7c","Type":"ContainerStarted","Data":"8c2be9ec1fa95e57cffe2f9fd14f60af395bdca1f7568076a9af2b061016c6bf"} Dec 10 10:47:56 crc kubenswrapper[4780]: I1210 10:47:56.734866 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: 
\"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 10 10:47:56 crc kubenswrapper[4780]: E1210 10:47:56.735151 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-10 10:47:57.235105799 +0000 UTC m=+182.088499242 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:47:56 crc kubenswrapper[4780]: I1210 10:47:56.735742 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-25gsf\" (UID: \"b7abac51-adc5-42fa-9084-033e4e7e7acb\") " pod="openshift-image-registry/image-registry-697d97f7c8-25gsf" Dec 10 10:47:56 crc kubenswrapper[4780]: E1210 10:47:56.748021 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-10 10:47:57.247991649 +0000 UTC m=+182.101385272 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-25gsf" (UID: "b7abac51-adc5-42fa-9084-033e4e7e7acb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:47:56 crc kubenswrapper[4780]: I1210 10:47:56.810658 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ingress/router-default-5444994796-qbmwm" Dec 10 10:47:56 crc kubenswrapper[4780]: I1210 10:47:56.854296 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 10 10:47:56 crc kubenswrapper[4780]: E1210 10:47:56.854783 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-10 10:47:57.354764326 +0000 UTC m=+182.208157769 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:47:56 crc kubenswrapper[4780]: I1210 10:47:56.854933 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-25gsf\" (UID: \"b7abac51-adc5-42fa-9084-033e4e7e7acb\") " pod="openshift-image-registry/image-registry-697d97f7c8-25gsf" Dec 10 10:47:56 crc kubenswrapper[4780]: E1210 10:47:56.859139 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-10 10:47:57.359104401 +0000 UTC m=+182.212497994 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-25gsf" (UID: "b7abac51-adc5-42fa-9084-033e4e7e7acb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:47:56 crc kubenswrapper[4780]: I1210 10:47:56.907271 4780 patch_prober.go:28] interesting pod/router-default-5444994796-qbmwm container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Dec 10 10:47:56 crc kubenswrapper[4780]: [-]has-synced failed: reason withheld Dec 10 10:47:56 crc kubenswrapper[4780]: [+]process-running ok Dec 10 10:47:56 crc kubenswrapper[4780]: healthz check failed Dec 10 10:47:56 crc kubenswrapper[4780]: I1210 10:47:56.907367 4780 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-qbmwm" podUID="1226614f-560f-40b9-81a2-595e79043653" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Dec 10 10:47:56 crc kubenswrapper[4780]: I1210 10:47:56.971154 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 10 10:47:56 crc kubenswrapper[4780]: E1210 10:47:56.971588 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-10 10:47:57.471570349 +0000 UTC m=+182.324963792 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:47:56 crc kubenswrapper[4780]: I1210 10:47:56.971837 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-kc2sh" event={"ID":"488b5c9e-23f8-47cb-ad7d-40af40abf207","Type":"ContainerStarted","Data":"6e0aae642872819f88d5f5b63860765e4a7b5b15a47ca15d5221e4e1384e164a"} Dec 10 10:47:57 crc kubenswrapper[4780]: I1210 10:47:57.003444 4780 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-lrzpt container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.38:8080/healthz\": dial tcp 10.217.0.38:8080: connect: connection refused" start-of-body= Dec 10 10:47:57 crc kubenswrapper[4780]: I1210 10:47:57.003906 4780 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-lrzpt" podUID="90ac2cea-e1c2-479b-8de0-0917f3779a13" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.38:8080/healthz\": dial tcp 10.217.0.38:8080: connect: connection refused" Dec 10 10:47:57 crc kubenswrapper[4780]: I1210 10:47:57.004085 4780 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-lrzpt container/marketplace-operator namespace/openshift-marketplace: Liveness probe status=failure output="Get \"http://10.217.0.38:8080/healthz\": dial tcp 10.217.0.38:8080: connect: connection refused" start-of-body= Dec 10 10:47:57 crc kubenswrapper[4780]: I1210 10:47:57.004117 4780 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-marketplace/marketplace-operator-79b997595-lrzpt" podUID="90ac2cea-e1c2-479b-8de0-0917f3779a13" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.38:8080/healthz\": dial tcp 10.217.0.38:8080: connect: connection refused" Dec 10 10:47:57 crc kubenswrapper[4780]: I1210 10:47:57.012967 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-56x4k" event={"ID":"c2f76bac-1abe-4877-8e25-768778bf5edd","Type":"ContainerStarted","Data":"be7e1b6577a8d6284f5e53aa16218726c57b8594cd8d6ea654580b32804a45a6"} Dec 10 10:47:57 crc kubenswrapper[4780]: I1210 10:47:57.038637 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca/service-ca-9c57cc56f-6z8d8" event={"ID":"0c750f90-4775-4f44-9edc-53dd41864462","Type":"ContainerStarted","Data":"fc8418127216a4ea9088255ddde36775f4ce39bb5c1ab4ba2d5d7d6eb58da417"} Dec 10 10:47:57 crc kubenswrapper[4780]: I1210 10:47:57.073424 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-25gsf\" (UID: \"b7abac51-adc5-42fa-9084-033e4e7e7acb\") " pod="openshift-image-registry/image-registry-697d97f7c8-25gsf" Dec 10 10:47:57 crc kubenswrapper[4780]: E1210 10:47:57.076114 4780 
nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-10 10:47:57.576082267 +0000 UTC m=+182.429475710 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-25gsf" (UID: "b7abac51-adc5-42fa-9084-033e4e7e7acb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:47:57 crc kubenswrapper[4780]: I1210 10:47:57.182836 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 10 10:47:57 crc kubenswrapper[4780]: E1210 10:47:57.183904 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-10 10:47:57.683887131 +0000 UTC m=+182.537280574 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:47:57 crc kubenswrapper[4780]: I1210 10:47:57.295160 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-25gsf\" (UID: \"b7abac51-adc5-42fa-9084-033e4e7e7acb\") " pod="openshift-image-registry/image-registry-697d97f7c8-25gsf" Dec 10 10:47:57 crc kubenswrapper[4780]: E1210 10:47:57.296548 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-10 10:47:57.796531584 +0000 UTC m=+182.649925027 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-25gsf" (UID: "b7abac51-adc5-42fa-9084-033e4e7e7acb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:47:57 crc kubenswrapper[4780]: I1210 10:47:57.333191 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-snzm9" event={"ID":"1e936221-208e-4223-a97b-ec48717552d0","Type":"ContainerStarted","Data":"842697febe1c4198d47e6445fcae3de8507c262a82f49aadc3ff9c71fd6534f1"} Dec 10 10:47:57 crc kubenswrapper[4780]: I1210 10:47:57.360765 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-x9cz8" event={"ID":"a3427a27-b233-4e0f-a9ae-25a0d67c6b65","Type":"ContainerStarted","Data":"0d366e0f355e45d4a128adc528fca005444fcccfd9ea1df69d5c2cf3ff6219f3"} Dec 10 10:47:57 crc kubenswrapper[4780]: E1210 10:47:57.444366 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-10 10:47:57.944317634 +0000 UTC m=+182.797711077 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:47:57 crc kubenswrapper[4780]: I1210 10:47:57.444495 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 10 10:47:57 crc kubenswrapper[4780]: I1210 10:47:57.444887 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-25gsf\" (UID: \"b7abac51-adc5-42fa-9084-033e4e7e7acb\") " pod="openshift-image-registry/image-registry-697d97f7c8-25gsf" Dec 10 10:47:57 crc kubenswrapper[4780]: E1210 10:47:57.445371 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-10 10:47:57.945358301 +0000 UTC m=+182.798751754 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-25gsf" (UID: "b7abac51-adc5-42fa-9084-033e4e7e7acb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:47:57 crc kubenswrapper[4780]: I1210 10:47:57.484905 4780 patch_prober.go:28] interesting pod/machine-config-daemon-xhdr5 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 10 10:47:57 crc kubenswrapper[4780]: I1210 10:47:57.484992 4780 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" podUID="6bf1dca1-b191-4796-b326-baac53e84045" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 10 10:47:57 crc kubenswrapper[4780]: I1210 10:47:57.545902 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 10 10:47:57 crc kubenswrapper[4780]: E1210 10:47:57.546505 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-10 10:47:58.046482389 +0000 UTC m=+182.899875832 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:47:57 crc kubenswrapper[4780]: I1210 10:47:57.583380 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-config-operator/openshift-config-operator-7777fb866f-q6q9q" Dec 10 10:47:57 crc kubenswrapper[4780]: I1210 10:47:57.743532 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-25gsf\" (UID: \"b7abac51-adc5-42fa-9084-033e4e7e7acb\") " pod="openshift-image-registry/image-registry-697d97f7c8-25gsf" Dec 10 10:47:57 crc kubenswrapper[4780]: E1210 10:47:57.744258 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-10 10:47:58.244239128 +0000 UTC m=+183.097632571 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-25gsf" (UID: "b7abac51-adc5-42fa-9084-033e4e7e7acb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:47:57 crc kubenswrapper[4780]: I1210 10:47:57.766169 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-fpl55" Dec 10 10:47:57 crc kubenswrapper[4780]: I1210 10:47:57.767966 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-csjzj" event={"ID":"75086894-fa83-41dd-ad4e-cca8d61f0869","Type":"ContainerStarted","Data":"96c28c9d6a99262ee95a2a2a997d269d8c3b8fba59335c079ba0dafffa88cb15"} Dec 10 10:47:57 crc kubenswrapper[4780]: I1210 10:47:57.768331 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-csjzj" Dec 10 10:47:57 crc kubenswrapper[4780]: I1210 10:47:57.775484 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-nmvlq" event={"ID":"19f469f0-60aa-4251-88f4-96baafae3f21","Type":"ContainerStarted","Data":"6cbcc30a9f2b4e4b54921a191925e21b8b9b3e359d14e7b2e40a4eb6da3b5dfc"} Dec 10 10:47:57 crc kubenswrapper[4780]: I1210 10:47:57.777505 4780 patch_prober.go:28] interesting pod/controller-manager-879f6c89f-njfgs container/controller-manager namespace/openshift-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.9:8443/healthz\": dial tcp 10.217.0.9:8443: connect: connection refused" start-of-body= Dec 10 10:47:57 crc kubenswrapper[4780]: I1210 10:47:57.777622 4780 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-controller-manager/controller-manager-879f6c89f-njfgs" podUID="0d2560a1-1eb3-4fad-89c4-100985ef6455" containerName="controller-manager" probeResult="failure" output="Get \"https://10.217.0.9:8443/healthz\": dial tcp 10.217.0.9:8443: connect: connection refused" Dec 10 10:47:57 crc kubenswrapper[4780]: I1210 10:47:57.778280 4780 patch_prober.go:28] interesting pod/route-controller-manager-6576b87f9c-fw9bc container/route-controller-manager namespace/openshift-route-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.7:8443/healthz\": dial tcp 10.217.0.7:8443: connect: connection refused" start-of-body= Dec 10 10:47:57 crc kubenswrapper[4780]: I1210 10:47:57.778681 4780 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-fw9bc" podUID="fb9e3c6f-29bf-49b8-a0c0-c17447e36e99" containerName="route-controller-manager" probeResult="failure" output="Get \"https://10.217.0.7:8443/healthz\": dial tcp 10.217.0.7:8443: connect: connection refused" Dec 10 10:47:57 crc kubenswrapper[4780]: I1210 10:47:57.939632 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-apiserver/apiserver-76f77b778f-9hgsw" Dec 10 10:47:57 crc kubenswrapper[4780]: I1210 10:47:57.940777 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: 
\"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 10 10:47:57 crc kubenswrapper[4780]: I1210 10:47:57.941181 4780 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-apiserver/apiserver-76f77b778f-9hgsw" Dec 10 10:47:57 crc kubenswrapper[4780]: I1210 10:47:57.942953 4780 patch_prober.go:28] interesting pod/packageserver-d55dfcdfc-csjzj container/packageserver namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.30:5443/healthz\": dial tcp 10.217.0.30:5443: connect: connection refused" start-of-body= Dec 10 10:47:57 crc kubenswrapper[4780]: I1210 10:47:57.943009 4780 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-csjzj" podUID="75086894-fa83-41dd-ad4e-cca8d61f0869" containerName="packageserver" probeResult="failure" output="Get \"https://10.217.0.30:5443/healthz\": dial tcp 10.217.0.30:5443: connect: connection refused" Dec 10 10:47:58 crc kubenswrapper[4780]: I1210 10:47:57.991465 4780 patch_prober.go:28] interesting pod/apiserver-76f77b778f-9hgsw container/openshift-apiserver namespace/openshift-apiserver: Startup probe status=failure output="Get \"https://10.217.0.5:8443/livez\": dial tcp 10.217.0.5:8443: connect: connection refused" start-of-body= Dec 10 10:47:58 crc kubenswrapper[4780]: I1210 10:47:57.991572 4780 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-apiserver/apiserver-76f77b778f-9hgsw" podUID="5464f7ae-9634-4208-a5f5-3e6299f72639" containerName="openshift-apiserver" probeResult="failure" output="Get \"https://10.217.0.5:8443/livez\": dial tcp 10.217.0.5:8443: connect: connection refused" Dec 10 10:47:58 crc kubenswrapper[4780]: E1210 10:47:57.997497 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-10 10:47:58.497463552 +0000 UTC m=+183.350857165 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:47:58 crc kubenswrapper[4780]: I1210 10:47:58.044148 4780 patch_prober.go:28] interesting pod/router-default-5444994796-qbmwm container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Dec 10 10:47:58 crc kubenswrapper[4780]: [-]has-synced failed: reason withheld Dec 10 10:47:58 crc kubenswrapper[4780]: [+]process-running ok Dec 10 10:47:58 crc kubenswrapper[4780]: healthz check failed Dec 10 10:47:58 crc kubenswrapper[4780]: I1210 10:47:58.044232 4780 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-qbmwm" podUID="1226614f-560f-40b9-81a2-595e79043653" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Dec 10 10:47:58 crc kubenswrapper[4780]: I1210 10:47:58.071279 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-25gsf\" (UID: \"b7abac51-adc5-42fa-9084-033e4e7e7acb\") " pod="openshift-image-registry/image-registry-697d97f7c8-25gsf" Dec 10 10:47:58 crc kubenswrapper[4780]: E1210 10:47:58.072232 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-10 10:47:58.572198382 +0000 UTC m=+183.425591825 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-25gsf" (UID: "b7abac51-adc5-42fa-9084-033e4e7e7acb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:47:58 crc kubenswrapper[4780]: I1210 10:47:58.265316 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 10 10:47:58 crc kubenswrapper[4780]: E1210 10:47:58.266202 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-10 10:47:58.7661756 +0000 UTC m=+183.619569043 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:47:58 crc kubenswrapper[4780]: I1210 10:47:58.288884 4780 patch_prober.go:28] interesting pod/olm-operator-6b444d44fb-st68f container/olm-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.34:8443/healthz\": dial tcp 10.217.0.34:8443: connect: connection refused" start-of-body= Dec 10 10:47:58 crc kubenswrapper[4780]: I1210 10:47:58.288973 4780 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-st68f" podUID="007684b0-7499-47ad-b2d4-27051578f4cc" containerName="olm-operator" probeResult="failure" output="Get \"https://10.217.0.34:8443/healthz\": dial tcp 10.217.0.34:8443: connect: connection refused" Dec 10 10:47:58 crc kubenswrapper[4780]: I1210 10:47:58.289051 4780 patch_prober.go:28] interesting pod/olm-operator-6b444d44fb-st68f container/olm-operator namespace/openshift-operator-lifecycle-manager: Liveness probe status=failure output="Get \"https://10.217.0.34:8443/healthz\": dial tcp 10.217.0.34:8443: connect: connection refused" start-of-body= Dec 10 10:47:58 crc kubenswrapper[4780]: I1210 10:47:58.289071 4780 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-st68f" podUID="007684b0-7499-47ad-b2d4-27051578f4cc" containerName="olm-operator" probeResult="failure" output="Get \"https://10.217.0.34:8443/healthz\": dial tcp 10.217.0.34:8443: connect: connection refused" Dec 10 10:47:58 crc kubenswrapper[4780]: I1210 10:47:58.367368 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-25gsf\" (UID: \"b7abac51-adc5-42fa-9084-033e4e7e7acb\") " pod="openshift-image-registry/image-registry-697d97f7c8-25gsf" Dec 10 10:47:58 crc kubenswrapper[4780]: E1210 10:47:58.369980 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-10 10:47:58.869958619 +0000 UTC m=+183.723352052 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-25gsf" (UID: "b7abac51-adc5-42fa-9084-033e4e7e7acb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:47:58 crc kubenswrapper[4780]: I1210 10:47:58.547457 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 10 10:47:58 crc kubenswrapper[4780]: E1210 10:47:58.548079 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-10 10:47:59.048049278 +0000 UTC m=+183.901442731 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:47:58 crc kubenswrapper[4780]: I1210 10:47:58.649065 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-25gsf\" (UID: \"b7abac51-adc5-42fa-9084-033e4e7e7acb\") " pod="openshift-image-registry/image-registry-697d97f7c8-25gsf" Dec 10 10:47:58 crc kubenswrapper[4780]: E1210 10:47:58.650084 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-10 10:47:59.15006696 +0000 UTC m=+184.003460403 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-25gsf" (UID: "b7abac51-adc5-42fa-9084-033e4e7e7acb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:47:58 crc kubenswrapper[4780]: I1210 10:47:58.750526 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 10 10:47:58 crc kubenswrapper[4780]: E1210 10:47:58.751092 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-10 10:47:59.251074676 +0000 UTC m=+184.104468119 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:47:58 crc kubenswrapper[4780]: I1210 10:47:58.765312 4780 patch_prober.go:28] interesting pod/packageserver-d55dfcdfc-csjzj container/packageserver namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.30:5443/healthz\": dial tcp 10.217.0.30:5443: connect: connection refused" start-of-body= Dec 10 10:47:58 crc kubenswrapper[4780]: I1210 10:47:58.765371 4780 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-csjzj" podUID="75086894-fa83-41dd-ad4e-cca8d61f0869" containerName="packageserver" probeResult="failure" output="Get \"https://10.217.0.30:5443/healthz\": dial tcp 10.217.0.30:5443: connect: connection refused" Dec 10 10:47:58 crc kubenswrapper[4780]: I1210 10:47:58.765450 4780 patch_prober.go:28] interesting pod/packageserver-d55dfcdfc-csjzj container/packageserver namespace/openshift-operator-lifecycle-manager: Liveness probe status=failure output="Get \"https://10.217.0.30:5443/healthz\": dial tcp 10.217.0.30:5443: connect: connection refused" start-of-body= Dec 10 10:47:58 crc kubenswrapper[4780]: I1210 10:47:58.765477 4780 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-csjzj" podUID="75086894-fa83-41dd-ad4e-cca8d61f0869" containerName="packageserver" probeResult="failure" output="Get \"https://10.217.0.30:5443/healthz\": dial tcp 10.217.0.30:5443: connect: connection refused" Dec 10 10:47:58 crc kubenswrapper[4780]: I1210 10:47:58.801664 4780 patch_prober.go:28] interesting pod/router-default-5444994796-qbmwm container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Dec 10 10:47:58 crc 
kubenswrapper[4780]: [-]has-synced failed: reason withheld Dec 10 10:47:58 crc kubenswrapper[4780]: [+]process-running ok Dec 10 10:47:58 crc kubenswrapper[4780]: healthz check failed Dec 10 10:47:58 crc kubenswrapper[4780]: I1210 10:47:58.801717 4780 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-qbmwm" podUID="1226614f-560f-40b9-81a2-595e79043653" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Dec 10 10:47:58 crc kubenswrapper[4780]: I1210 10:47:58.812335 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-jmkwd" event={"ID":"1d4846ae-8aeb-4940-adb4-ad6726532d8b","Type":"ContainerStarted","Data":"f1b9015f730064bbb71fe6a82bac9ac44377e8018f2257e9c5ad0b054514d514"} Dec 10 10:47:58 crc kubenswrapper[4780]: I1210 10:47:58.841004 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns-operator/dns-operator-744455d44c-6tqmn" event={"ID":"b431cc6c-6110-4578-8bd2-55f39d1cbe63","Type":"ContainerStarted","Data":"a2bdd455fb01a3e882e373cf38ac3f06bd63ce33218c11ac0f6336b0f45dd98b"} Dec 10 10:47:58 crc kubenswrapper[4780]: I1210 10:47:58.852313 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-25gsf\" (UID: \"b7abac51-adc5-42fa-9084-033e4e7e7acb\") " pod="openshift-image-registry/image-registry-697d97f7c8-25gsf" Dec 10 10:47:58 crc kubenswrapper[4780]: E1210 10:47:58.852812 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-10 10:47:59.35279501 +0000 UTC m=+184.206188473 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-25gsf" (UID: "b7abac51-adc5-42fa-9084-033e4e7e7acb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:47:58 crc kubenswrapper[4780]: I1210 10:47:58.859367 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-admission-controller-857f4d67dd-v57bz" event={"ID":"edec8e8c-042b-46c6-b136-32bf2d144f27","Type":"ContainerStarted","Data":"627d271b62d3fbd3711a3519713b8c6e0b2adff48fd98b00ff7d8a7e66ab34ec"} Dec 10 10:47:58 crc kubenswrapper[4780]: I1210 10:47:58.879354 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-rlc8b" event={"ID":"ee82ec7a-8f0c-4576-aad4-e7686fc8ca5b","Type":"ContainerStarted","Data":"4aa2c7a01a20d052b583b3f331696f8be3bc36bbf6197ec715e5ce36c33f4fbf"} Dec 10 10:47:58 crc kubenswrapper[4780]: I1210 10:47:58.917490 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-x9cz8" event={"ID":"a3427a27-b233-4e0f-a9ae-25a0d67c6b65","Type":"ContainerStarted","Data":"3b0ba3afbf3a03890e7e213b19237cdf3d82f363e05ade41836528713cbf8013"} Dec 10 10:47:58 crc kubenswrapper[4780]: I1210 10:47:58.927845 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-q6q9q" event={"ID":"027ecd1e-0802-4c3a-b42a-4e272ee3f6fc","Type":"ContainerStarted","Data":"5af94e3d01ed1885e1fac4fa2df29f95bf242797062a1e13799f3ef62e005536"} Dec 10 10:47:58 crc kubenswrapper[4780]: I1210 10:47:58.953177 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 10 10:47:58 crc kubenswrapper[4780]: E1210 10:47:58.955785 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-10 10:47:59.455749246 +0000 UTC m=+184.309142689 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:47:58 crc kubenswrapper[4780]: I1210 10:47:58.958740 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29422725-lvhr4" event={"ID":"536a1b3e-4cda-4ed7-985a-595c13968356","Type":"ContainerStarted","Data":"9a6dcf7d26cfc3d7057fe07bbb5f952e4b4522fe78fd34e23be5869efd50de11"} Dec 10 10:47:58 crc kubenswrapper[4780]: I1210 10:47:58.959613 4780 patch_prober.go:28] interesting pod/route-controller-manager-6576b87f9c-fw9bc container/route-controller-manager namespace/openshift-route-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.7:8443/healthz\": dial tcp 10.217.0.7:8443: connect: connection refused" start-of-body= Dec 10 10:47:58 crc kubenswrapper[4780]: I1210 10:47:58.959691 4780 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-fw9bc" podUID="fb9e3c6f-29bf-49b8-a0c0-c17447e36e99" containerName="route-controller-manager" probeResult="failure" output="Get \"https://10.217.0.7:8443/healthz\": dial tcp 10.217.0.7:8443: connect: connection refused" Dec 10 10:47:58 crc kubenswrapper[4780]: I1210 10:47:58.962648 4780 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-lrzpt container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.38:8080/healthz\": dial tcp 10.217.0.38:8080: connect: connection refused" start-of-body= Dec 10 10:47:58 crc kubenswrapper[4780]: I1210 10:47:58.962691 4780 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-lrzpt" podUID="90ac2cea-e1c2-479b-8de0-0917f3779a13" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.38:8080/healthz\": dial tcp 10.217.0.38:8080: connect: connection refused" Dec 10 10:47:58 crc kubenswrapper[4780]: I1210 10:47:58.964103 4780 patch_prober.go:28] interesting pod/oauth-openshift-558db77b4-qllmj container/oauth-openshift namespace/openshift-authentication: Readiness probe status=failure output="Get \"https://10.217.0.41:6443/healthz\": dial tcp 10.217.0.41:6443: connect: connection refused" start-of-body= Dec 10 10:47:58 crc kubenswrapper[4780]: I1210 10:47:58.964215 4780 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-authentication/oauth-openshift-558db77b4-qllmj" podUID="6e560d85-bf0c-4604-9f52-d46fe96b6fe7" containerName="oauth-openshift" probeResult="failure" output="Get \"https://10.217.0.41:6443/healthz\": dial tcp 10.217.0.41:6443: connect: connection refused" Dec 10 10:47:59 crc kubenswrapper[4780]: I1210 10:47:59.074460 4780 patch_prober.go:28] interesting pod/packageserver-d55dfcdfc-csjzj container/packageserver namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.30:5443/healthz\": dial tcp 10.217.0.30:5443: connect: connection refused" start-of-body= Dec 10 10:47:59 crc kubenswrapper[4780]: I1210 10:47:59.074561 4780 prober.go:107] "Probe failed" probeType="Readiness" 
pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-csjzj" podUID="75086894-fa83-41dd-ad4e-cca8d61f0869" containerName="packageserver" probeResult="failure" output="Get \"https://10.217.0.30:5443/healthz\": dial tcp 10.217.0.30:5443: connect: connection refused" Dec 10 10:47:59 crc kubenswrapper[4780]: I1210 10:47:59.075612 4780 patch_prober.go:28] interesting pod/olm-operator-6b444d44fb-st68f container/olm-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.34:8443/healthz\": dial tcp 10.217.0.34:8443: connect: connection refused" start-of-body= Dec 10 10:47:59 crc kubenswrapper[4780]: I1210 10:47:59.075654 4780 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-st68f" podUID="007684b0-7499-47ad-b2d4-27051578f4cc" containerName="olm-operator" probeResult="failure" output="Get \"https://10.217.0.34:8443/healthz\": dial tcp 10.217.0.34:8443: connect: connection refused" Dec 10 10:47:59 crc kubenswrapper[4780]: I1210 10:47:59.080794 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-25gsf\" (UID: \"b7abac51-adc5-42fa-9084-033e4e7e7acb\") " pod="openshift-image-registry/image-registry-697d97f7c8-25gsf" Dec 10 10:47:59 crc kubenswrapper[4780]: E1210 10:47:59.081576 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-10 10:47:59.581551166 +0000 UTC m=+184.434944609 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-25gsf" (UID: "b7abac51-adc5-42fa-9084-033e4e7e7acb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:47:59 crc kubenswrapper[4780]: I1210 10:47:59.201220 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 10 10:47:59 crc kubenswrapper[4780]: E1210 10:47:59.201958 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-10 10:47:59.701868681 +0000 UTC m=+184.555262264 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:47:59 crc kubenswrapper[4780]: I1210 10:47:59.202903 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-25gsf\" (UID: \"b7abac51-adc5-42fa-9084-033e4e7e7acb\") " pod="openshift-image-registry/image-registry-697d97f7c8-25gsf" Dec 10 10:47:59 crc kubenswrapper[4780]: E1210 10:47:59.214859 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-10 10:47:59.714822793 +0000 UTC m=+184.568216396 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-25gsf" (UID: "b7abac51-adc5-42fa-9084-033e4e7e7acb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:47:59 crc kubenswrapper[4780]: I1210 10:47:59.307532 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 10 10:47:59 crc kubenswrapper[4780]: E1210 10:47:59.308511 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-10 10:47:59.808487674 +0000 UTC m=+184.661881117 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:47:59 crc kubenswrapper[4780]: I1210 10:47:59.409771 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-25gsf\" (UID: \"b7abac51-adc5-42fa-9084-033e4e7e7acb\") " pod="openshift-image-registry/image-registry-697d97f7c8-25gsf" Dec 10 10:47:59 crc kubenswrapper[4780]: E1210 10:47:59.410335 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-10 10:47:59.910315441 +0000 UTC m=+184.763708884 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-25gsf" (UID: "b7abac51-adc5-42fa-9084-033e4e7e7acb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:47:59 crc kubenswrapper[4780]: I1210 10:47:59.871493 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 10 10:47:59 crc kubenswrapper[4780]: E1210 10:47:59.871903 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-10 10:48:00.371884779 +0000 UTC m=+185.225278222 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:47:59 crc kubenswrapper[4780]: I1210 10:47:59.884810 4780 patch_prober.go:28] interesting pod/router-default-5444994796-qbmwm container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Dec 10 10:47:59 crc kubenswrapper[4780]: [-]has-synced failed: reason withheld Dec 10 10:47:59 crc kubenswrapper[4780]: [+]process-running ok Dec 10 10:47:59 crc kubenswrapper[4780]: healthz check failed Dec 10 10:47:59 crc kubenswrapper[4780]: I1210 10:47:59.884898 4780 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-qbmwm" podUID="1226614f-560f-40b9-81a2-595e79043653" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Dec 10 10:48:00 crc kubenswrapper[4780]: I1210 10:48:00.376432 4780 patch_prober.go:28] interesting pod/openshift-config-operator-7777fb866f-q6q9q container/openshift-config-operator namespace/openshift-config-operator: Readiness probe status=failure output="Get \"https://10.217.0.20:8443/healthz\": dial tcp 10.217.0.20:8443: connect: connection refused" start-of-body= Dec 10 10:48:00 crc kubenswrapper[4780]: I1210 10:48:00.376611 4780 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-config-operator/openshift-config-operator-7777fb866f-q6q9q" podUID="027ecd1e-0802-4c3a-b42a-4e272ee3f6fc" containerName="openshift-config-operator" probeResult="failure" output="Get \"https://10.217.0.20:8443/healthz\": dial tcp 10.217.0.20:8443: connect: connection refused" Dec 10 10:48:00 crc kubenswrapper[4780]: I1210 10:48:00.378825 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 10 10:48:00 crc kubenswrapper[4780]: I1210 10:48:00.381083 4780 patch_prober.go:28] interesting pod/openshift-config-operator-7777fb866f-q6q9q container/openshift-config-operator namespace/openshift-config-operator: Liveness probe status=failure output="Get \"https://10.217.0.20:8443/healthz\": dial tcp 10.217.0.20:8443: connect: connection refused" start-of-body= Dec 10 10:48:00 crc kubenswrapper[4780]: I1210 10:48:00.381143 4780 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-config-operator/openshift-config-operator-7777fb866f-q6q9q" podUID="027ecd1e-0802-4c3a-b42a-4e272ee3f6fc" containerName="openshift-config-operator" probeResult="failure" output="Get \"https://10.217.0.20:8443/healthz\": dial tcp 10.217.0.20:8443: connect: connection refused" Dec 10 10:48:00 crc kubenswrapper[4780]: E1210 10:48:00.389381 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. 
No retries permitted until 2025-12-10 10:48:01.389324729 +0000 UTC m=+186.242718172 (durationBeforeRetry 1s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:48:00 crc kubenswrapper[4780]: I1210 10:48:00.480129 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-snzm9" event={"ID":"1e936221-208e-4223-a97b-ec48717552d0","Type":"ContainerStarted","Data":"fa582fa0d55982f3d2d5483591d3ae275a7cba5ba4a26d1f16e2316452683b7e"} Dec 10 10:48:00 crc kubenswrapper[4780]: I1210 10:48:00.487561 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-25gsf\" (UID: \"b7abac51-adc5-42fa-9084-033e4e7e7acb\") " pod="openshift-image-registry/image-registry-697d97f7c8-25gsf" Dec 10 10:48:00 crc kubenswrapper[4780]: E1210 10:48:00.488258 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-10 10:48:00.988235765 +0000 UTC m=+185.841629208 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-25gsf" (UID: "b7abac51-adc5-42fa-9084-033e4e7e7acb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:48:00 crc kubenswrapper[4780]: I1210 10:48:00.504510 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/revision-pruner-8-crc"] Dec 10 10:48:00 crc kubenswrapper[4780]: I1210 10:48:00.505505 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc" Dec 10 10:48:00 crc kubenswrapper[4780]: I1210 10:48:00.514978 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver"/"installer-sa-dockercfg-5pr6n" Dec 10 10:48:00 crc kubenswrapper[4780]: I1210 10:48:00.515856 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver"/"kube-root-ca.crt" Dec 10 10:48:00 crc kubenswrapper[4780]: I1210 10:48:00.522949 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-admission-controller-857f4d67dd-v57bz" event={"ID":"edec8e8c-042b-46c6-b136-32bf2d144f27","Type":"ContainerStarted","Data":"9ffe2c38f050fef1c2ec45d5106be4dfb618c269cecd1d3c6a99c0c59daa9e9f"} Dec 10 10:48:00 crc kubenswrapper[4780]: I1210 10:48:00.524255 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/network-metrics-daemon-46s5p" event={"ID":"24187953-1dc5-48d7-b00c-1e5876604b6b","Type":"ContainerStarted","Data":"7532da0de1c7ac7f66f88bbc706cb451e369eac2d3e65b8d7b67788c08d6da13"} Dec 10 10:48:00 crc kubenswrapper[4780]: I1210 10:48:00.525125 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-7vv54" event={"ID":"57848ad9-ff93-4d90-9eb5-0825e149694d","Type":"ContainerStarted","Data":"5531e9183169d333995a732d26f0a31a15a39920ddc28341bbf0f441bbcfd793"} Dec 10 10:48:00 crc kubenswrapper[4780]: I1210 10:48:00.526541 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-dgfj7" event={"ID":"135e9ec7-c882-4894-a24e-669b09be3f5b","Type":"ContainerStarted","Data":"3f3dc32982e41476e79de198a6bdcb6670ed50fdac9601ae5d907f80334f4abd"} Dec 10 10:48:00 crc kubenswrapper[4780]: I1210 10:48:00.531530 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-hx2j2" event={"ID":"657131c5-4c67-4388-bd84-4efa0978219f","Type":"ContainerStarted","Data":"b8d812163efabc3b811f91700aa752707ee165650283bb8e33cef69058b2e53e"} Dec 10 10:48:00 crc kubenswrapper[4780]: I1210 10:48:00.532421 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-hx2j2" Dec 10 10:48:00 crc kubenswrapper[4780]: I1210 10:48:00.542093 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-8-crc"] Dec 10 10:48:00 crc kubenswrapper[4780]: I1210 10:48:00.828277 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 10 10:48:00 crc kubenswrapper[4780]: I1210 10:48:00.828528 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/d5885af1-d160-445e-9c10-fb97487ef916-kubelet-dir\") pod \"revision-pruner-8-crc\" (UID: \"d5885af1-d160-445e-9c10-fb97487ef916\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Dec 10 10:48:00 crc kubenswrapper[4780]: I1210 10:48:00.828552 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: 
\"kubernetes.io/projected/d5885af1-d160-445e-9c10-fb97487ef916-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"d5885af1-d160-445e-9c10-fb97487ef916\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Dec 10 10:48:00 crc kubenswrapper[4780]: E1210 10:48:00.828666 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-10 10:48:01.328641146 +0000 UTC m=+186.182034589 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:48:00 crc kubenswrapper[4780]: I1210 10:48:00.832805 4780 patch_prober.go:28] interesting pod/router-default-5444994796-qbmwm container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Dec 10 10:48:00 crc kubenswrapper[4780]: [-]has-synced failed: reason withheld Dec 10 10:48:00 crc kubenswrapper[4780]: [+]process-running ok Dec 10 10:48:00 crc kubenswrapper[4780]: healthz check failed Dec 10 10:48:00 crc kubenswrapper[4780]: I1210 10:48:00.832853 4780 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-qbmwm" podUID="1226614f-560f-40b9-81a2-595e79043653" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Dec 10 10:48:00 crc kubenswrapper[4780]: I1210 10:48:00.850024 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-c8dbv" event={"ID":"ebdf6b64-3c9b-4207-a4d3-3fc75ef78d68","Type":"ContainerStarted","Data":"4c1a631b066279d8ba1b4e303de9df190cedd7c1b1e68c0974e6d1b1ee7fe459"} Dec 10 10:48:00 crc kubenswrapper[4780]: I1210 10:48:00.853956 4780 patch_prober.go:28] interesting pod/openshift-config-operator-7777fb866f-q6q9q container/openshift-config-operator namespace/openshift-config-operator: Readiness probe status=failure output="Get \"https://10.217.0.20:8443/healthz\": dial tcp 10.217.0.20:8443: connect: connection refused" start-of-body= Dec 10 10:48:00 crc kubenswrapper[4780]: I1210 10:48:00.854033 4780 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-config-operator/openshift-config-operator-7777fb866f-q6q9q" podUID="027ecd1e-0802-4c3a-b42a-4e272ee3f6fc" containerName="openshift-config-operator" probeResult="failure" output="Get \"https://10.217.0.20:8443/healthz\": dial tcp 10.217.0.20:8443: connect: connection refused" Dec 10 10:48:00 crc kubenswrapper[4780]: I1210 10:48:00.929866 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-25gsf\" (UID: \"b7abac51-adc5-42fa-9084-033e4e7e7acb\") " pod="openshift-image-registry/image-registry-697d97f7c8-25gsf" Dec 10 10:48:00 crc kubenswrapper[4780]: E1210 10:48:00.930553 4780 
nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-10 10:48:01.430528565 +0000 UTC m=+186.283922168 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-25gsf" (UID: "b7abac51-adc5-42fa-9084-033e4e7e7acb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:48:00 crc kubenswrapper[4780]: I1210 10:48:00.931323 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/d5885af1-d160-445e-9c10-fb97487ef916-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"d5885af1-d160-445e-9c10-fb97487ef916\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Dec 10 10:48:00 crc kubenswrapper[4780]: I1210 10:48:00.931373 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/d5885af1-d160-445e-9c10-fb97487ef916-kubelet-dir\") pod \"revision-pruner-8-crc\" (UID: \"d5885af1-d160-445e-9c10-fb97487ef916\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Dec 10 10:48:00 crc kubenswrapper[4780]: I1210 10:48:00.937148 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/d5885af1-d160-445e-9c10-fb97487ef916-kubelet-dir\") pod \"revision-pruner-8-crc\" (UID: \"d5885af1-d160-445e-9c10-fb97487ef916\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Dec 10 10:48:01 crc kubenswrapper[4780]: I1210 10:48:01.126122 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 10 10:48:01 crc kubenswrapper[4780]: E1210 10:48:01.127690 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-10 10:48:01.627571614 +0000 UTC m=+186.480965057 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:48:01 crc kubenswrapper[4780]: I1210 10:48:01.715113 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 10 10:48:01 crc kubenswrapper[4780]: E1210 10:48:01.730668 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-10 10:48:02.730563496 +0000 UTC m=+187.583956939 (durationBeforeRetry 1s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:48:01 crc kubenswrapper[4780]: I1210 10:48:01.731077 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-25gsf\" (UID: \"b7abac51-adc5-42fa-9084-033e4e7e7acb\") " pod="openshift-image-registry/image-registry-697d97f7c8-25gsf" Dec 10 10:48:01 crc kubenswrapper[4780]: E1210 10:48:01.752984 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-10 10:48:02.252856294 +0000 UTC m=+187.106249737 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-25gsf" (UID: "b7abac51-adc5-42fa-9084-033e4e7e7acb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:48:01 crc kubenswrapper[4780]: I1210 10:48:01.834147 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 10 10:48:02 crc kubenswrapper[4780]: E1210 10:48:01.835241 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-10 10:48:02.335209487 +0000 UTC m=+187.188602930 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:48:02 crc kubenswrapper[4780]: I1210 10:48:02.139301 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/d5885af1-d160-445e-9c10-fb97487ef916-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"d5885af1-d160-445e-9c10-fb97487ef916\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Dec 10 10:48:02 crc kubenswrapper[4780]: I1210 10:48:02.140872 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-25gsf\" (UID: \"b7abac51-adc5-42fa-9084-033e4e7e7acb\") " pod="openshift-image-registry/image-registry-697d97f7c8-25gsf" Dec 10 10:48:02 crc kubenswrapper[4780]: E1210 10:48:02.142683 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-10 10:48:02.64266385 +0000 UTC m=+187.496057293 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-25gsf" (UID: "b7abac51-adc5-42fa-9084-033e4e7e7acb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:48:02 crc kubenswrapper[4780]: I1210 10:48:02.146337 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc" Dec 10 10:48:02 crc kubenswrapper[4780]: I1210 10:48:02.224094 4780 patch_prober.go:28] interesting pod/router-default-5444994796-qbmwm container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Dec 10 10:48:02 crc kubenswrapper[4780]: [-]has-synced failed: reason withheld Dec 10 10:48:02 crc kubenswrapper[4780]: [+]process-running ok Dec 10 10:48:02 crc kubenswrapper[4780]: healthz check failed Dec 10 10:48:02 crc kubenswrapper[4780]: I1210 10:48:02.224318 4780 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-qbmwm" podUID="1226614f-560f-40b9-81a2-595e79043653" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Dec 10 10:48:02 crc kubenswrapper[4780]: I1210 10:48:02.481383 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 10 10:48:02 crc kubenswrapper[4780]: E1210 10:48:02.483451 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-10 10:48:02.983417362 +0000 UTC m=+187.836810805 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:48:02 crc kubenswrapper[4780]: I1210 10:48:02.543376 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-dns/dns-default-snzm9" Dec 10 10:48:02 crc kubenswrapper[4780]: I1210 10:48:02.677243 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-authentication-operator/authentication-operator-69f744f599-zvngr" podStartSLOduration=160.677126793 podStartE2EDuration="2m40.677126793s" podCreationTimestamp="2025-12-10 10:45:22 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 10:48:02.230806406 +0000 UTC m=+187.084199859" watchObservedRunningTime="2025-12-10 10:48:02.677126793 +0000 UTC m=+187.530520236" Dec 10 10:48:02 crc kubenswrapper[4780]: I1210 10:48:02.728436 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/console-f9d7485db-2dwc9" podStartSLOduration=159.728392116 podStartE2EDuration="2m39.728392116s" podCreationTimestamp="2025-12-10 10:45:23 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 10:48:02.571212769 +0000 UTC m=+187.424606222" watchObservedRunningTime="2025-12-10 10:48:02.728392116 +0000 UTC m=+187.581785559" Dec 10 10:48:02 crc kubenswrapper[4780]: I1210 
10:48:02.761172 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-25gsf\" (UID: \"b7abac51-adc5-42fa-9084-033e4e7e7acb\") " pod="openshift-image-registry/image-registry-697d97f7c8-25gsf" Dec 10 10:48:02 crc kubenswrapper[4780]: E1210 10:48:02.869055 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-10 10:48:03.369016207 +0000 UTC m=+188.222409650 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-25gsf" (UID: "b7abac51-adc5-42fa-9084-033e4e7e7acb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:48:02 crc kubenswrapper[4780]: I1210 10:48:02.870442 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 10 10:48:02 crc kubenswrapper[4780]: E1210 10:48:02.918056 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-10 10:48:03.41799848 +0000 UTC m=+188.271391923 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:48:02 crc kubenswrapper[4780]: I1210 10:48:02.920319 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-25gsf\" (UID: \"b7abac51-adc5-42fa-9084-033e4e7e7acb\") " pod="openshift-image-registry/image-registry-697d97f7c8-25gsf" Dec 10 10:48:03 crc kubenswrapper[4780]: I1210 10:48:03.294172 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-b6pdb" podStartSLOduration=160.294141135 podStartE2EDuration="2m40.294141135s" podCreationTimestamp="2025-12-10 10:45:23 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 10:48:02.729370112 +0000 UTC m=+187.582763555" watchObservedRunningTime="2025-12-10 10:48:03.294141135 +0000 UTC m=+188.147534578" Dec 10 10:48:03 crc kubenswrapper[4780]: I1210 10:48:03.294738 4780 patch_prober.go:28] interesting pod/apiserver-76f77b778f-9hgsw container/openshift-apiserver namespace/openshift-apiserver: Startup probe status=failure output="Get \"https://10.217.0.5:8443/livez\": dial tcp 10.217.0.5:8443: connect: connection refused" start-of-body= Dec 10 10:48:03 crc kubenswrapper[4780]: E1210 10:48:03.294938 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-10 10:48:03.794897225 +0000 UTC m=+188.648290668 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-25gsf" (UID: "b7abac51-adc5-42fa-9084-033e4e7e7acb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:48:03 crc kubenswrapper[4780]: I1210 10:48:03.294942 4780 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-apiserver/apiserver-76f77b778f-9hgsw" podUID="5464f7ae-9634-4208-a5f5-3e6299f72639" containerName="openshift-apiserver" probeResult="failure" output="Get \"https://10.217.0.5:8443/livez\": dial tcp 10.217.0.5:8443: connect: connection refused" Dec 10 10:48:03 crc kubenswrapper[4780]: I1210 10:48:03.295973 4780 patch_prober.go:28] interesting pod/openshift-config-operator-7777fb866f-q6q9q container/openshift-config-operator namespace/openshift-config-operator: Readiness probe status=failure output="Get \"https://10.217.0.20:8443/healthz\": dial tcp 10.217.0.20:8443: connect: connection refused" start-of-body= Dec 10 10:48:03 crc kubenswrapper[4780]: I1210 10:48:03.296047 4780 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-config-operator/openshift-config-operator-7777fb866f-q6q9q" podUID="027ecd1e-0802-4c3a-b42a-4e272ee3f6fc" containerName="openshift-config-operator" probeResult="failure" output="Get \"https://10.217.0.20:8443/healthz\": dial tcp 10.217.0.20:8443: connect: connection refused" Dec 10 10:48:03 crc kubenswrapper[4780]: I1210 10:48:03.297254 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 10 10:48:03 crc kubenswrapper[4780]: I1210 10:48:03.298640 4780 patch_prober.go:28] interesting pod/openshift-config-operator-7777fb866f-q6q9q container/openshift-config-operator namespace/openshift-config-operator: Liveness probe status=failure output="Get \"https://10.217.0.20:8443/healthz\": dial tcp 10.217.0.20:8443: connect: connection refused" start-of-body= Dec 10 10:48:03 crc kubenswrapper[4780]: I1210 10:48:03.298759 4780 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-config-operator/openshift-config-operator-7777fb866f-q6q9q" podUID="027ecd1e-0802-4c3a-b42a-4e272ee3f6fc" containerName="openshift-config-operator" probeResult="failure" output="Get \"https://10.217.0.20:8443/healthz\": dial tcp 10.217.0.20:8443: connect: connection refused" Dec 10 10:48:03 crc kubenswrapper[4780]: E1210 10:48:03.298788 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-10 10:48:03.798739566 +0000 UTC m=+188.652133179 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:48:03 crc kubenswrapper[4780]: I1210 10:48:03.304260 4780 patch_prober.go:28] interesting pod/router-default-5444994796-qbmwm container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Dec 10 10:48:03 crc kubenswrapper[4780]: [-]has-synced failed: reason withheld Dec 10 10:48:03 crc kubenswrapper[4780]: [+]process-running ok Dec 10 10:48:03 crc kubenswrapper[4780]: healthz check failed Dec 10 10:48:03 crc kubenswrapper[4780]: I1210 10:48:03.304343 4780 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-qbmwm" podUID="1226614f-560f-40b9-81a2-595e79043653" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Dec 10 10:48:03 crc kubenswrapper[4780]: I1210 10:48:03.398569 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-25gsf\" (UID: \"b7abac51-adc5-42fa-9084-033e4e7e7acb\") " pod="openshift-image-registry/image-registry-697d97f7c8-25gsf" Dec 10 10:48:03 crc kubenswrapper[4780]: E1210 10:48:03.399136 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-10 10:48:03.899112245 +0000 UTC m=+188.752505688 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-25gsf" (UID: "b7abac51-adc5-42fa-9084-033e4e7e7acb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:48:03 crc kubenswrapper[4780]: I1210 10:48:03.469501 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-service-ca-operator/service-ca-operator-777779d784-skxsg" podStartSLOduration=159.469465881 podStartE2EDuration="2m39.469465881s" podCreationTimestamp="2025-12-10 10:45:24 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 10:48:03.38075626 +0000 UTC m=+188.234149703" watchObservedRunningTime="2025-12-10 10:48:03.469465881 +0000 UTC m=+188.322859324" Dec 10 10:48:03 crc kubenswrapper[4780]: I1210 10:48:03.509905 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 10 10:48:03 crc kubenswrapper[4780]: E1210 10:48:03.510494 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-10 10:48:04.010466522 +0000 UTC m=+188.863859965 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:48:03 crc kubenswrapper[4780]: I1210 10:48:03.549430 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-etcd-operator/etcd-operator-b45778765-lmmsq" podStartSLOduration=160.54939919 podStartE2EDuration="2m40.54939919s" podCreationTimestamp="2025-12-10 10:45:23 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 10:48:03.470405075 +0000 UTC m=+188.323798518" watchObservedRunningTime="2025-12-10 10:48:03.54939919 +0000 UTC m=+188.402792633" Dec 10 10:48:03 crc kubenswrapper[4780]: I1210 10:48:03.611759 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-25gsf\" (UID: \"b7abac51-adc5-42fa-9084-033e4e7e7acb\") " pod="openshift-image-registry/image-registry-697d97f7c8-25gsf" Dec 10 10:48:03 crc kubenswrapper[4780]: E1210 10:48:03.650784 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-10 10:48:04.150746284 +0000 UTC m=+189.004139727 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-25gsf" (UID: "b7abac51-adc5-42fa-9084-033e4e7e7acb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:48:03 crc kubenswrapper[4780]: I1210 10:48:03.713894 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 10 10:48:03 crc kubenswrapper[4780]: E1210 10:48:03.714150 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-10 10:48:04.214103446 +0000 UTC m=+189.067496889 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:48:03 crc kubenswrapper[4780]: I1210 10:48:03.714827 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-25gsf\" (UID: \"b7abac51-adc5-42fa-9084-033e4e7e7acb\") " pod="openshift-image-registry/image-registry-697d97f7c8-25gsf" Dec 10 10:48:03 crc kubenswrapper[4780]: E1210 10:48:03.715455 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-10 10:48:04.215439991 +0000 UTC m=+189.068833614 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-25gsf" (UID: "b7abac51-adc5-42fa-9084-033e4e7e7acb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:48:03 crc kubenswrapper[4780]: I1210 10:48:03.833383 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 10 10:48:03 crc kubenswrapper[4780]: E1210 10:48:03.833716 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-10 10:48:04.333690571 +0000 UTC m=+189.187084014 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:48:03 crc kubenswrapper[4780]: I1210 10:48:03.834314 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-25gsf\" (UID: \"b7abac51-adc5-42fa-9084-033e4e7e7acb\") " pod="openshift-image-registry/image-registry-697d97f7c8-25gsf" Dec 10 10:48:03 crc kubenswrapper[4780]: E1210 10:48:03.834758 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-10 10:48:04.334746259 +0000 UTC m=+189.188139882 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-25gsf" (UID: "b7abac51-adc5-42fa-9084-033e4e7e7acb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:48:03 crc kubenswrapper[4780]: I1210 10:48:03.837835 4780 patch_prober.go:28] interesting pod/router-default-5444994796-qbmwm container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Dec 10 10:48:03 crc kubenswrapper[4780]: [-]has-synced failed: reason withheld Dec 10 10:48:03 crc kubenswrapper[4780]: [+]process-running ok Dec 10 10:48:03 crc kubenswrapper[4780]: healthz check failed Dec 10 10:48:03 crc kubenswrapper[4780]: I1210 10:48:03.837911 4780 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-qbmwm" podUID="1226614f-560f-40b9-81a2-595e79043653" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Dec 10 10:48:03 crc kubenswrapper[4780]: I1210 10:48:03.850583 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-apiserver/apiserver-76f77b778f-9hgsw" podStartSLOduration=161.850545796 podStartE2EDuration="2m41.850545796s" podCreationTimestamp="2025-12-10 10:45:22 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 10:48:03.844940348 +0000 UTC m=+188.698333791" watchObservedRunningTime="2025-12-10 10:48:03.850545796 +0000 UTC m=+188.703939259" Dec 10 10:48:03 crc kubenswrapper[4780]: I1210 10:48:03.851090 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-rlc8b" podStartSLOduration=159.85107675 podStartE2EDuration="2m39.85107675s" podCreationTimestamp="2025-12-10 10:45:24 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 
00:00:00 +0000 UTC" observedRunningTime="2025-12-10 10:48:03.548222988 +0000 UTC m=+188.401616421" watchObservedRunningTime="2025-12-10 10:48:03.85107675 +0000 UTC m=+188.704470193" Dec 10 10:48:03 crc kubenswrapper[4780]: I1210 10:48:03.973659 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 10 10:48:03 crc kubenswrapper[4780]: E1210 10:48:03.974729 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-10 10:48:04.474693561 +0000 UTC m=+189.328087004 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:48:04 crc kubenswrapper[4780]: I1210 10:48:04.021738 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-4sfhd" podStartSLOduration=160.021711832 podStartE2EDuration="2m40.021711832s" podCreationTimestamp="2025-12-10 10:45:24 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 10:48:04.021411964 +0000 UTC m=+188.874805407" watchObservedRunningTime="2025-12-10 10:48:04.021711832 +0000 UTC m=+188.875105275" Dec 10 10:48:04 crc kubenswrapper[4780]: I1210 10:48:04.124789 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-25gsf\" (UID: \"b7abac51-adc5-42fa-9084-033e4e7e7acb\") " pod="openshift-image-registry/image-registry-697d97f7c8-25gsf" Dec 10 10:48:04 crc kubenswrapper[4780]: E1210 10:48:04.125566 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-10 10:48:04.625537372 +0000 UTC m=+189.478930815 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-25gsf" (UID: "b7abac51-adc5-42fa-9084-033e4e7e7acb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:48:04 crc kubenswrapper[4780]: I1210 10:48:04.220055 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-csjzj" podStartSLOduration=160.220010915 podStartE2EDuration="2m40.220010915s" podCreationTimestamp="2025-12-10 10:45:24 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 10:48:04.217329824 +0000 UTC m=+189.070723297" watchObservedRunningTime="2025-12-10 10:48:04.220010915 +0000 UTC m=+189.073404348" Dec 10 10:48:04 crc kubenswrapper[4780]: I1210 10:48:04.275805 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 10 10:48:04 crc kubenswrapper[4780]: E1210 10:48:04.276499 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-10 10:48:04.776473844 +0000 UTC m=+189.629867287 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:48:04 crc kubenswrapper[4780]: I1210 10:48:04.373976 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ingress-canary/ingress-canary-2r7qq" podStartSLOduration=22.373945367 podStartE2EDuration="22.373945367s" podCreationTimestamp="2025-12-10 10:47:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 10:48:04.37218437 +0000 UTC m=+189.225577823" watchObservedRunningTime="2025-12-10 10:48:04.373945367 +0000 UTC m=+189.227338810" Dec 10 10:48:04 crc kubenswrapper[4780]: I1210 10:48:04.383253 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-25gsf\" (UID: \"b7abac51-adc5-42fa-9084-033e4e7e7acb\") " pod="openshift-image-registry/image-registry-697d97f7c8-25gsf" Dec 10 10:48:04 crc kubenswrapper[4780]: E1210 10:48:04.383863 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-10 10:48:04.883843568 +0000 UTC m=+189.737237011 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-25gsf" (UID: "b7abac51-adc5-42fa-9084-033e4e7e7acb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:48:04 crc kubenswrapper[4780]: I1210 10:48:04.568343 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 10 10:48:04 crc kubenswrapper[4780]: E1210 10:48:04.568723 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-10 10:48:05.068704276 +0000 UTC m=+189.922097719 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:48:04 crc kubenswrapper[4780]: I1210 10:48:04.691552 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-56x4k" podStartSLOduration=161.691508897 podStartE2EDuration="2m41.691508897s" podCreationTimestamp="2025-12-10 10:45:23 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 10:48:04.52832276 +0000 UTC m=+189.381716203" watchObservedRunningTime="2025-12-10 10:48:04.691508897 +0000 UTC m=+189.544902340" Dec 10 10:48:04 crc kubenswrapper[4780]: I1210 10:48:04.696500 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-25gsf\" (UID: \"b7abac51-adc5-42fa-9084-033e4e7e7acb\") " pod="openshift-image-registry/image-registry-697d97f7c8-25gsf" Dec 10 10:48:04 crc kubenswrapper[4780]: E1210 10:48:04.696984 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-10 10:48:05.196963061 +0000 UTC m=+190.050356504 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-25gsf" (UID: "b7abac51-adc5-42fa-9084-033e4e7e7acb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:48:04 crc kubenswrapper[4780]: I1210 10:48:04.713158 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-st68f" podStartSLOduration=160.713088266 podStartE2EDuration="2m40.713088266s" podCreationTimestamp="2025-12-10 10:45:24 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 10:48:04.694261999 +0000 UTC m=+189.547655442" watchObservedRunningTime="2025-12-10 10:48:04.713088266 +0000 UTC m=+189.566481709" Dec 10 10:48:04 crc kubenswrapper[4780]: I1210 10:48:04.809191 4780 patch_prober.go:28] interesting pod/router-default-5444994796-qbmwm container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Dec 10 10:48:04 crc kubenswrapper[4780]: [-]has-synced failed: reason withheld Dec 10 10:48:04 crc kubenswrapper[4780]: [+]process-running ok Dec 10 10:48:04 crc kubenswrapper[4780]: healthz check failed Dec 10 10:48:04 crc kubenswrapper[4780]: I1210 10:48:04.809630 4780 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-qbmwm" podUID="1226614f-560f-40b9-81a2-595e79043653" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Dec 10 10:48:04 crc kubenswrapper[4780]: I1210 10:48:04.816456 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 10 10:48:04 crc kubenswrapper[4780]: E1210 10:48:04.816909 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-10 10:48:05.316887875 +0000 UTC m=+190.170281318 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:48:04 crc kubenswrapper[4780]: I1210 10:48:04.834614 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-dns-operator/dns-operator-744455d44c-6tqmn" podStartSLOduration=161.834467439 podStartE2EDuration="2m41.834467439s" podCreationTimestamp="2025-12-10 10:45:23 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 10:48:04.829655822 +0000 UTC m=+189.683049265" watchObservedRunningTime="2025-12-10 10:48:04.834467439 +0000 UTC m=+189.687860892" Dec 10 10:48:05 crc kubenswrapper[4780]: I1210 10:48:05.015111 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-dgx4k" podStartSLOduration=162.015080325 podStartE2EDuration="2m42.015080325s" podCreationTimestamp="2025-12-10 10:45:23 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 10:48:05.012069025 +0000 UTC m=+189.865462478" watchObservedRunningTime="2025-12-10 10:48:05.015080325 +0000 UTC m=+189.868473768" Dec 10 10:48:05 crc kubenswrapper[4780]: I1210 10:48:05.057283 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-25gsf\" (UID: \"b7abac51-adc5-42fa-9084-033e4e7e7acb\") " pod="openshift-image-registry/image-registry-697d97f7c8-25gsf" Dec 10 10:48:05 crc kubenswrapper[4780]: E1210 10:48:05.057782 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-10 10:48:05.557765091 +0000 UTC m=+190.411158534 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-25gsf" (UID: "b7abac51-adc5-42fa-9084-033e4e7e7acb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:48:05 crc kubenswrapper[4780]: I1210 10:48:05.111526 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/marketplace-operator-79b997595-lrzpt" podStartSLOduration=161.111492469 podStartE2EDuration="2m41.111492469s" podCreationTimestamp="2025-12-10 10:45:24 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 10:48:05.103589121 +0000 UTC m=+189.956982554" watchObservedRunningTime="2025-12-10 10:48:05.111492469 +0000 UTC m=+189.964885922" Dec 10 10:48:05 crc kubenswrapper[4780]: I1210 10:48:05.120989 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console-operator/console-operator-58897d9998-52wql" Dec 10 10:48:05 crc kubenswrapper[4780]: I1210 10:48:05.162759 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 10 10:48:05 crc kubenswrapper[4780]: E1210 10:48:05.164019 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-10 10:48:05.663986064 +0000 UTC m=+190.517379507 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:48:05 crc kubenswrapper[4780]: I1210 10:48:05.164136 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-dgfj7" Dec 10 10:48:05 crc kubenswrapper[4780]: I1210 10:48:05.168296 4780 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-dgfj7" Dec 10 10:48:05 crc kubenswrapper[4780]: I1210 10:48:05.187997 4780 patch_prober.go:28] interesting pod/apiserver-7bbb656c7d-dgfj7 container/oauth-apiserver namespace/openshift-oauth-apiserver: Startup probe status=failure output="Get \"https://10.217.0.6:8443/livez\": dial tcp 10.217.0.6:8443: connect: connection refused" start-of-body= Dec 10 10:48:05 crc kubenswrapper[4780]: I1210 10:48:05.188131 4780 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-dgfj7" podUID="135e9ec7-c882-4894-a24e-669b09be3f5b" containerName="oauth-apiserver" probeResult="failure" output="Get \"https://10.217.0.6:8443/livez\": dial tcp 10.217.0.6:8443: connect: connection refused" Dec 10 10:48:05 crc kubenswrapper[4780]: I1210 10:48:05.198309 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/downloads-7954f5f757-fxxzq" Dec 10 10:48:05 crc kubenswrapper[4780]: I1210 10:48:05.204528 4780 patch_prober.go:28] interesting pod/controller-manager-879f6c89f-njfgs container/controller-manager namespace/openshift-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.9:8443/healthz\": dial tcp 10.217.0.9:8443: connect: connection refused" start-of-body= Dec 10 10:48:05 crc kubenswrapper[4780]: I1210 10:48:05.204603 4780 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-controller-manager/controller-manager-879f6c89f-njfgs" podUID="0d2560a1-1eb3-4fad-89c4-100985ef6455" containerName="controller-manager" probeResult="failure" output="Get \"https://10.217.0.9:8443/healthz\": dial tcp 10.217.0.9:8443: connect: connection refused" Dec 10 10:48:05 crc kubenswrapper[4780]: I1210 10:48:05.205010 4780 patch_prober.go:28] interesting pod/downloads-7954f5f757-fxxzq container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.10:8080/\": dial tcp 10.217.0.10:8080: connect: connection refused" start-of-body= Dec 10 10:48:05 crc kubenswrapper[4780]: I1210 10:48:05.205052 4780 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-fxxzq" podUID="09fe7cda-3948-484d-bcd9-e83d1ac0610a" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.10:8080/\": dial tcp 10.217.0.10:8080: connect: connection refused" Dec 10 10:48:05 crc kubenswrapper[4780]: I1210 10:48:05.205262 4780 patch_prober.go:28] interesting pod/downloads-7954f5f757-fxxzq container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.10:8080/\": dial tcp 10.217.0.10:8080: connect: connection refused" start-of-body= Dec 10 10:48:05 crc 
kubenswrapper[4780]: I1210 10:48:05.205296 4780 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-fxxzq" podUID="09fe7cda-3948-484d-bcd9-e83d1ac0610a" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.10:8080/\": dial tcp 10.217.0.10:8080: connect: connection refused" Dec 10 10:48:05 crc kubenswrapper[4780]: I1210 10:48:05.205478 4780 patch_prober.go:28] interesting pod/downloads-7954f5f757-fxxzq container/download-server namespace/openshift-console: Liveness probe status=failure output="Get \"http://10.217.0.10:8080/\": dial tcp 10.217.0.10:8080: connect: connection refused" start-of-body= Dec 10 10:48:05 crc kubenswrapper[4780]: I1210 10:48:05.205507 4780 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console/downloads-7954f5f757-fxxzq" podUID="09fe7cda-3948-484d-bcd9-e83d1ac0610a" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.10:8080/\": dial tcp 10.217.0.10:8080: connect: connection refused" Dec 10 10:48:05 crc kubenswrapper[4780]: I1210 10:48:05.217053 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-jmkwd" podStartSLOduration=161.217024354 podStartE2EDuration="2m41.217024354s" podCreationTimestamp="2025-12-10 10:45:24 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 10:48:05.216409287 +0000 UTC m=+190.069802730" watchObservedRunningTime="2025-12-10 10:48:05.217024354 +0000 UTC m=+190.070417797" Dec 10 10:48:05 crc kubenswrapper[4780]: I1210 10:48:05.287176 4780 patch_prober.go:28] interesting pod/route-controller-manager-6576b87f9c-fw9bc container/route-controller-manager namespace/openshift-route-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.7:8443/healthz\": dial tcp 10.217.0.7:8443: connect: connection refused" start-of-body= Dec 10 10:48:05 crc kubenswrapper[4780]: I1210 10:48:05.287275 4780 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-fw9bc" podUID="fb9e3c6f-29bf-49b8-a0c0-c17447e36e99" containerName="route-controller-manager" probeResult="failure" output="Get \"https://10.217.0.7:8443/healthz\": dial tcp 10.217.0.7:8443: connect: connection refused" Dec 10 10:48:05 crc kubenswrapper[4780]: E1210 10:48:05.288938 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-10 10:48:05.78889908 +0000 UTC m=+190.642292523 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-25gsf" (UID: "b7abac51-adc5-42fa-9084-033e4e7e7acb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:48:05 crc kubenswrapper[4780]: I1210 10:48:05.289360 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-25gsf\" (UID: \"b7abac51-adc5-42fa-9084-033e4e7e7acb\") " pod="openshift-image-registry/image-registry-697d97f7c8-25gsf" Dec 10 10:48:05 crc kubenswrapper[4780]: I1210 10:48:05.391309 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 10 10:48:05 crc kubenswrapper[4780]: E1210 10:48:05.395009 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-10 10:48:05.89498362 +0000 UTC m=+190.748377063 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:48:05 crc kubenswrapper[4780]: I1210 10:48:05.612211 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-config-operator/openshift-config-operator-7777fb866f-q6q9q" podStartSLOduration=162.612175011 podStartE2EDuration="2m42.612175011s" podCreationTimestamp="2025-12-10 10:45:23 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 10:48:05.364382592 +0000 UTC m=+190.217776045" watchObservedRunningTime="2025-12-10 10:48:05.612175011 +0000 UTC m=+190.465568454" Dec 10 10:48:05 crc kubenswrapper[4780]: I1210 10:48:05.615152 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-25gsf\" (UID: \"b7abac51-adc5-42fa-9084-033e4e7e7acb\") " pod="openshift-image-registry/image-registry-697d97f7c8-25gsf" Dec 10 10:48:05 crc kubenswrapper[4780]: E1210 10:48:05.615864 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. 
No retries permitted until 2025-12-10 10:48:06.115834337 +0000 UTC m=+190.969227780 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-25gsf" (UID: "b7abac51-adc5-42fa-9084-033e4e7e7acb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:48:06 crc kubenswrapper[4780]: I1210 10:48:06.207942 4780 patch_prober.go:28] interesting pod/openshift-config-operator-7777fb866f-q6q9q container/openshift-config-operator namespace/openshift-config-operator: Readiness probe status=failure output="Get \"https://10.217.0.20:8443/healthz\": dial tcp 10.217.0.20:8443: connect: connection refused" start-of-body= Dec 10 10:48:06 crc kubenswrapper[4780]: I1210 10:48:06.208031 4780 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-config-operator/openshift-config-operator-7777fb866f-q6q9q" podUID="027ecd1e-0802-4c3a-b42a-4e272ee3f6fc" containerName="openshift-config-operator" probeResult="failure" output="Get \"https://10.217.0.20:8443/healthz\": dial tcp 10.217.0.20:8443: connect: connection refused" Dec 10 10:48:06 crc kubenswrapper[4780]: I1210 10:48:06.210074 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 10 10:48:06 crc kubenswrapper[4780]: E1210 10:48:06.210568 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-10 10:48:07.21054851 +0000 UTC m=+192.063941953 (durationBeforeRetry 1s). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:48:06 crc kubenswrapper[4780]: I1210 10:48:06.210975 4780 patch_prober.go:28] interesting pod/openshift-config-operator-7777fb866f-q6q9q container/openshift-config-operator namespace/openshift-config-operator: Liveness probe status=failure output="Get \"https://10.217.0.20:8443/healthz\": dial tcp 10.217.0.20:8443: connect: connection refused" start-of-body= Dec 10 10:48:06 crc kubenswrapper[4780]: I1210 10:48:06.211013 4780 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-config-operator/openshift-config-operator-7777fb866f-q6q9q" podUID="027ecd1e-0802-4c3a-b42a-4e272ee3f6fc" containerName="openshift-config-operator" probeResult="failure" output="Get \"https://10.217.0.20:8443/healthz\": dial tcp 10.217.0.20:8443: connect: connection refused" Dec 10 10:48:06 crc kubenswrapper[4780]: I1210 10:48:06.212639 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-nfr2m" podStartSLOduration=163.212613305 podStartE2EDuration="2m43.212613305s" podCreationTimestamp="2025-12-10 10:45:23 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 10:48:06.209534693 +0000 UTC m=+191.062928136" watchObservedRunningTime="2025-12-10 10:48:06.212613305 +0000 UTC m=+191.066006748" Dec 10 10:48:06 crc kubenswrapper[4780]: I1210 10:48:06.213908 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-hx2j2" podStartSLOduration=162.213899199 podStartE2EDuration="2m42.213899199s" podCreationTimestamp="2025-12-10 10:45:24 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 10:48:05.614480741 +0000 UTC m=+190.467874204" watchObservedRunningTime="2025-12-10 10:48:06.213899199 +0000 UTC m=+191.067292642" Dec 10 10:48:06 crc kubenswrapper[4780]: I1210 10:48:06.214991 4780 patch_prober.go:28] interesting pod/console-f9d7485db-2dwc9 container/console namespace/openshift-console: Startup probe status=failure output="Get \"https://10.217.0.35:8443/health\": dial tcp 10.217.0.35:8443: connect: connection refused" start-of-body= Dec 10 10:48:06 crc kubenswrapper[4780]: I1210 10:48:06.215095 4780 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-console/console-f9d7485db-2dwc9" podUID="c79bfa07-4a71-4560-b706-ac6c81b10ddc" containerName="console" probeResult="failure" output="Get \"https://10.217.0.35:8443/health\": dial tcp 10.217.0.35:8443: connect: connection refused" Dec 10 10:48:06 crc kubenswrapper[4780]: I1210 10:48:06.249318 4780 patch_prober.go:28] interesting pod/router-default-5444994796-qbmwm container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Dec 10 10:48:06 crc kubenswrapper[4780]: [-]has-synced failed: reason withheld Dec 10 10:48:06 crc 
kubenswrapper[4780]: [+]process-running ok Dec 10 10:48:06 crc kubenswrapper[4780]: healthz check failed Dec 10 10:48:06 crc kubenswrapper[4780]: I1210 10:48:06.249411 4780 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-qbmwm" podUID="1226614f-560f-40b9-81a2-595e79043653" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Dec 10 10:48:06 crc kubenswrapper[4780]: I1210 10:48:06.277025 4780 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-console/console-f9d7485db-2dwc9" Dec 10 10:48:06 crc kubenswrapper[4780]: I1210 10:48:06.277085 4780 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-config-operator/openshift-config-operator-7777fb866f-q6q9q" Dec 10 10:48:06 crc kubenswrapper[4780]: I1210 10:48:06.277099 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/console-f9d7485db-2dwc9" Dec 10 10:48:06 crc kubenswrapper[4780]: I1210 10:48:06.277627 4780 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="openshift-config-operator" containerStatusID={"Type":"cri-o","ID":"5af94e3d01ed1885e1fac4fa2df29f95bf242797062a1e13799f3ef62e005536"} pod="openshift-config-operator/openshift-config-operator-7777fb866f-q6q9q" containerMessage="Container openshift-config-operator failed liveness probe, will be restarted" Dec 10 10:48:06 crc kubenswrapper[4780]: I1210 10:48:06.278027 4780 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-config-operator/openshift-config-operator-7777fb866f-q6q9q" podUID="027ecd1e-0802-4c3a-b42a-4e272ee3f6fc" containerName="openshift-config-operator" containerID="cri-o://5af94e3d01ed1885e1fac4fa2df29f95bf242797062a1e13799f3ef62e005536" gracePeriod=30 Dec 10 10:48:06 crc kubenswrapper[4780]: I1210 10:48:06.279459 4780 patch_prober.go:28] interesting pod/openshift-config-operator-7777fb866f-q6q9q container/openshift-config-operator namespace/openshift-config-operator: Readiness probe status=failure output="Get \"https://10.217.0.20:8443/healthz\": dial tcp 10.217.0.20:8443: connect: connection refused" start-of-body= Dec 10 10:48:06 crc kubenswrapper[4780]: I1210 10:48:06.279531 4780 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-config-operator/openshift-config-operator-7777fb866f-q6q9q" podUID="027ecd1e-0802-4c3a-b42a-4e272ee3f6fc" containerName="openshift-config-operator" probeResult="failure" output="Get \"https://10.217.0.20:8443/healthz\": dial tcp 10.217.0.20:8443: connect: connection refused" Dec 10 10:48:06 crc kubenswrapper[4780]: I1210 10:48:06.352895 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 10 10:48:06 crc kubenswrapper[4780]: E1210 10:48:06.354221 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-10 10:48:06.854194591 +0000 UTC m=+191.707588034 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:48:06 crc kubenswrapper[4780]: I1210 10:48:06.461501 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-25gsf\" (UID: \"b7abac51-adc5-42fa-9084-033e4e7e7acb\") " pod="openshift-image-registry/image-registry-697d97f7c8-25gsf" Dec 10 10:48:06 crc kubenswrapper[4780]: E1210 10:48:06.462205 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-10 10:48:06.9621802 +0000 UTC m=+191.815573643 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-25gsf" (UID: "b7abac51-adc5-42fa-9084-033e4e7e7acb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:48:06 crc kubenswrapper[4780]: I1210 10:48:06.569164 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 10 10:48:06 crc kubenswrapper[4780]: E1210 10:48:06.570347 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-10 10:48:07.070310633 +0000 UTC m=+191.923704076 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:48:06 crc kubenswrapper[4780]: I1210 10:48:06.671273 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-25gsf\" (UID: \"b7abac51-adc5-42fa-9084-033e4e7e7acb\") " pod="openshift-image-registry/image-registry-697d97f7c8-25gsf" Dec 10 10:48:06 crc kubenswrapper[4780]: E1210 10:48:06.671819 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-10 10:48:07.171792131 +0000 UTC m=+192.025185574 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-25gsf" (UID: "b7abac51-adc5-42fa-9084-033e4e7e7acb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:48:06 crc kubenswrapper[4780]: I1210 10:48:06.721790 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-8-crc"] Dec 10 10:48:06 crc kubenswrapper[4780]: I1210 10:48:06.730321 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-service-ca/service-ca-9c57cc56f-6z8d8" podStartSLOduration=162.730282395 podStartE2EDuration="2m42.730282395s" podCreationTimestamp="2025-12-10 10:45:24 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 10:48:06.712168507 +0000 UTC m=+191.565561950" watchObservedRunningTime="2025-12-10 10:48:06.730282395 +0000 UTC m=+191.583675838" Dec 10 10:48:06 crc kubenswrapper[4780]: I1210 10:48:06.741207 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-nmvlq" podStartSLOduration=163.741182032 podStartE2EDuration="2m43.741182032s" podCreationTimestamp="2025-12-10 10:45:23 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 10:48:06.739622531 +0000 UTC m=+191.593015974" watchObservedRunningTime="2025-12-10 10:48:06.741182032 +0000 UTC m=+191.594575475" Dec 10 10:48:06 crc kubenswrapper[4780]: I1210 10:48:06.773026 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 10 10:48:06 crc kubenswrapper[4780]: E1210 10:48:06.773504 4780 
nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-10 10:48:07.273484685 +0000 UTC m=+192.126878128 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:48:06 crc kubenswrapper[4780]: I1210 10:48:06.778791 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-c8dbv" podStartSLOduration=164.778768384 podStartE2EDuration="2m44.778768384s" podCreationTimestamp="2025-12-10 10:45:22 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 10:48:06.776131704 +0000 UTC m=+191.629525148" watchObservedRunningTime="2025-12-10 10:48:06.778768384 +0000 UTC m=+191.632161827" Dec 10 10:48:06 crc kubenswrapper[4780]: I1210 10:48:06.797567 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29422725-lvhr4" podStartSLOduration=163.797536159 podStartE2EDuration="2m43.797536159s" podCreationTimestamp="2025-12-10 10:45:23 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 10:48:06.795129096 +0000 UTC m=+191.648522539" watchObservedRunningTime="2025-12-10 10:48:06.797536159 +0000 UTC m=+191.650929602" Dec 10 10:48:06 crc kubenswrapper[4780]: I1210 10:48:06.819332 4780 patch_prober.go:28] interesting pod/router-default-5444994796-qbmwm container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Dec 10 10:48:06 crc kubenswrapper[4780]: [-]has-synced failed: reason withheld Dec 10 10:48:06 crc kubenswrapper[4780]: [+]process-running ok Dec 10 10:48:06 crc kubenswrapper[4780]: healthz check failed Dec 10 10:48:06 crc kubenswrapper[4780]: I1210 10:48:06.819431 4780 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-qbmwm" podUID="1226614f-560f-40b9-81a2-595e79043653" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Dec 10 10:48:06 crc kubenswrapper[4780]: I1210 10:48:06.845706 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-authentication/oauth-openshift-558db77b4-qllmj" podStartSLOduration=164.84567835000001 podStartE2EDuration="2m44.84567835s" podCreationTimestamp="2025-12-10 10:45:22 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 10:48:06.842624949 +0000 UTC m=+191.696018402" watchObservedRunningTime="2025-12-10 10:48:06.84567835 +0000 UTC m=+191.699071783" Dec 10 10:48:06 crc kubenswrapper[4780]: I1210 10:48:06.938227 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for 
volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-25gsf\" (UID: \"b7abac51-adc5-42fa-9084-033e4e7e7acb\") " pod="openshift-image-registry/image-registry-697d97f7c8-25gsf" Dec 10 10:48:06 crc kubenswrapper[4780]: E1210 10:48:06.939076 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-10 10:48:07.439054894 +0000 UTC m=+192.292448327 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-25gsf" (UID: "b7abac51-adc5-42fa-9084-033e4e7e7acb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:48:07 crc kubenswrapper[4780]: I1210 10:48:07.020307 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-4sfhd" Dec 10 10:48:07 crc kubenswrapper[4780]: I1210 10:48:07.092305 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 10 10:48:07 crc kubenswrapper[4780]: I1210 10:48:07.093470 4780 patch_prober.go:28] interesting pod/catalog-operator-68c6474976-4sfhd container/catalog-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.27:8443/healthz\": dial tcp 10.217.0.27:8443: connect: connection refused" start-of-body= Dec 10 10:48:07 crc kubenswrapper[4780]: I1210 10:48:07.093540 4780 patch_prober.go:28] interesting pod/catalog-operator-68c6474976-4sfhd container/catalog-operator namespace/openshift-operator-lifecycle-manager: Liveness probe status=failure output="Get \"https://10.217.0.27:8443/healthz\": dial tcp 10.217.0.27:8443: connect: connection refused" start-of-body= Dec 10 10:48:07 crc kubenswrapper[4780]: I1210 10:48:07.093545 4780 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-4sfhd" podUID="7753484d-d587-473d-8630-20cefaad3c7c" containerName="catalog-operator" probeResult="failure" output="Get \"https://10.217.0.27:8443/healthz\": dial tcp 10.217.0.27:8443: connect: connection refused" Dec 10 10:48:07 crc kubenswrapper[4780]: I1210 10:48:07.093611 4780 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-4sfhd" podUID="7753484d-d587-473d-8630-20cefaad3c7c" containerName="catalog-operator" probeResult="failure" output="Get \"https://10.217.0.27:8443/healthz\": dial tcp 10.217.0.27:8443: connect: connection refused" Dec 10 10:48:07 crc kubenswrapper[4780]: E1210 10:48:07.093995 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" 
failed. No retries permitted until 2025-12-10 10:48:07.593944701 +0000 UTC m=+192.447338144 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:48:07 crc kubenswrapper[4780]: I1210 10:48:07.094063 4780 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-lrzpt container/marketplace-operator namespace/openshift-marketplace: Liveness probe status=failure output="Get \"http://10.217.0.38:8080/healthz\": dial tcp 10.217.0.38:8080: connect: connection refused" start-of-body= Dec 10 10:48:07 crc kubenswrapper[4780]: I1210 10:48:07.094115 4780 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-marketplace/marketplace-operator-79b997595-lrzpt" podUID="90ac2cea-e1c2-479b-8de0-0917f3779a13" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.38:8080/healthz\": dial tcp 10.217.0.38:8080: connect: connection refused" Dec 10 10:48:07 crc kubenswrapper[4780]: I1210 10:48:07.094205 4780 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-lrzpt container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.38:8080/healthz\": dial tcp 10.217.0.38:8080: connect: connection refused" start-of-body= Dec 10 10:48:07 crc kubenswrapper[4780]: I1210 10:48:07.094384 4780 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-lrzpt" podUID="90ac2cea-e1c2-479b-8de0-0917f3779a13" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.38:8080/healthz\": dial tcp 10.217.0.38:8080: connect: connection refused" Dec 10 10:48:07 crc kubenswrapper[4780]: I1210 10:48:07.094878 4780 patch_prober.go:28] interesting pod/catalog-operator-68c6474976-4sfhd container/catalog-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.27:8443/healthz\": dial tcp 10.217.0.27:8443: connect: connection refused" start-of-body= Dec 10 10:48:07 crc kubenswrapper[4780]: I1210 10:48:07.094958 4780 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-4sfhd" podUID="7753484d-d587-473d-8630-20cefaad3c7c" containerName="catalog-operator" probeResult="failure" output="Get \"https://10.217.0.27:8443/healthz\": dial tcp 10.217.0.27:8443: connect: connection refused" Dec 10 10:48:07 crc kubenswrapper[4780]: I1210 10:48:07.100839 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-x9cz8" podStartSLOduration=164.100800022 podStartE2EDuration="2m44.100800022s" podCreationTimestamp="2025-12-10 10:45:23 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 10:48:07.095197804 +0000 UTC m=+191.948591247" watchObservedRunningTime="2025-12-10 10:48:07.100800022 +0000 UTC m=+191.954193465" Dec 10 10:48:07 crc kubenswrapper[4780]: I1210 10:48:07.117757 4780 kubelet.go:2542] "SyncLoop (probe)" 
probe="readiness" status="ready" pod="openshift-dns/dns-default-snzm9" Dec 10 10:48:07 crc kubenswrapper[4780]: I1210 10:48:07.196594 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-25gsf\" (UID: \"b7abac51-adc5-42fa-9084-033e4e7e7acb\") " pod="openshift-image-registry/image-registry-697d97f7c8-25gsf" Dec 10 10:48:07 crc kubenswrapper[4780]: E1210 10:48:07.199644 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-10 10:48:07.699614999 +0000 UTC m=+192.553008662 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-25gsf" (UID: "b7abac51-adc5-42fa-9084-033e4e7e7acb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:48:07 crc kubenswrapper[4780]: I1210 10:48:07.213482 4780 patch_prober.go:28] interesting pod/oauth-openshift-558db77b4-qllmj container/oauth-openshift namespace/openshift-authentication: Readiness probe status=failure output="Get \"https://10.217.0.41:6443/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body= Dec 10 10:48:07 crc kubenswrapper[4780]: I1210 10:48:07.213574 4780 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-authentication/oauth-openshift-558db77b4-qllmj" podUID="6e560d85-bf0c-4604-9f52-d46fe96b6fe7" containerName="oauth-openshift" probeResult="failure" output="Get \"https://10.217.0.41:6443/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Dec 10 10:48:07 crc kubenswrapper[4780]: I1210 10:48:07.391813 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 10 10:48:07 crc kubenswrapper[4780]: E1210 10:48:07.393304 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-10 10:48:07.89328025 +0000 UTC m=+192.746673693 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:48:07 crc kubenswrapper[4780]: I1210 10:48:07.400303 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-config-operator_openshift-config-operator-7777fb866f-q6q9q_027ecd1e-0802-4c3a-b42a-4e272ee3f6fc/openshift-config-operator/0.log" Dec 10 10:48:07 crc kubenswrapper[4780]: I1210 10:48:07.403509 4780 generic.go:334] "Generic (PLEG): container finished" podID="027ecd1e-0802-4c3a-b42a-4e272ee3f6fc" containerID="5af94e3d01ed1885e1fac4fa2df29f95bf242797062a1e13799f3ef62e005536" exitCode=2 Dec 10 10:48:07 crc kubenswrapper[4780]: I1210 10:48:07.403873 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-q6q9q" event={"ID":"027ecd1e-0802-4c3a-b42a-4e272ee3f6fc","Type":"ContainerDied","Data":"5af94e3d01ed1885e1fac4fa2df29f95bf242797062a1e13799f3ef62e005536"} Dec 10 10:48:07 crc kubenswrapper[4780]: I1210 10:48:07.407482 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"d5885af1-d160-445e-9c10-fb97487ef916","Type":"ContainerStarted","Data":"989048f8991592f53e74997c80b7d4484c1d1c19f4537e13e85079d2a6288456"} Dec 10 10:48:07 crc kubenswrapper[4780]: I1210 10:48:07.494071 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-25gsf\" (UID: \"b7abac51-adc5-42fa-9084-033e4e7e7acb\") " pod="openshift-image-registry/image-registry-697d97f7c8-25gsf" Dec 10 10:48:07 crc kubenswrapper[4780]: E1210 10:48:07.495340 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-10 10:48:07.995305701 +0000 UTC m=+192.848699144 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-25gsf" (UID: "b7abac51-adc5-42fa-9084-033e4e7e7acb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:48:07 crc kubenswrapper[4780]: I1210 10:48:07.599064 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 10 10:48:07 crc kubenswrapper[4780]: E1210 10:48:07.599834 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-10 10:48:08.099809148 +0000 UTC m=+192.953202591 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:48:07 crc kubenswrapper[4780]: I1210 10:48:07.700721 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-25gsf\" (UID: \"b7abac51-adc5-42fa-9084-033e4e7e7acb\") " pod="openshift-image-registry/image-registry-697d97f7c8-25gsf" Dec 10 10:48:07 crc kubenswrapper[4780]: E1210 10:48:07.701171 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-10 10:48:08.201154462 +0000 UTC m=+193.054547905 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-25gsf" (UID: "b7abac51-adc5-42fa-9084-033e4e7e7acb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:48:07 crc kubenswrapper[4780]: I1210 10:48:07.784601 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/downloads-7954f5f757-fxxzq" podStartSLOduration=164.784561763 podStartE2EDuration="2m44.784561763s" podCreationTimestamp="2025-12-10 10:45:23 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 10:48:07.706180345 +0000 UTC m=+192.559573778" watchObservedRunningTime="2025-12-10 10:48:07.784561763 +0000 UTC m=+192.637955206" Dec 10 10:48:07 crc kubenswrapper[4780]: I1210 10:48:07.856772 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 10 10:48:07 crc kubenswrapper[4780]: E1210 10:48:07.857147 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-10 10:48:08.357103007 +0000 UTC m=+193.210496450 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:48:07 crc kubenswrapper[4780]: I1210 10:48:07.857495 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-25gsf\" (UID: \"b7abac51-adc5-42fa-9084-033e4e7e7acb\") " pod="openshift-image-registry/image-registry-697d97f7c8-25gsf" Dec 10 10:48:07 crc kubenswrapper[4780]: E1210 10:48:07.858188 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-10 10:48:08.358173056 +0000 UTC m=+193.211566509 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-25gsf" (UID: "b7abac51-adc5-42fa-9084-033e4e7e7acb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:48:07 crc kubenswrapper[4780]: I1210 10:48:07.861289 4780 patch_prober.go:28] interesting pod/router-default-5444994796-qbmwm container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Dec 10 10:48:07 crc kubenswrapper[4780]: [-]has-synced failed: reason withheld Dec 10 10:48:07 crc kubenswrapper[4780]: [+]process-running ok Dec 10 10:48:07 crc kubenswrapper[4780]: healthz check failed Dec 10 10:48:07 crc kubenswrapper[4780]: I1210 10:48:07.861356 4780 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-qbmwm" podUID="1226614f-560f-40b9-81a2-595e79043653" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Dec 10 10:48:07 crc kubenswrapper[4780]: I1210 10:48:07.874811 4780 patch_prober.go:28] interesting pod/apiserver-76f77b778f-9hgsw container/openshift-apiserver namespace/openshift-apiserver: Startup probe status=failure output="Get \"https://10.217.0.5:8443/livez\": dial tcp 10.217.0.5:8443: connect: connection refused" start-of-body= Dec 10 10:48:07 crc kubenswrapper[4780]: I1210 10:48:07.874893 4780 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-apiserver/apiserver-76f77b778f-9hgsw" podUID="5464f7ae-9634-4208-a5f5-3e6299f72639" containerName="openshift-apiserver" probeResult="failure" output="Get \"https://10.217.0.5:8443/livez\": dial tcp 10.217.0.5:8443: connect: connection refused" Dec 10 10:48:07 crc kubenswrapper[4780]: I1210 10:48:07.969064 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 10 10:48:07 crc kubenswrapper[4780]: E1210 10:48:07.969616 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-10 10:48:08.469575426 +0000 UTC m=+193.322968869 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:48:07 crc kubenswrapper[4780]: I1210 10:48:07.969911 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-25gsf\" (UID: \"b7abac51-adc5-42fa-9084-033e4e7e7acb\") " pod="openshift-image-registry/image-registry-697d97f7c8-25gsf" Dec 10 10:48:07 crc kubenswrapper[4780]: E1210 10:48:07.970548 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-10 10:48:08.470524891 +0000 UTC m=+193.323918344 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-25gsf" (UID: "b7abac51-adc5-42fa-9084-033e4e7e7acb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:48:08 crc kubenswrapper[4780]: I1210 10:48:08.116639 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 10 10:48:08 crc kubenswrapper[4780]: E1210 10:48:08.117296 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-10 10:48:08.617250892 +0000 UTC m=+193.470644335 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:48:08 crc kubenswrapper[4780]: I1210 10:48:08.233298 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-25gsf\" (UID: \"b7abac51-adc5-42fa-9084-033e4e7e7acb\") " pod="openshift-image-registry/image-registry-697d97f7c8-25gsf" Dec 10 10:48:08 crc kubenswrapper[4780]: E1210 10:48:08.233964 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-10 10:48:08.733944602 +0000 UTC m=+193.587338045 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-25gsf" (UID: "b7abac51-adc5-42fa-9084-033e4e7e7acb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:48:08 crc kubenswrapper[4780]: I1210 10:48:08.242111 4780 patch_prober.go:28] interesting pod/olm-operator-6b444d44fb-st68f container/olm-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.34:8443/healthz\": dial tcp 10.217.0.34:8443: connect: connection refused" start-of-body= Dec 10 10:48:08 crc kubenswrapper[4780]: I1210 10:48:08.242216 4780 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-st68f" podUID="007684b0-7499-47ad-b2d4-27051578f4cc" containerName="olm-operator" probeResult="failure" output="Get \"https://10.217.0.34:8443/healthz\": dial tcp 10.217.0.34:8443: connect: connection refused" Dec 10 10:48:08 crc kubenswrapper[4780]: I1210 10:48:08.242548 4780 patch_prober.go:28] interesting pod/olm-operator-6b444d44fb-st68f container/olm-operator namespace/openshift-operator-lifecycle-manager: Liveness probe status=failure output="Get \"https://10.217.0.34:8443/healthz\": dial tcp 10.217.0.34:8443: connect: connection refused" start-of-body= Dec 10 10:48:08 crc kubenswrapper[4780]: I1210 10:48:08.242569 4780 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-st68f" podUID="007684b0-7499-47ad-b2d4-27051578f4cc" containerName="olm-operator" probeResult="failure" output="Get \"https://10.217.0.34:8443/healthz\": dial tcp 10.217.0.34:8443: connect: connection refused" Dec 10 10:48:08 crc kubenswrapper[4780]: I1210 10:48:08.272120 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-kc2sh" podStartSLOduration=165.272079018 podStartE2EDuration="2m45.272079018s" podCreationTimestamp="2025-12-10 
10:45:23 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 10:48:08.189939111 +0000 UTC m=+193.043332554" watchObservedRunningTime="2025-12-10 10:48:08.272079018 +0000 UTC m=+193.125472461" Dec 10 10:48:08 crc kubenswrapper[4780]: I1210 10:48:08.273793 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-controller-manager/revision-pruner-9-crc"] Dec 10 10:48:08 crc kubenswrapper[4780]: I1210 10:48:08.275165 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc" Dec 10 10:48:08 crc kubenswrapper[4780]: I1210 10:48:08.334786 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 10 10:48:08 crc kubenswrapper[4780]: E1210 10:48:08.335443 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-10 10:48:08.83542229 +0000 UTC m=+193.688815723 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:48:08 crc kubenswrapper[4780]: I1210 10:48:08.353012 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager"/"installer-sa-dockercfg-kjl2n" Dec 10 10:48:08 crc kubenswrapper[4780]: I1210 10:48:08.353325 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager"/"kube-root-ca.crt" Dec 10 10:48:08 crc kubenswrapper[4780]: I1210 10:48:08.354030 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager/revision-pruner-9-crc"] Dec 10 10:48:08 crc kubenswrapper[4780]: I1210 10:48:08.409555 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-6ffpq" podStartSLOduration=165.409530605 podStartE2EDuration="2m45.409530605s" podCreationTimestamp="2025-12-10 10:45:23 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 10:48:08.404526863 +0000 UTC m=+193.257920326" watchObservedRunningTime="2025-12-10 10:48:08.409530605 +0000 UTC m=+193.262924048" Dec 10 10:48:08 crc kubenswrapper[4780]: I1210 10:48:08.419223 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-config-operator_openshift-config-operator-7777fb866f-q6q9q_027ecd1e-0802-4c3a-b42a-4e272ee3f6fc/openshift-config-operator/0.log" Dec 10 10:48:08 crc kubenswrapper[4780]: I1210 10:48:08.419711 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-config-operator/openshift-config-operator-7777fb866f-q6q9q" event={"ID":"027ecd1e-0802-4c3a-b42a-4e272ee3f6fc","Type":"ContainerStarted","Data":"4b99ed9caedd844fcba1558ac0010a5dc7cb1606cc278dc5b59046d103db1697"} Dec 10 10:48:08 crc kubenswrapper[4780]: I1210 10:48:08.421422 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"d5885af1-d160-445e-9c10-fb97487ef916","Type":"ContainerStarted","Data":"6f4e91a6206467089e1dc855961936e434a6b0ea809cdba164e9b3d3ceb0f40f"} Dec 10 10:48:08 crc kubenswrapper[4780]: I1210 10:48:08.423912 4780 generic.go:334] "Generic (PLEG): container finished" podID="536a1b3e-4cda-4ed7-985a-595c13968356" containerID="9a6dcf7d26cfc3d7057fe07bbb5f952e4b4522fe78fd34e23be5869efd50de11" exitCode=0 Dec 10 10:48:08 crc kubenswrapper[4780]: I1210 10:48:08.423993 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29422725-lvhr4" event={"ID":"536a1b3e-4cda-4ed7-985a-595c13968356","Type":"ContainerDied","Data":"9a6dcf7d26cfc3d7057fe07bbb5f952e4b4522fe78fd34e23be5869efd50de11"} Dec 10 10:48:08 crc kubenswrapper[4780]: I1210 10:48:08.436577 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-25gsf\" (UID: \"b7abac51-adc5-42fa-9084-033e4e7e7acb\") " pod="openshift-image-registry/image-registry-697d97f7c8-25gsf" Dec 10 10:48:08 crc kubenswrapper[4780]: I1210 10:48:08.436750 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/3ad0d4e6-e148-4fef-aeb8-c0370122c344-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"3ad0d4e6-e148-4fef-aeb8-c0370122c344\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Dec 10 10:48:08 crc kubenswrapper[4780]: I1210 10:48:08.436781 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/3ad0d4e6-e148-4fef-aeb8-c0370122c344-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"3ad0d4e6-e148-4fef-aeb8-c0370122c344\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Dec 10 10:48:08 crc kubenswrapper[4780]: E1210 10:48:08.437270 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-10 10:48:08.937193065 +0000 UTC m=+193.790586508 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-25gsf" (UID: "b7abac51-adc5-42fa-9084-033e4e7e7acb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:48:08 crc kubenswrapper[4780]: I1210 10:48:08.486523 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/network-metrics-daemon-46s5p" podStartSLOduration=164.486495466 podStartE2EDuration="2m44.486495466s" podCreationTimestamp="2025-12-10 10:45:24 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 10:48:08.484024331 +0000 UTC m=+193.337417764" watchObservedRunningTime="2025-12-10 10:48:08.486495466 +0000 UTC m=+193.339888899" Dec 10 10:48:08 crc kubenswrapper[4780]: I1210 10:48:08.538070 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 10 10:48:08 crc kubenswrapper[4780]: I1210 10:48:08.538833 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/3ad0d4e6-e148-4fef-aeb8-c0370122c344-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"3ad0d4e6-e148-4fef-aeb8-c0370122c344\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Dec 10 10:48:08 crc kubenswrapper[4780]: I1210 10:48:08.538894 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/3ad0d4e6-e148-4fef-aeb8-c0370122c344-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"3ad0d4e6-e148-4fef-aeb8-c0370122c344\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Dec 10 10:48:08 crc kubenswrapper[4780]: E1210 10:48:08.539789 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-10 10:48:09.039755472 +0000 UTC m=+193.893148915 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:48:08 crc kubenswrapper[4780]: I1210 10:48:08.539904 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/3ad0d4e6-e148-4fef-aeb8-c0370122c344-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"3ad0d4e6-e148-4fef-aeb8-c0370122c344\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Dec 10 10:48:08 crc kubenswrapper[4780]: I1210 10:48:08.645851 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-25gsf\" (UID: \"b7abac51-adc5-42fa-9084-033e4e7e7acb\") " pod="openshift-image-registry/image-registry-697d97f7c8-25gsf" Dec 10 10:48:08 crc kubenswrapper[4780]: E1210 10:48:08.646482 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-10 10:48:09.146454257 +0000 UTC m=+193.999847700 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-25gsf" (UID: "b7abac51-adc5-42fa-9084-033e4e7e7acb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:48:08 crc kubenswrapper[4780]: I1210 10:48:08.666210 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/3ad0d4e6-e148-4fef-aeb8-c0370122c344-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"3ad0d4e6-e148-4fef-aeb8-c0370122c344\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Dec 10 10:48:08 crc kubenswrapper[4780]: I1210 10:48:08.786618 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 10 10:48:08 crc kubenswrapper[4780]: E1210 10:48:08.787099 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-10 10:48:09.287078548 +0000 UTC m=+194.140471991 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:48:08 crc kubenswrapper[4780]: I1210 10:48:08.788826 4780 patch_prober.go:28] interesting pod/packageserver-d55dfcdfc-csjzj container/packageserver namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.30:5443/healthz\": dial tcp 10.217.0.30:5443: connect: connection refused" start-of-body= Dec 10 10:48:08 crc kubenswrapper[4780]: I1210 10:48:08.788902 4780 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-csjzj" podUID="75086894-fa83-41dd-ad4e-cca8d61f0869" containerName="packageserver" probeResult="failure" output="Get \"https://10.217.0.30:5443/healthz\": dial tcp 10.217.0.30:5443: connect: connection refused" Dec 10 10:48:08 crc kubenswrapper[4780]: I1210 10:48:08.790008 4780 patch_prober.go:28] interesting pod/packageserver-d55dfcdfc-csjzj container/packageserver namespace/openshift-operator-lifecycle-manager: Liveness probe status=failure output="Get \"https://10.217.0.30:5443/healthz\": dial tcp 10.217.0.30:5443: connect: connection refused" start-of-body= Dec 10 10:48:08 crc kubenswrapper[4780]: I1210 10:48:08.790046 4780 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-csjzj" podUID="75086894-fa83-41dd-ad4e-cca8d61f0869" containerName="packageserver" probeResult="failure" output="Get \"https://10.217.0.30:5443/healthz\": dial tcp 10.217.0.30:5443: connect: connection refused" Dec 10 10:48:08 crc kubenswrapper[4780]: I1210 10:48:08.886273 4780 patch_prober.go:28] interesting pod/router-default-5444994796-qbmwm container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Dec 10 10:48:08 crc kubenswrapper[4780]: [-]has-synced failed: reason withheld Dec 10 10:48:08 crc kubenswrapper[4780]: [+]process-running ok Dec 10 10:48:08 crc kubenswrapper[4780]: healthz check failed Dec 10 10:48:08 crc kubenswrapper[4780]: I1210 10:48:08.886399 4780 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-qbmwm" podUID="1226614f-560f-40b9-81a2-595e79043653" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Dec 10 10:48:08 crc kubenswrapper[4780]: I1210 10:48:08.888227 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-25gsf\" (UID: \"b7abac51-adc5-42fa-9084-033e4e7e7acb\") " pod="openshift-image-registry/image-registry-697d97f7c8-25gsf" Dec 10 10:48:08 crc kubenswrapper[4780]: E1210 10:48:08.888791 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. 
No retries permitted until 2025-12-10 10:48:09.388770361 +0000 UTC m=+194.242163804 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-25gsf" (UID: "b7abac51-adc5-42fa-9084-033e4e7e7acb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:48:08 crc kubenswrapper[4780]: I1210 10:48:08.951891 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc" Dec 10 10:48:09 crc kubenswrapper[4780]: I1210 10:48:09.005833 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 10 10:48:09 crc kubenswrapper[4780]: E1210 10:48:09.031054 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-10 10:48:09.530973024 +0000 UTC m=+194.384366467 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:48:09 crc kubenswrapper[4780]: I1210 10:48:09.140724 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-25gsf\" (UID: \"b7abac51-adc5-42fa-9084-033e4e7e7acb\") " pod="openshift-image-registry/image-registry-697d97f7c8-25gsf" Dec 10 10:48:09 crc kubenswrapper[4780]: E1210 10:48:09.142225 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-10 10:48:09.642203809 +0000 UTC m=+194.495597252 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-25gsf" (UID: "b7abac51-adc5-42fa-9084-033e4e7e7acb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:48:09 crc kubenswrapper[4780]: I1210 10:48:09.213454 4780 patch_prober.go:28] interesting pod/openshift-config-operator-7777fb866f-q6q9q container/openshift-config-operator namespace/openshift-config-operator: Readiness probe status=failure output="Get \"https://10.217.0.20:8443/healthz\": dial tcp 10.217.0.20:8443: connect: connection refused" start-of-body= Dec 10 10:48:09 crc kubenswrapper[4780]: I1210 10:48:09.214029 4780 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-config-operator/openshift-config-operator-7777fb866f-q6q9q" podUID="027ecd1e-0802-4c3a-b42a-4e272ee3f6fc" containerName="openshift-config-operator" probeResult="failure" output="Get \"https://10.217.0.20:8443/healthz\": dial tcp 10.217.0.20:8443: connect: connection refused" Dec 10 10:48:09 crc kubenswrapper[4780]: I1210 10:48:09.330372 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 10 10:48:09 crc kubenswrapper[4780]: E1210 10:48:09.330909 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-10 10:48:09.830885858 +0000 UTC m=+194.684279301 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:48:09 crc kubenswrapper[4780]: I1210 10:48:09.432990 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-25gsf\" (UID: \"b7abac51-adc5-42fa-9084-033e4e7e7acb\") " pod="openshift-image-registry/image-registry-697d97f7c8-25gsf" Dec 10 10:48:09 crc kubenswrapper[4780]: E1210 10:48:09.433466 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-10 10:48:09.933447674 +0000 UTC m=+194.786841117 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-25gsf" (UID: "b7abac51-adc5-42fa-9084-033e4e7e7acb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:48:09 crc kubenswrapper[4780]: I1210 10:48:09.537650 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 10 10:48:09 crc kubenswrapper[4780]: I1210 10:48:09.537913 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-dgfj7" podStartSLOduration=165.53786901 podStartE2EDuration="2m45.53786901s" podCreationTimestamp="2025-12-10 10:45:24 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 10:48:09.455817605 +0000 UTC m=+194.309211048" watchObservedRunningTime="2025-12-10 10:48:09.53786901 +0000 UTC m=+194.391262453" Dec 10 10:48:09 crc kubenswrapper[4780]: E1210 10:48:09.538377 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-10 10:48:10.038351872 +0000 UTC m=+194.891745315 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:48:09 crc kubenswrapper[4780]: I1210 10:48:09.555513 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-dns/dns-default-snzm9" podStartSLOduration=27.555467764 podStartE2EDuration="27.555467764s" podCreationTimestamp="2025-12-10 10:47:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 10:48:09.061098129 +0000 UTC m=+193.914491572" watchObservedRunningTime="2025-12-10 10:48:09.555467764 +0000 UTC m=+194.408861217" Dec 10 10:48:09 crc kubenswrapper[4780]: I1210 10:48:09.697027 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-25gsf\" (UID: \"b7abac51-adc5-42fa-9084-033e4e7e7acb\") " pod="openshift-image-registry/image-registry-697d97f7c8-25gsf" Dec 10 10:48:09 crc kubenswrapper[4780]: E1210 10:48:09.697663 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. 
No retries permitted until 2025-12-10 10:48:10.197644403 +0000 UTC m=+195.051037846 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-25gsf" (UID: "b7abac51-adc5-42fa-9084-033e4e7e7acb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:48:09 crc kubenswrapper[4780]: I1210 10:48:09.740022 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-7vv54" event={"ID":"57848ad9-ff93-4d90-9eb5-0825e149694d","Type":"ContainerStarted","Data":"4d7103f8fc64e226185ac2a91aec8fc0dae5490a458b3296e5f212517daeb7d0"} Dec 10 10:48:09 crc kubenswrapper[4780]: I1210 10:48:09.740304 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-config-operator/openshift-config-operator-7777fb866f-q6q9q" Dec 10 10:48:09 crc kubenswrapper[4780]: I1210 10:48:09.801808 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 10 10:48:09 crc kubenswrapper[4780]: E1210 10:48:09.802318 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-10 10:48:10.30229133 +0000 UTC m=+195.155684773 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:48:10 crc kubenswrapper[4780]: I1210 10:48:10.243910 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-25gsf\" (UID: \"b7abac51-adc5-42fa-9084-033e4e7e7acb\") " pod="openshift-image-registry/image-registry-697d97f7c8-25gsf" Dec 10 10:48:10 crc kubenswrapper[4780]: E1210 10:48:10.246613 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-10 10:48:10.746594046 +0000 UTC m=+195.599987479 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-25gsf" (UID: "b7abac51-adc5-42fa-9084-033e4e7e7acb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:48:10 crc kubenswrapper[4780]: I1210 10:48:10.270766 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/multus-admission-controller-857f4d67dd-v57bz" podStartSLOduration=166.270738112 podStartE2EDuration="2m46.270738112s" podCreationTimestamp="2025-12-10 10:45:24 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 10:48:10.268435312 +0000 UTC m=+195.121828775" watchObservedRunningTime="2025-12-10 10:48:10.270738112 +0000 UTC m=+195.124131565" Dec 10 10:48:10 crc kubenswrapper[4780]: I1210 10:48:10.272833 4780 patch_prober.go:28] interesting pod/apiserver-7bbb656c7d-dgfj7 container/oauth-apiserver namespace/openshift-oauth-apiserver: Startup probe status=failure output="Get \"https://10.217.0.6:8443/livez\": dial tcp 10.217.0.6:8443: connect: connection refused" start-of-body= Dec 10 10:48:10 crc kubenswrapper[4780]: I1210 10:48:10.273296 4780 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-dgfj7" podUID="135e9ec7-c882-4894-a24e-669b09be3f5b" containerName="oauth-apiserver" probeResult="failure" output="Get \"https://10.217.0.6:8443/livez\": dial tcp 10.217.0.6:8443: connect: connection refused" Dec 10 10:48:10 crc kubenswrapper[4780]: I1210 10:48:10.275882 4780 patch_prober.go:28] interesting pod/router-default-5444994796-qbmwm container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Dec 10 10:48:10 crc kubenswrapper[4780]: [-]has-synced failed: reason withheld Dec 10 10:48:10 crc kubenswrapper[4780]: [+]process-running ok Dec 10 10:48:10 crc kubenswrapper[4780]: healthz check failed Dec 10 10:48:10 crc kubenswrapper[4780]: I1210 10:48:10.275997 4780 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-qbmwm" podUID="1226614f-560f-40b9-81a2-595e79043653" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Dec 10 10:48:10 crc kubenswrapper[4780]: I1210 10:48:10.633010 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 10 10:48:10 crc kubenswrapper[4780]: E1210 10:48:10.633489 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-10 10:48:11.133472248 +0000 UTC m=+195.986865691 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:48:10 crc kubenswrapper[4780]: I1210 10:48:10.738307 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-25gsf\" (UID: \"b7abac51-adc5-42fa-9084-033e4e7e7acb\") " pod="openshift-image-registry/image-registry-697d97f7c8-25gsf" Dec 10 10:48:10 crc kubenswrapper[4780]: E1210 10:48:10.739054 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-10 10:48:11.239030179 +0000 UTC m=+196.092423622 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-25gsf" (UID: "b7abac51-adc5-42fa-9084-033e4e7e7acb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:48:10 crc kubenswrapper[4780]: I1210 10:48:10.804685 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/revision-pruner-8-crc" podStartSLOduration=10.803340243 podStartE2EDuration="10.803340243s" podCreationTimestamp="2025-12-10 10:48:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 10:48:10.741561816 +0000 UTC m=+195.594955259" watchObservedRunningTime="2025-12-10 10:48:10.803340243 +0000 UTC m=+195.656733696" Dec 10 10:48:10 crc kubenswrapper[4780]: I1210 10:48:10.839302 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 10 10:48:10 crc kubenswrapper[4780]: E1210 10:48:10.841007 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-10 10:48:11.340983316 +0000 UTC m=+196.194376759 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:48:11 crc kubenswrapper[4780]: I1210 10:48:11.170421 4780 patch_prober.go:28] interesting pod/router-default-5444994796-qbmwm container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Dec 10 10:48:11 crc kubenswrapper[4780]: [-]has-synced failed: reason withheld Dec 10 10:48:11 crc kubenswrapper[4780]: [+]process-running ok Dec 10 10:48:11 crc kubenswrapper[4780]: healthz check failed Dec 10 10:48:11 crc kubenswrapper[4780]: I1210 10:48:11.170507 4780 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-qbmwm" podUID="1226614f-560f-40b9-81a2-595e79043653" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Dec 10 10:48:11 crc kubenswrapper[4780]: I1210 10:48:11.171064 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-25gsf\" (UID: \"b7abac51-adc5-42fa-9084-033e4e7e7acb\") " pod="openshift-image-registry/image-registry-697d97f7c8-25gsf" Dec 10 10:48:11 crc kubenswrapper[4780]: E1210 10:48:11.171908 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-10 10:48:11.671888046 +0000 UTC m=+196.525281489 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-25gsf" (UID: "b7abac51-adc5-42fa-9084-033e4e7e7acb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:48:11 crc kubenswrapper[4780]: I1210 10:48:11.272111 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 10 10:48:11 crc kubenswrapper[4780]: E1210 10:48:11.273022 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-10 10:48:11.772902312 +0000 UTC m=+196.626295765 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:48:11 crc kubenswrapper[4780]: I1210 10:48:11.273471 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-25gsf\" (UID: \"b7abac51-adc5-42fa-9084-033e4e7e7acb\") " pod="openshift-image-registry/image-registry-697d97f7c8-25gsf" Dec 10 10:48:11 crc kubenswrapper[4780]: E1210 10:48:11.274083 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-10 10:48:11.774071683 +0000 UTC m=+196.627465126 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-25gsf" (UID: "b7abac51-adc5-42fa-9084-033e4e7e7acb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:48:11 crc kubenswrapper[4780]: I1210 10:48:11.403539 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 10 10:48:11 crc kubenswrapper[4780]: E1210 10:48:11.404292 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-10 10:48:11.904226847 +0000 UTC m=+196.757620470 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:48:12 crc kubenswrapper[4780]: I1210 10:48:12.230365 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 10 10:48:12 crc kubenswrapper[4780]: E1210 10:48:12.231714 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-10 10:48:13.23162491 +0000 UTC m=+198.085018353 (durationBeforeRetry 1s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:48:12 crc kubenswrapper[4780]: I1210 10:48:12.246408 4780 patch_prober.go:28] interesting pod/openshift-config-operator-7777fb866f-q6q9q container/openshift-config-operator namespace/openshift-config-operator: Liveness probe status=failure output="Get \"https://10.217.0.20:8443/healthz\": dial tcp 10.217.0.20:8443: connect: connection refused" start-of-body= Dec 10 10:48:12 crc kubenswrapper[4780]: I1210 10:48:12.247020 4780 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-config-operator/openshift-config-operator-7777fb866f-q6q9q" podUID="027ecd1e-0802-4c3a-b42a-4e272ee3f6fc" containerName="openshift-config-operator" probeResult="failure" output="Get \"https://10.217.0.20:8443/healthz\": dial tcp 10.217.0.20:8443: connect: connection refused" Dec 10 10:48:12 crc kubenswrapper[4780]: I1210 10:48:12.249691 4780 patch_prober.go:28] interesting pod/openshift-config-operator-7777fb866f-q6q9q container/openshift-config-operator namespace/openshift-config-operator: Readiness probe status=failure output="Get \"https://10.217.0.20:8443/healthz\": dial tcp 10.217.0.20:8443: connect: connection refused" start-of-body= Dec 10 10:48:12 crc kubenswrapper[4780]: I1210 10:48:12.249811 4780 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-config-operator/openshift-config-operator-7777fb866f-q6q9q" podUID="027ecd1e-0802-4c3a-b42a-4e272ee3f6fc" containerName="openshift-config-operator" probeResult="failure" output="Get \"https://10.217.0.20:8443/healthz\": dial tcp 10.217.0.20:8443: connect: connection refused" Dec 10 10:48:12 crc kubenswrapper[4780]: I1210 10:48:12.276482 4780 patch_prober.go:28] interesting pod/router-default-5444994796-qbmwm container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason 
withheld Dec 10 10:48:12 crc kubenswrapper[4780]: [-]has-synced failed: reason withheld Dec 10 10:48:12 crc kubenswrapper[4780]: [+]process-running ok Dec 10 10:48:12 crc kubenswrapper[4780]: healthz check failed Dec 10 10:48:12 crc kubenswrapper[4780]: I1210 10:48:12.276610 4780 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-qbmwm" podUID="1226614f-560f-40b9-81a2-595e79043653" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Dec 10 10:48:12 crc kubenswrapper[4780]: I1210 10:48:12.383427 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-25gsf\" (UID: \"b7abac51-adc5-42fa-9084-033e4e7e7acb\") " pod="openshift-image-registry/image-registry-697d97f7c8-25gsf" Dec 10 10:48:12 crc kubenswrapper[4780]: E1210 10:48:12.385979 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-10 10:48:12.885957163 +0000 UTC m=+197.739350606 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-25gsf" (UID: "b7abac51-adc5-42fa-9084-033e4e7e7acb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:48:12 crc kubenswrapper[4780]: I1210 10:48:12.512599 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 10 10:48:12 crc kubenswrapper[4780]: E1210 10:48:12.513208 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-10 10:48:13.01316682 +0000 UTC m=+197.866560263 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:48:12 crc kubenswrapper[4780]: I1210 10:48:12.821876 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-25gsf\" (UID: \"b7abac51-adc5-42fa-9084-033e4e7e7acb\") " pod="openshift-image-registry/image-registry-697d97f7c8-25gsf" Dec 10 10:48:12 crc kubenswrapper[4780]: E1210 10:48:12.830278 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-10 10:48:13.330246147 +0000 UTC m=+198.183639590 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-25gsf" (UID: "b7abac51-adc5-42fa-9084-033e4e7e7acb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:48:12 crc kubenswrapper[4780]: I1210 10:48:12.833183 4780 patch_prober.go:28] interesting pod/router-default-5444994796-qbmwm container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Dec 10 10:48:12 crc kubenswrapper[4780]: [-]has-synced failed: reason withheld Dec 10 10:48:12 crc kubenswrapper[4780]: [+]process-running ok Dec 10 10:48:12 crc kubenswrapper[4780]: healthz check failed Dec 10 10:48:12 crc kubenswrapper[4780]: I1210 10:48:12.833641 4780 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-qbmwm" podUID="1226614f-560f-40b9-81a2-595e79043653" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Dec 10 10:48:13 crc kubenswrapper[4780]: I1210 10:48:13.007141 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 10 10:48:13 crc kubenswrapper[4780]: E1210 10:48:13.007695 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-10 10:48:13.507657408 +0000 UTC m=+198.361050851 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:48:13 crc kubenswrapper[4780]: I1210 10:48:13.111312 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-25gsf\" (UID: \"b7abac51-adc5-42fa-9084-033e4e7e7acb\") " pod="openshift-image-registry/image-registry-697d97f7c8-25gsf" Dec 10 10:48:13 crc kubenswrapper[4780]: E1210 10:48:13.111810 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-10 10:48:13.611790846 +0000 UTC m=+198.465184289 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-25gsf" (UID: "b7abac51-adc5-42fa-9084-033e4e7e7acb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:48:13 crc kubenswrapper[4780]: I1210 10:48:13.205934 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager/revision-pruner-9-crc"] Dec 10 10:48:13 crc kubenswrapper[4780]: I1210 10:48:13.216832 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 10 10:48:13 crc kubenswrapper[4780]: E1210 10:48:13.217382 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-10 10:48:13.717361392 +0000 UTC m=+198.570754835 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:48:13 crc kubenswrapper[4780]: I1210 10:48:13.366423 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-25gsf\" (UID: \"b7abac51-adc5-42fa-9084-033e4e7e7acb\") " pod="openshift-image-registry/image-registry-697d97f7c8-25gsf" Dec 10 10:48:13 crc kubenswrapper[4780]: E1210 10:48:13.367221 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-10 10:48:13.867191315 +0000 UTC m=+198.720584758 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-25gsf" (UID: "b7abac51-adc5-42fa-9084-033e4e7e7acb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:48:13 crc kubenswrapper[4780]: I1210 10:48:13.485976 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 10 10:48:13 crc kubenswrapper[4780]: E1210 10:48:13.486716 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-10 10:48:13.986676988 +0000 UTC m=+198.840070441 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:48:13 crc kubenswrapper[4780]: I1210 10:48:13.759314 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-25gsf\" (UID: \"b7abac51-adc5-42fa-9084-033e4e7e7acb\") " pod="openshift-image-registry/image-registry-697d97f7c8-25gsf" Dec 10 10:48:13 crc kubenswrapper[4780]: E1210 10:48:13.760488 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-10 10:48:14.260463503 +0000 UTC m=+199.113856946 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-25gsf" (UID: "b7abac51-adc5-42fa-9084-033e4e7e7acb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:48:13 crc kubenswrapper[4780]: I1210 10:48:13.968146 4780 patch_prober.go:28] interesting pod/router-default-5444994796-qbmwm container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Dec 10 10:48:13 crc kubenswrapper[4780]: [-]has-synced failed: reason withheld Dec 10 10:48:13 crc kubenswrapper[4780]: [+]process-running ok Dec 10 10:48:13 crc kubenswrapper[4780]: healthz check failed Dec 10 10:48:13 crc kubenswrapper[4780]: I1210 10:48:13.968261 4780 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-qbmwm" podUID="1226614f-560f-40b9-81a2-595e79043653" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Dec 10 10:48:13 crc kubenswrapper[4780]: I1210 10:48:13.982039 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 10 10:48:13 crc kubenswrapper[4780]: E1210 10:48:13.982711 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-10 10:48:14.482678686 +0000 UTC m=+199.336072129 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:48:14 crc kubenswrapper[4780]: I1210 10:48:14.089645 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-25gsf\" (UID: \"b7abac51-adc5-42fa-9084-033e4e7e7acb\") " pod="openshift-image-registry/image-registry-697d97f7c8-25gsf" Dec 10 10:48:14 crc kubenswrapper[4780]: E1210 10:48:14.090234 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-10 10:48:14.590212874 +0000 UTC m=+199.443606317 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-25gsf" (UID: "b7abac51-adc5-42fa-9084-033e4e7e7acb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:48:14 crc kubenswrapper[4780]: I1210 10:48:14.615262 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 10 10:48:14 crc kubenswrapper[4780]: E1210 10:48:14.627737 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-10 10:48:15.627671435 +0000 UTC m=+200.481064868 (durationBeforeRetry 1s). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:48:15 crc kubenswrapper[4780]: I1210 10:48:14.750806 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 10 10:48:15 crc kubenswrapper[4780]: E1210 10:48:14.751706 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-10 10:48:15.251650647 +0000 UTC m=+200.105044090 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:48:15 crc kubenswrapper[4780]: I1210 10:48:14.769692 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-25gsf\" (UID: \"b7abac51-adc5-42fa-9084-033e4e7e7acb\") " pod="openshift-image-registry/image-registry-697d97f7c8-25gsf" Dec 10 10:48:15 crc kubenswrapper[4780]: E1210 10:48:14.770212 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-10 10:48:15.270194626 +0000 UTC m=+200.123588069 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-25gsf" (UID: "b7abac51-adc5-42fa-9084-033e4e7e7acb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:48:15 crc kubenswrapper[4780]: I1210 10:48:14.802519 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"3ad0d4e6-e148-4fef-aeb8-c0370122c344","Type":"ContainerStarted","Data":"2ef6b6c4f4723968585689a82b14ef814d3e0c607184923dc4c17eaff9f48c43"} Dec 10 10:48:15 crc kubenswrapper[4780]: I1210 10:48:15.315793 4780 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29422725-lvhr4" Dec 10 10:48:15 crc kubenswrapper[4780]: I1210 10:48:15.316878 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 10 10:48:15 crc kubenswrapper[4780]: E1210 10:48:15.317006 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-10 10:48:16.31698651 +0000 UTC m=+201.170379953 (durationBeforeRetry 1s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:48:15 crc kubenswrapper[4780]: I1210 10:48:15.319256 4780 patch_prober.go:28] interesting pod/router-default-5444994796-qbmwm container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Dec 10 10:48:15 crc kubenswrapper[4780]: [-]has-synced failed: reason withheld Dec 10 10:48:15 crc kubenswrapper[4780]: [+]process-running ok Dec 10 10:48:15 crc kubenswrapper[4780]: healthz check failed Dec 10 10:48:15 crc kubenswrapper[4780]: I1210 10:48:15.319310 4780 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-qbmwm" podUID="1226614f-560f-40b9-81a2-595e79043653" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Dec 10 10:48:15 crc kubenswrapper[4780]: I1210 10:48:15.322293 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-879f6c89f-njfgs" Dec 10 10:48:15 crc kubenswrapper[4780]: I1210 10:48:15.330210 4780 patch_prober.go:28] interesting pod/downloads-7954f5f757-fxxzq container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.10:8080/\": dial tcp 10.217.0.10:8080: connect: connection refused" start-of-body= Dec 10 10:48:15 crc kubenswrapper[4780]: I1210 10:48:15.330291 4780 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-fxxzq" podUID="09fe7cda-3948-484d-bcd9-e83d1ac0610a" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.10:8080/\": dial tcp 10.217.0.10:8080: connect: connection refused" Dec 10 10:48:15 crc kubenswrapper[4780]: I1210 10:48:15.330451 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-7vv54" event={"ID":"57848ad9-ff93-4d90-9eb5-0825e149694d","Type":"ContainerStarted","Data":"46e24fe501f9805106a03ab9173823218d90f30a643b3d9565ae60012f025424"} Dec 10 10:48:15 crc kubenswrapper[4780]: I1210 10:48:15.330799 4780 patch_prober.go:28] interesting pod/openshift-config-operator-7777fb866f-q6q9q 
container/openshift-config-operator namespace/openshift-config-operator: Readiness probe status=failure output="Get \"https://10.217.0.20:8443/healthz\": dial tcp 10.217.0.20:8443: connect: connection refused" start-of-body= Dec 10 10:48:15 crc kubenswrapper[4780]: I1210 10:48:15.330883 4780 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-config-operator/openshift-config-operator-7777fb866f-q6q9q" podUID="027ecd1e-0802-4c3a-b42a-4e272ee3f6fc" containerName="openshift-config-operator" probeResult="failure" output="Get \"https://10.217.0.20:8443/healthz\": dial tcp 10.217.0.20:8443: connect: connection refused" Dec 10 10:48:15 crc kubenswrapper[4780]: I1210 10:48:15.334119 4780 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29422725-lvhr4" Dec 10 10:48:15 crc kubenswrapper[4780]: I1210 10:48:15.334470 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29422725-lvhr4" event={"ID":"536a1b3e-4cda-4ed7-985a-595c13968356","Type":"ContainerDied","Data":"f1136568a98c4a91b67688cc8e286f0c686f0976bce135589091655d8ac021be"} Dec 10 10:48:15 crc kubenswrapper[4780]: I1210 10:48:15.334558 4780 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f1136568a98c4a91b67688cc8e286f0c686f0976bce135589091655d8ac021be" Dec 10 10:48:15 crc kubenswrapper[4780]: I1210 10:48:15.340156 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-fw9bc" Dec 10 10:48:15 crc kubenswrapper[4780]: I1210 10:48:15.344125 4780 patch_prober.go:28] interesting pod/openshift-config-operator-7777fb866f-q6q9q container/openshift-config-operator namespace/openshift-config-operator: Liveness probe status=failure output="Get \"https://10.217.0.20:8443/healthz\": dial tcp 10.217.0.20:8443: connect: connection refused" start-of-body= Dec 10 10:48:15 crc kubenswrapper[4780]: I1210 10:48:15.344201 4780 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-config-operator/openshift-config-operator-7777fb866f-q6q9q" podUID="027ecd1e-0802-4c3a-b42a-4e272ee3f6fc" containerName="openshift-config-operator" probeResult="failure" output="Get \"https://10.217.0.20:8443/healthz\": dial tcp 10.217.0.20:8443: connect: connection refused" Dec 10 10:48:15 crc kubenswrapper[4780]: I1210 10:48:15.480172 4780 patch_prober.go:28] interesting pod/downloads-7954f5f757-fxxzq container/download-server namespace/openshift-console: Liveness probe status=failure output="Get \"http://10.217.0.10:8080/\": dial tcp 10.217.0.10:8080: connect: connection refused" start-of-body= Dec 10 10:48:15 crc kubenswrapper[4780]: I1210 10:48:15.480272 4780 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console/downloads-7954f5f757-fxxzq" podUID="09fe7cda-3948-484d-bcd9-e83d1ac0610a" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.10:8080/\": dial tcp 10.217.0.10:8080: connect: connection refused" Dec 10 10:48:15 crc kubenswrapper[4780]: I1210 10:48:15.481803 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 10 10:48:15 crc kubenswrapper[4780]: 
E1210 10:48:15.482127 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-10 10:48:15.982075061 +0000 UTC m=+200.835468504 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:48:15 crc kubenswrapper[4780]: I1210 10:48:15.483158 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-25gsf\" (UID: \"b7abac51-adc5-42fa-9084-033e4e7e7acb\") " pod="openshift-image-registry/image-registry-697d97f7c8-25gsf" Dec 10 10:48:15 crc kubenswrapper[4780]: E1210 10:48:15.501546 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-10 10:48:16.001521834 +0000 UTC m=+200.854915277 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-25gsf" (UID: "b7abac51-adc5-42fa-9084-033e4e7e7acb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:48:15 crc kubenswrapper[4780]: I1210 10:48:15.699944 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 10 10:48:15 crc kubenswrapper[4780]: I1210 10:48:15.699998 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-f7dws\" (UniqueName: \"kubernetes.io/projected/536a1b3e-4cda-4ed7-985a-595c13968356-kube-api-access-f7dws\") pod \"536a1b3e-4cda-4ed7-985a-595c13968356\" (UID: \"536a1b3e-4cda-4ed7-985a-595c13968356\") " Dec 10 10:48:15 crc kubenswrapper[4780]: I1210 10:48:15.700023 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/536a1b3e-4cda-4ed7-985a-595c13968356-config-volume\") pod \"536a1b3e-4cda-4ed7-985a-595c13968356\" (UID: \"536a1b3e-4cda-4ed7-985a-595c13968356\") " Dec 10 10:48:15 crc kubenswrapper[4780]: I1210 10:48:15.700068 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/536a1b3e-4cda-4ed7-985a-595c13968356-secret-volume\") pod \"536a1b3e-4cda-4ed7-985a-595c13968356\" (UID: \"536a1b3e-4cda-4ed7-985a-595c13968356\") " Dec 10 10:48:15 
crc kubenswrapper[4780]: E1210 10:48:15.705182 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-10 10:48:16.205139097 +0000 UTC m=+201.058532540 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:48:15 crc kubenswrapper[4780]: I1210 10:48:15.705686 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/536a1b3e-4cda-4ed7-985a-595c13968356-config-volume" (OuterVolumeSpecName: "config-volume") pod "536a1b3e-4cda-4ed7-985a-595c13968356" (UID: "536a1b3e-4cda-4ed7-985a-595c13968356"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 10:48:15 crc kubenswrapper[4780]: I1210 10:48:15.723552 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/536a1b3e-4cda-4ed7-985a-595c13968356-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "536a1b3e-4cda-4ed7-985a-595c13968356" (UID: "536a1b3e-4cda-4ed7-985a-595c13968356"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 10:48:15 crc kubenswrapper[4780]: I1210 10:48:15.759558 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/536a1b3e-4cda-4ed7-985a-595c13968356-kube-api-access-f7dws" (OuterVolumeSpecName: "kube-api-access-f7dws") pod "536a1b3e-4cda-4ed7-985a-595c13968356" (UID: "536a1b3e-4cda-4ed7-985a-595c13968356"). InnerVolumeSpecName "kube-api-access-f7dws". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 10:48:15 crc kubenswrapper[4780]: I1210 10:48:15.784301 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-pdmqg"] Dec 10 10:48:15 crc kubenswrapper[4780]: E1210 10:48:15.784709 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="536a1b3e-4cda-4ed7-985a-595c13968356" containerName="collect-profiles" Dec 10 10:48:15 crc kubenswrapper[4780]: I1210 10:48:15.784742 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="536a1b3e-4cda-4ed7-985a-595c13968356" containerName="collect-profiles" Dec 10 10:48:15 crc kubenswrapper[4780]: I1210 10:48:15.784994 4780 memory_manager.go:354] "RemoveStaleState removing state" podUID="536a1b3e-4cda-4ed7-985a-595c13968356" containerName="collect-profiles" Dec 10 10:48:15 crc kubenswrapper[4780]: I1210 10:48:15.786204 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-pdmqg" Dec 10 10:48:15 crc kubenswrapper[4780]: I1210 10:48:15.786355 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-z8b9w"] Dec 10 10:48:15 crc kubenswrapper[4780]: I1210 10:48:15.792671 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-z8b9w" Dec 10 10:48:15 crc kubenswrapper[4780]: I1210 10:48:15.793085 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"certified-operators-dockercfg-4rs5g" Dec 10 10:48:15 crc kubenswrapper[4780]: I1210 10:48:15.796655 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"community-operators-dockercfg-dmngl" Dec 10 10:48:15 crc kubenswrapper[4780]: I1210 10:48:15.802240 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-25gsf\" (UID: \"b7abac51-adc5-42fa-9084-033e4e7e7acb\") " pod="openshift-image-registry/image-registry-697d97f7c8-25gsf" Dec 10 10:48:15 crc kubenswrapper[4780]: I1210 10:48:15.802390 4780 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-f7dws\" (UniqueName: \"kubernetes.io/projected/536a1b3e-4cda-4ed7-985a-595c13968356-kube-api-access-f7dws\") on node \"crc\" DevicePath \"\"" Dec 10 10:48:15 crc kubenswrapper[4780]: I1210 10:48:15.802410 4780 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/536a1b3e-4cda-4ed7-985a-595c13968356-config-volume\") on node \"crc\" DevicePath \"\"" Dec 10 10:48:15 crc kubenswrapper[4780]: I1210 10:48:15.802428 4780 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/536a1b3e-4cda-4ed7-985a-595c13968356-secret-volume\") on node \"crc\" DevicePath \"\"" Dec 10 10:48:15 crc kubenswrapper[4780]: E1210 10:48:15.802864 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-10 10:48:16.302842465 +0000 UTC m=+201.156235908 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-25gsf" (UID: "b7abac51-adc5-42fa-9084-033e4e7e7acb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:48:15 crc kubenswrapper[4780]: I1210 10:48:15.807000 4780 patch_prober.go:28] interesting pod/console-f9d7485db-2dwc9 container/console namespace/openshift-console: Startup probe status=failure output="Get \"https://10.217.0.35:8443/health\": dial tcp 10.217.0.35:8443: connect: connection refused" start-of-body= Dec 10 10:48:15 crc kubenswrapper[4780]: I1210 10:48:15.812420 4780 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-console/console-f9d7485db-2dwc9" podUID="c79bfa07-4a71-4560-b706-ac6c81b10ddc" containerName="console" probeResult="failure" output="Get \"https://10.217.0.35:8443/health\": dial tcp 10.217.0.35:8443: connect: connection refused" Dec 10 10:48:15 crc kubenswrapper[4780]: I1210 10:48:15.906553 4780 patch_prober.go:28] interesting pod/router-default-5444994796-qbmwm container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Dec 10 10:48:15 crc kubenswrapper[4780]: [-]has-synced failed: reason withheld Dec 10 10:48:15 crc kubenswrapper[4780]: [+]process-running ok Dec 10 10:48:15 crc kubenswrapper[4780]: healthz check failed Dec 10 10:48:15 crc kubenswrapper[4780]: I1210 10:48:15.906667 4780 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-qbmwm" podUID="1226614f-560f-40b9-81a2-595e79043653" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Dec 10 10:48:15 crc kubenswrapper[4780]: I1210 10:48:15.909674 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 10 10:48:15 crc kubenswrapper[4780]: I1210 10:48:15.910210 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/59f1ed36-eccd-4cd4-af95-f32539d40314-catalog-content\") pod \"certified-operators-pdmqg\" (UID: \"59f1ed36-eccd-4cd4-af95-f32539d40314\") " pod="openshift-marketplace/certified-operators-pdmqg" Dec 10 10:48:15 crc kubenswrapper[4780]: I1210 10:48:15.910299 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-czq5m\" (UniqueName: \"kubernetes.io/projected/59f1ed36-eccd-4cd4-af95-f32539d40314-kube-api-access-czq5m\") pod \"certified-operators-pdmqg\" (UID: \"59f1ed36-eccd-4cd4-af95-f32539d40314\") " pod="openshift-marketplace/certified-operators-pdmqg" Dec 10 10:48:15 crc kubenswrapper[4780]: I1210 10:48:15.910434 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kql5b\" (UniqueName: \"kubernetes.io/projected/3234cf0e-6206-4a41-8474-f1893163954f-kube-api-access-kql5b\") pod \"community-operators-z8b9w\" (UID: 
\"3234cf0e-6206-4a41-8474-f1893163954f\") " pod="openshift-marketplace/community-operators-z8b9w" Dec 10 10:48:15 crc kubenswrapper[4780]: I1210 10:48:15.910478 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/59f1ed36-eccd-4cd4-af95-f32539d40314-utilities\") pod \"certified-operators-pdmqg\" (UID: \"59f1ed36-eccd-4cd4-af95-f32539d40314\") " pod="openshift-marketplace/certified-operators-pdmqg" Dec 10 10:48:15 crc kubenswrapper[4780]: I1210 10:48:15.910569 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3234cf0e-6206-4a41-8474-f1893163954f-catalog-content\") pod \"community-operators-z8b9w\" (UID: \"3234cf0e-6206-4a41-8474-f1893163954f\") " pod="openshift-marketplace/community-operators-z8b9w" Dec 10 10:48:15 crc kubenswrapper[4780]: I1210 10:48:15.910643 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3234cf0e-6206-4a41-8474-f1893163954f-utilities\") pod \"community-operators-z8b9w\" (UID: \"3234cf0e-6206-4a41-8474-f1893163954f\") " pod="openshift-marketplace/community-operators-z8b9w" Dec 10 10:48:16 crc kubenswrapper[4780]: E1210 10:48:16.093988 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-10 10:48:16.593949227 +0000 UTC m=+201.447342670 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:48:16 crc kubenswrapper[4780]: I1210 10:48:16.098408 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-czq5m\" (UniqueName: \"kubernetes.io/projected/59f1ed36-eccd-4cd4-af95-f32539d40314-kube-api-access-czq5m\") pod \"certified-operators-pdmqg\" (UID: \"59f1ed36-eccd-4cd4-af95-f32539d40314\") " pod="openshift-marketplace/certified-operators-pdmqg" Dec 10 10:48:16 crc kubenswrapper[4780]: I1210 10:48:16.098528 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kql5b\" (UniqueName: \"kubernetes.io/projected/3234cf0e-6206-4a41-8474-f1893163954f-kube-api-access-kql5b\") pod \"community-operators-z8b9w\" (UID: \"3234cf0e-6206-4a41-8474-f1893163954f\") " pod="openshift-marketplace/community-operators-z8b9w" Dec 10 10:48:16 crc kubenswrapper[4780]: I1210 10:48:16.098560 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/59f1ed36-eccd-4cd4-af95-f32539d40314-utilities\") pod \"certified-operators-pdmqg\" (UID: \"59f1ed36-eccd-4cd4-af95-f32539d40314\") " pod="openshift-marketplace/certified-operators-pdmqg" Dec 10 10:48:16 crc kubenswrapper[4780]: I1210 10:48:16.098596 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-25gsf\" (UID: \"b7abac51-adc5-42fa-9084-033e4e7e7acb\") " pod="openshift-image-registry/image-registry-697d97f7c8-25gsf" Dec 10 10:48:16 crc kubenswrapper[4780]: I1210 10:48:16.098632 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3234cf0e-6206-4a41-8474-f1893163954f-catalog-content\") pod \"community-operators-z8b9w\" (UID: \"3234cf0e-6206-4a41-8474-f1893163954f\") " pod="openshift-marketplace/community-operators-z8b9w" Dec 10 10:48:16 crc kubenswrapper[4780]: I1210 10:48:16.098697 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/59f1ed36-eccd-4cd4-af95-f32539d40314-catalog-content\") pod \"certified-operators-pdmqg\" (UID: \"59f1ed36-eccd-4cd4-af95-f32539d40314\") " pod="openshift-marketplace/certified-operators-pdmqg" Dec 10 10:48:17 crc kubenswrapper[4780]: I1210 10:48:17.029614 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/59f1ed36-eccd-4cd4-af95-f32539d40314-utilities\") pod \"certified-operators-pdmqg\" (UID: \"59f1ed36-eccd-4cd4-af95-f32539d40314\") " pod="openshift-marketplace/certified-operators-pdmqg" Dec 10 10:48:17 crc kubenswrapper[4780]: E1210 10:48:17.068442 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-10 10:48:17.56839796 +0000 UTC m=+202.421791403 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-25gsf" (UID: "b7abac51-adc5-42fa-9084-033e4e7e7acb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:48:17 crc kubenswrapper[4780]: I1210 10:48:17.069285 4780 plugin_watcher.go:194] "Adding socket path or updating timestamp to desired state cache" path="/var/lib/kubelet/plugins_registry/kubevirt.io.hostpath-provisioner-reg.sock" Dec 10 10:48:17 crc kubenswrapper[4780]: I1210 10:48:17.070304 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 10 10:48:17 crc kubenswrapper[4780]: I1210 10:48:17.070647 4780 patch_prober.go:28] interesting pod/oauth-openshift-558db77b4-qllmj container/oauth-openshift namespace/openshift-authentication: Readiness probe status=failure output="Get \"https://10.217.0.41:6443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Dec 10 10:48:17 crc kubenswrapper[4780]: I1210 10:48:17.070761 4780 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-authentication/oauth-openshift-558db77b4-qllmj" podUID="6e560d85-bf0c-4604-9f52-d46fe96b6fe7" containerName="oauth-openshift" probeResult="failure" output="Get \"https://10.217.0.41:6443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Dec 10 10:48:17 crc kubenswrapper[4780]: E1210 10:48:17.071331 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-10 10:48:17.571285156 +0000 UTC m=+202.424678599 (durationBeforeRetry 500ms). 
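The MountVolume.MountDevice and UnmountVolume.TearDown failures in this stretch all have the same cause: the kubelet has not yet completed plugin registration for kubevirt.io.hostpath-provisioner, so its volume manager cannot build a CSI client for that driver. The plugin_watcher entry above ("Adding socket path or updating timestamp to desired state cache") is the first step of that registration: the watcher has seen the driver's registration socket under /var/lib/kubelet/plugins_registry/ and will next call GetInfo on it. Below is a minimal client-side sketch of that handshake, not kubelet source; the socket path is the one from the log, while the 5-second timeout and everything else are illustrative assumptions.

// probe_registration.go: a minimal sketch (not kubelet source) of the GetInfo
// handshake the kubelet performs against a plugin registration socket after
// the plugin watcher notices it. The socket path is taken from the log above;
// the 5s timeout is an illustrative assumption.
package main

import (
	"context"
	"fmt"
	"log"
	"net"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
	registerapi "k8s.io/kubelet/pkg/apis/pluginregistration/v1"
)

func main() {
	sock := "/var/lib/kubelet/plugins_registry/kubevirt.io.hostpath-provisioner-reg.sock"

	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	// Dial the unix socket directly; the kubelet does the same via its own helpers.
	conn, err := grpc.DialContext(ctx, sock,
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithContextDialer(func(ctx context.Context, addr string) (net.Conn, error) {
			return (&net.Dialer{}).DialContext(ctx, "unix", addr)
		}),
	)
	if err != nil {
		log.Fatalf("dial %s: %v", sock, err)
	}
	defer conn.Close()

	// This is the RPC that later fails with DeadlineExceeded in this log when
	// the registrar is not yet answering on the socket.
	info, err := registerapi.NewRegistrationClient(conn).GetInfo(ctx, &registerapi.InfoRequest{})
	if err != nil {
		log.Fatalf("GetInfo: %v", err)
	}
	fmt.Printf("type=%s name=%s endpoint=%s versions=%v\n",
		info.Type, info.Name, info.Endpoint, info.SupportedVersions)
}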
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:48:17 crc kubenswrapper[4780]: I1210 10:48:17.078827 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/59f1ed36-eccd-4cd4-af95-f32539d40314-catalog-content\") pod \"certified-operators-pdmqg\" (UID: \"59f1ed36-eccd-4cd4-af95-f32539d40314\") " pod="openshift-marketplace/certified-operators-pdmqg" Dec 10 10:48:17 crc kubenswrapper[4780]: I1210 10:48:17.083159 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3234cf0e-6206-4a41-8474-f1893163954f-catalog-content\") pod \"community-operators-z8b9w\" (UID: \"3234cf0e-6206-4a41-8474-f1893163954f\") " pod="openshift-marketplace/community-operators-z8b9w" Dec 10 10:48:17 crc kubenswrapper[4780]: I1210 10:48:17.095387 4780 patch_prober.go:28] interesting pod/router-default-5444994796-qbmwm container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Dec 10 10:48:17 crc kubenswrapper[4780]: [-]has-synced failed: reason withheld Dec 10 10:48:17 crc kubenswrapper[4780]: [+]process-running ok Dec 10 10:48:17 crc kubenswrapper[4780]: healthz check failed Dec 10 10:48:17 crc kubenswrapper[4780]: I1210 10:48:17.095598 4780 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-qbmwm" podUID="1226614f-560f-40b9-81a2-595e79043653" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Dec 10 10:48:17 crc kubenswrapper[4780]: E1210 10:48:17.130218 4780 kubelet.go:2526] "Housekeeping took longer than expected" err="housekeeping took too long" expected="1s" actual="1.036s" Dec 10 10:48:17 crc kubenswrapper[4780]: I1210 10:48:17.130541 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/marketplace-operator-79b997595-lrzpt" Dec 10 10:48:17 crc kubenswrapper[4780]: I1210 10:48:17.130657 4780 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-dgfj7" Dec 10 10:48:17 crc kubenswrapper[4780]: I1210 10:48:17.144102 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-4sfhd" Dec 10 10:48:17 crc kubenswrapper[4780]: I1210 10:48:17.178550 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3234cf0e-6206-4a41-8474-f1893163954f-utilities\") pod \"community-operators-z8b9w\" (UID: \"3234cf0e-6206-4a41-8474-f1893163954f\") " pod="openshift-marketplace/community-operators-z8b9w" Dec 10 10:48:17 crc kubenswrapper[4780]: I1210 10:48:17.178879 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod 
\"image-registry-697d97f7c8-25gsf\" (UID: \"b7abac51-adc5-42fa-9084-033e4e7e7acb\") " pod="openshift-image-registry/image-registry-697d97f7c8-25gsf" Dec 10 10:48:17 crc kubenswrapper[4780]: E1210 10:48:17.179737 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-10 10:48:17.679708847 +0000 UTC m=+202.533102290 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-25gsf" (UID: "b7abac51-adc5-42fa-9084-033e4e7e7acb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:48:17 crc kubenswrapper[4780]: I1210 10:48:17.180432 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3234cf0e-6206-4a41-8474-f1893163954f-utilities\") pod \"community-operators-z8b9w\" (UID: \"3234cf0e-6206-4a41-8474-f1893163954f\") " pod="openshift-marketplace/community-operators-z8b9w" Dec 10 10:48:18 crc kubenswrapper[4780]: I1210 10:48:18.703212 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 10 10:48:19 crc kubenswrapper[4780]: I1210 10:48:18.704168 4780 patch_prober.go:28] interesting pod/openshift-config-operator-7777fb866f-q6q9q container/openshift-config-operator namespace/openshift-config-operator: Readiness probe status=failure output="Get \"https://10.217.0.20:8443/healthz\": dial tcp 10.217.0.20:8443: connect: connection refused" start-of-body= Dec 10 10:48:19 crc kubenswrapper[4780]: I1210 10:48:18.708715 4780 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-config-operator/openshift-config-operator-7777fb866f-q6q9q" podUID="027ecd1e-0802-4c3a-b42a-4e272ee3f6fc" containerName="openshift-config-operator" probeResult="failure" output="Get \"https://10.217.0.20:8443/healthz\": dial tcp 10.217.0.20:8443: connect: connection refused" Dec 10 10:48:19 crc kubenswrapper[4780]: I1210 10:48:18.706405 4780 patch_prober.go:28] interesting pod/openshift-config-operator-7777fb866f-q6q9q container/openshift-config-operator namespace/openshift-config-operator: Liveness probe status=failure output="Get \"https://10.217.0.20:8443/healthz\": dial tcp 10.217.0.20:8443: connect: connection refused" start-of-body= Dec 10 10:48:19 crc kubenswrapper[4780]: I1210 10:48:18.708820 4780 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-config-operator/openshift-config-operator-7777fb866f-q6q9q" podUID="027ecd1e-0802-4c3a-b42a-4e272ee3f6fc" containerName="openshift-config-operator" probeResult="failure" output="Get \"https://10.217.0.20:8443/healthz\": dial tcp 10.217.0.20:8443: connect: connection refused" Dec 10 10:48:19 crc kubenswrapper[4780]: I1210 10:48:18.707162 4780 patch_prober.go:28] interesting pod/router-default-5444994796-qbmwm container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with 
statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Dec 10 10:48:19 crc kubenswrapper[4780]: [-]has-synced failed: reason withheld Dec 10 10:48:19 crc kubenswrapper[4780]: [+]process-running ok Dec 10 10:48:19 crc kubenswrapper[4780]: healthz check failed Dec 10 10:48:19 crc kubenswrapper[4780]: I1210 10:48:18.708851 4780 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-config-operator/openshift-config-operator-7777fb866f-q6q9q" Dec 10 10:48:19 crc kubenswrapper[4780]: I1210 10:48:18.708912 4780 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-qbmwm" podUID="1226614f-560f-40b9-81a2-595e79043653" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Dec 10 10:48:19 crc kubenswrapper[4780]: I1210 10:48:18.710085 4780 patch_prober.go:28] interesting pod/openshift-config-operator-7777fb866f-q6q9q container/openshift-config-operator namespace/openshift-config-operator: Readiness probe status=failure output="Get \"https://10.217.0.20:8443/healthz\": dial tcp 10.217.0.20:8443: connect: connection refused" start-of-body= Dec 10 10:48:19 crc kubenswrapper[4780]: I1210 10:48:18.710127 4780 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-config-operator/openshift-config-operator-7777fb866f-q6q9q" podUID="027ecd1e-0802-4c3a-b42a-4e272ee3f6fc" containerName="openshift-config-operator" probeResult="failure" output="Get \"https://10.217.0.20:8443/healthz\": dial tcp 10.217.0.20:8443: connect: connection refused" Dec 10 10:48:19 crc kubenswrapper[4780]: I1210 10:48:18.711482 4780 reconciler.go:161] "OperationExecutor.RegisterPlugin started" plugin={"SocketPath":"/var/lib/kubelet/plugins_registry/kubevirt.io.hostpath-provisioner-reg.sock","Timestamp":"2025-12-10T10:48:17.069329405Z","Handler":null,"Name":""} Dec 10 10:48:19 crc kubenswrapper[4780]: I1210 10:48:19.376566 4780 patch_prober.go:28] interesting pod/router-default-5444994796-qbmwm container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Dec 10 10:48:19 crc kubenswrapper[4780]: [-]has-synced failed: reason withheld Dec 10 10:48:19 crc kubenswrapper[4780]: [+]process-running ok Dec 10 10:48:19 crc kubenswrapper[4780]: healthz check failed Dec 10 10:48:19 crc kubenswrapper[4780]: I1210 10:48:19.376942 4780 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-qbmwm" podUID="1226614f-560f-40b9-81a2-595e79043653" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Dec 10 10:48:19 crc kubenswrapper[4780]: I1210 10:48:19.378604 4780 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="openshift-config-operator" containerStatusID={"Type":"cri-o","ID":"4b99ed9caedd844fcba1558ac0010a5dc7cb1606cc278dc5b59046d103db1697"} pod="openshift-config-operator/openshift-config-operator-7777fb866f-q6q9q" containerMessage="Container openshift-config-operator failed liveness probe, will be restarted" Dec 10 10:48:19 crc kubenswrapper[4780]: I1210 10:48:19.378698 4780 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-config-operator/openshift-config-operator-7777fb866f-q6q9q" podUID="027ecd1e-0802-4c3a-b42a-4e272ee3f6fc" containerName="openshift-config-operator" containerID="cri-o://4b99ed9caedd844fcba1558ac0010a5dc7cb1606cc278dc5b59046d103db1697" 
gracePeriod=30 Dec 10 10:48:19 crc kubenswrapper[4780]: E1210 10:48:19.379076 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-10 10:48:20.379050802 +0000 UTC m=+205.232444245 (durationBeforeRetry 1s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:48:19 crc kubenswrapper[4780]: I1210 10:48:19.387292 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 10 10:48:19 crc kubenswrapper[4780]: E1210 10:48:19.388423 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-10 10:48:19.888399108 +0000 UTC m=+204.741792541 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:48:19 crc kubenswrapper[4780]: I1210 10:48:19.388541 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-25gsf\" (UID: \"b7abac51-adc5-42fa-9084-033e4e7e7acb\") " pod="openshift-image-registry/image-registry-697d97f7c8-25gsf" Dec 10 10:48:19 crc kubenswrapper[4780]: E1210 10:48:19.389085 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-10 10:48:19.889063786 +0000 UTC m=+204.742457229 (durationBeforeRetry 500ms). 
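The openshift-config-operator lines just above show the standard kubelet response to a failed liveness probe: the probe result flips to unhealthy, the kuberuntime manager records that the container "failed liveness probe, will be restarted", and the container is killed with the pod's termination grace period (gracePeriod=30) before being restarted under the pod's restart policy. For reference, a hedged sketch of what such a liveness probe looks like when built with the Kubernetes Go API types follows; the path, port, and thresholds are assumptions for illustration, not values read from this deployment.

// liveness_probe_sketch.go: illustrative only. The path, port, and thresholds
// below are assumptions, not values read from the openshift-config-operator
// deployment; it also assumes a recent k8s.io/api where the embedded probe
// field is named ProbeHandler.
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/util/intstr"
)

func main() {
	liveness := corev1.Probe{
		ProbeHandler: corev1.ProbeHandler{
			HTTPGet: &corev1.HTTPGetAction{
				Path:   "/healthz",
				Port:   intstr.FromInt(8443),
				Scheme: corev1.URISchemeHTTPS,
			},
		},
		PeriodSeconds:    10, // assumed probe interval
		TimeoutSeconds:   5,  // assumed per-probe timeout
		FailureThreshold: 3,  // consecutive failures before the kubelet restarts the container
	}
	// The pod-level terminationGracePeriodSeconds is what shows up as
	// "Killing container with a grace period ... gracePeriod=30" above.
	grace := int64(30)
	fmt.Printf("liveness probe: %+v\nterminationGracePeriodSeconds: %d\n", liveness, grace)
}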
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-25gsf" (UID: "b7abac51-adc5-42fa-9084-033e4e7e7acb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:48:19 crc kubenswrapper[4780]: I1210 10:48:19.394396 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-dgfj7" Dec 10 10:48:19 crc kubenswrapper[4780]: I1210 10:48:19.394449 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-7sdgl"] Dec 10 10:48:19 crc kubenswrapper[4780]: I1210 10:48:19.395755 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-7sdgl" Dec 10 10:48:19 crc kubenswrapper[4780]: I1210 10:48:19.432173 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-z8b9w"] Dec 10 10:48:19 crc kubenswrapper[4780]: I1210 10:48:19.432297 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-csjzj" Dec 10 10:48:19 crc kubenswrapper[4780]: I1210 10:48:19.436215 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kql5b\" (UniqueName: \"kubernetes.io/projected/3234cf0e-6206-4a41-8474-f1893163954f-kube-api-access-kql5b\") pod \"community-operators-z8b9w\" (UID: \"3234cf0e-6206-4a41-8474-f1893163954f\") " pod="openshift-marketplace/community-operators-z8b9w" Dec 10 10:48:19 crc kubenswrapper[4780]: I1210 10:48:19.449618 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-pdmqg"] Dec 10 10:48:19 crc kubenswrapper[4780]: I1210 10:48:19.461289 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-czq5m\" (UniqueName: \"kubernetes.io/projected/59f1ed36-eccd-4cd4-af95-f32539d40314-kube-api-access-czq5m\") pod \"certified-operators-pdmqg\" (UID: \"59f1ed36-eccd-4cd4-af95-f32539d40314\") " pod="openshift-marketplace/certified-operators-pdmqg" Dec 10 10:48:19 crc kubenswrapper[4780]: I1210 10:48:19.476270 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-z8b9w" Dec 10 10:48:19 crc kubenswrapper[4780]: I1210 10:48:19.490013 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 10 10:48:19 crc kubenswrapper[4780]: I1210 10:48:19.490619 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/983b01b2-448a-462b-a87c-8d66c7824940-utilities\") pod \"community-operators-7sdgl\" (UID: \"983b01b2-448a-462b-a87c-8d66c7824940\") " pod="openshift-marketplace/community-operators-7sdgl" Dec 10 10:48:19 crc kubenswrapper[4780]: I1210 10:48:19.490719 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5kgr5\" (UniqueName: \"kubernetes.io/projected/983b01b2-448a-462b-a87c-8d66c7824940-kube-api-access-5kgr5\") pod \"community-operators-7sdgl\" (UID: \"983b01b2-448a-462b-a87c-8d66c7824940\") " pod="openshift-marketplace/community-operators-7sdgl" Dec 10 10:48:19 crc kubenswrapper[4780]: I1210 10:48:19.490797 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/983b01b2-448a-462b-a87c-8d66c7824940-catalog-content\") pod \"community-operators-7sdgl\" (UID: \"983b01b2-448a-462b-a87c-8d66c7824940\") " pod="openshift-marketplace/community-operators-7sdgl" Dec 10 10:48:19 crc kubenswrapper[4780]: E1210 10:48:19.491029 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-10 10:48:19.991001716 +0000 UTC m=+204.844395159 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:48:19 crc kubenswrapper[4780]: I1210 10:48:19.504075 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-7sdgl"] Dec 10 10:48:19 crc kubenswrapper[4780]: I1210 10:48:19.510218 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-zms8r"] Dec 10 10:48:19 crc kubenswrapper[4780]: I1210 10:48:19.514670 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-zms8r" Dec 10 10:48:19 crc kubenswrapper[4780]: I1210 10:48:19.538706 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-st68f" Dec 10 10:48:19 crc kubenswrapper[4780]: I1210 10:48:19.540185 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-zms8r"] Dec 10 10:48:19 crc kubenswrapper[4780]: I1210 10:48:19.595239 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-w2bd9"] Dec 10 10:48:19 crc kubenswrapper[4780]: I1210 10:48:19.596745 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/73eb2180-ca1d-4860-9306-982a9b3930b9-catalog-content\") pod \"certified-operators-zms8r\" (UID: \"73eb2180-ca1d-4860-9306-982a9b3930b9\") " pod="openshift-marketplace/certified-operators-zms8r" Dec 10 10:48:19 crc kubenswrapper[4780]: I1210 10:48:19.596845 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7k8m7\" (UniqueName: \"kubernetes.io/projected/73eb2180-ca1d-4860-9306-982a9b3930b9-kube-api-access-7k8m7\") pod \"certified-operators-zms8r\" (UID: \"73eb2180-ca1d-4860-9306-982a9b3930b9\") " pod="openshift-marketplace/certified-operators-zms8r" Dec 10 10:48:19 crc kubenswrapper[4780]: I1210 10:48:19.597016 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-25gsf\" (UID: \"b7abac51-adc5-42fa-9084-033e4e7e7acb\") " pod="openshift-image-registry/image-registry-697d97f7c8-25gsf" Dec 10 10:48:19 crc kubenswrapper[4780]: I1210 10:48:19.597097 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/983b01b2-448a-462b-a87c-8d66c7824940-utilities\") pod \"community-operators-7sdgl\" (UID: \"983b01b2-448a-462b-a87c-8d66c7824940\") " pod="openshift-marketplace/community-operators-7sdgl" Dec 10 10:48:19 crc kubenswrapper[4780]: I1210 10:48:19.597212 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/73eb2180-ca1d-4860-9306-982a9b3930b9-utilities\") pod \"certified-operators-zms8r\" (UID: \"73eb2180-ca1d-4860-9306-982a9b3930b9\") " pod="openshift-marketplace/certified-operators-zms8r" Dec 10 10:48:19 crc kubenswrapper[4780]: I1210 10:48:19.597310 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5kgr5\" (UniqueName: \"kubernetes.io/projected/983b01b2-448a-462b-a87c-8d66c7824940-kube-api-access-5kgr5\") pod \"community-operators-7sdgl\" (UID: \"983b01b2-448a-462b-a87c-8d66c7824940\") " pod="openshift-marketplace/community-operators-7sdgl" Dec 10 10:48:19 crc kubenswrapper[4780]: I1210 10:48:19.597397 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-w2bd9" Dec 10 10:48:19 crc kubenswrapper[4780]: I1210 10:48:19.597413 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/983b01b2-448a-462b-a87c-8d66c7824940-catalog-content\") pod \"community-operators-7sdgl\" (UID: \"983b01b2-448a-462b-a87c-8d66c7824940\") " pod="openshift-marketplace/community-operators-7sdgl" Dec 10 10:48:19 crc kubenswrapper[4780]: I1210 10:48:19.598827 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/983b01b2-448a-462b-a87c-8d66c7824940-catalog-content\") pod \"community-operators-7sdgl\" (UID: \"983b01b2-448a-462b-a87c-8d66c7824940\") " pod="openshift-marketplace/community-operators-7sdgl" Dec 10 10:48:19 crc kubenswrapper[4780]: I1210 10:48:19.598883 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/983b01b2-448a-462b-a87c-8d66c7824940-utilities\") pod \"community-operators-7sdgl\" (UID: \"983b01b2-448a-462b-a87c-8d66c7824940\") " pod="openshift-marketplace/community-operators-7sdgl" Dec 10 10:48:19 crc kubenswrapper[4780]: E1210 10:48:19.601674 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-10 10:48:20.101623115 +0000 UTC m=+204.955016558 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-25gsf" (UID: "b7abac51-adc5-42fa-9084-033e4e7e7acb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:48:19 crc kubenswrapper[4780]: I1210 10:48:19.667886 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-w2bd9"] Dec 10 10:48:19 crc kubenswrapper[4780]: I1210 10:48:19.681111 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-4rw4n"] Dec 10 10:48:19 crc kubenswrapper[4780]: I1210 10:48:19.683329 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-4rw4n" Dec 10 10:48:19 crc kubenswrapper[4780]: I1210 10:48:19.685999 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-marketplace-dockercfg-x2ctb" Dec 10 10:48:19 crc kubenswrapper[4780]: I1210 10:48:19.697694 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-4rw4n"] Dec 10 10:48:19 crc kubenswrapper[4780]: I1210 10:48:19.706494 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 10 10:48:19 crc kubenswrapper[4780]: I1210 10:48:19.707192 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xppl9\" (UniqueName: \"kubernetes.io/projected/a586027d-c0c6-4647-9318-23727f40a928-kube-api-access-xppl9\") pod \"redhat-marketplace-4rw4n\" (UID: \"a586027d-c0c6-4647-9318-23727f40a928\") " pod="openshift-marketplace/redhat-marketplace-4rw4n" Dec 10 10:48:19 crc kubenswrapper[4780]: E1210 10:48:19.707518 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-10 10:48:20.207469708 +0000 UTC m=+205.060863151 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:48:19 crc kubenswrapper[4780]: I1210 10:48:19.707653 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/73eb2180-ca1d-4860-9306-982a9b3930b9-catalog-content\") pod \"certified-operators-zms8r\" (UID: \"73eb2180-ca1d-4860-9306-982a9b3930b9\") " pod="openshift-marketplace/certified-operators-zms8r" Dec 10 10:48:19 crc kubenswrapper[4780]: I1210 10:48:19.707967 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7k8m7\" (UniqueName: \"kubernetes.io/projected/73eb2180-ca1d-4860-9306-982a9b3930b9-kube-api-access-7k8m7\") pod \"certified-operators-zms8r\" (UID: \"73eb2180-ca1d-4860-9306-982a9b3930b9\") " pod="openshift-marketplace/certified-operators-zms8r" Dec 10 10:48:19 crc kubenswrapper[4780]: I1210 10:48:19.707995 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a586027d-c0c6-4647-9318-23727f40a928-utilities\") pod \"redhat-marketplace-4rw4n\" (UID: \"a586027d-c0c6-4647-9318-23727f40a928\") " pod="openshift-marketplace/redhat-marketplace-4rw4n" Dec 10 10:48:19 crc kubenswrapper[4780]: I1210 10:48:19.708324 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hjf86\" (UniqueName: 
\"kubernetes.io/projected/eee88117-019c-44a5-8a7f-95a655e53a27-kube-api-access-hjf86\") pod \"redhat-marketplace-w2bd9\" (UID: \"eee88117-019c-44a5-8a7f-95a655e53a27\") " pod="openshift-marketplace/redhat-marketplace-w2bd9" Dec 10 10:48:19 crc kubenswrapper[4780]: I1210 10:48:19.708495 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-25gsf\" (UID: \"b7abac51-adc5-42fa-9084-033e4e7e7acb\") " pod="openshift-image-registry/image-registry-697d97f7c8-25gsf" Dec 10 10:48:19 crc kubenswrapper[4780]: I1210 10:48:19.708690 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/eee88117-019c-44a5-8a7f-95a655e53a27-catalog-content\") pod \"redhat-marketplace-w2bd9\" (UID: \"eee88117-019c-44a5-8a7f-95a655e53a27\") " pod="openshift-marketplace/redhat-marketplace-w2bd9" Dec 10 10:48:19 crc kubenswrapper[4780]: I1210 10:48:19.708702 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/73eb2180-ca1d-4860-9306-982a9b3930b9-catalog-content\") pod \"certified-operators-zms8r\" (UID: \"73eb2180-ca1d-4860-9306-982a9b3930b9\") " pod="openshift-marketplace/certified-operators-zms8r" Dec 10 10:48:19 crc kubenswrapper[4780]: I1210 10:48:19.708715 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a586027d-c0c6-4647-9318-23727f40a928-catalog-content\") pod \"redhat-marketplace-4rw4n\" (UID: \"a586027d-c0c6-4647-9318-23727f40a928\") " pod="openshift-marketplace/redhat-marketplace-4rw4n" Dec 10 10:48:19 crc kubenswrapper[4780]: E1210 10:48:19.709800 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-10 10:48:20.209773449 +0000 UTC m=+205.063166982 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-25gsf" (UID: "b7abac51-adc5-42fa-9084-033e4e7e7acb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:48:19 crc kubenswrapper[4780]: I1210 10:48:19.713355 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/73eb2180-ca1d-4860-9306-982a9b3930b9-utilities\") pod \"certified-operators-zms8r\" (UID: \"73eb2180-ca1d-4860-9306-982a9b3930b9\") " pod="openshift-marketplace/certified-operators-zms8r" Dec 10 10:48:19 crc kubenswrapper[4780]: I1210 10:48:19.713387 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/eee88117-019c-44a5-8a7f-95a655e53a27-utilities\") pod \"redhat-marketplace-w2bd9\" (UID: \"eee88117-019c-44a5-8a7f-95a655e53a27\") " pod="openshift-marketplace/redhat-marketplace-w2bd9" Dec 10 10:48:19 crc kubenswrapper[4780]: I1210 10:48:19.714674 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/73eb2180-ca1d-4860-9306-982a9b3930b9-utilities\") pod \"certified-operators-zms8r\" (UID: \"73eb2180-ca1d-4860-9306-982a9b3930b9\") " pod="openshift-marketplace/certified-operators-zms8r" Dec 10 10:48:19 crc kubenswrapper[4780]: I1210 10:48:19.726982 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-pdmqg" Dec 10 10:48:19 crc kubenswrapper[4780]: I1210 10:48:19.729069 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-jmx2c"] Dec 10 10:48:19 crc kubenswrapper[4780]: I1210 10:48:19.731164 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-jmx2c" Dec 10 10:48:19 crc kubenswrapper[4780]: I1210 10:48:19.751226 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-operators-dockercfg-ct8rh" Dec 10 10:48:19 crc kubenswrapper[4780]: I1210 10:48:19.764786 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-jmx2c"] Dec 10 10:48:20 crc kubenswrapper[4780]: I1210 10:48:19.790938 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7k8m7\" (UniqueName: \"kubernetes.io/projected/73eb2180-ca1d-4860-9306-982a9b3930b9-kube-api-access-7k8m7\") pod \"certified-operators-zms8r\" (UID: \"73eb2180-ca1d-4860-9306-982a9b3930b9\") " pod="openshift-marketplace/certified-operators-zms8r" Dec 10 10:48:20 crc kubenswrapper[4780]: I1210 10:48:19.853070 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 10 10:48:20 crc kubenswrapper[4780]: I1210 10:48:19.854183 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/eee88117-019c-44a5-8a7f-95a655e53a27-utilities\") pod \"redhat-marketplace-w2bd9\" (UID: \"eee88117-019c-44a5-8a7f-95a655e53a27\") " pod="openshift-marketplace/redhat-marketplace-w2bd9" Dec 10 10:48:20 crc kubenswrapper[4780]: I1210 10:48:19.854333 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xppl9\" (UniqueName: \"kubernetes.io/projected/a586027d-c0c6-4647-9318-23727f40a928-kube-api-access-xppl9\") pod \"redhat-marketplace-4rw4n\" (UID: \"a586027d-c0c6-4647-9318-23727f40a928\") " pod="openshift-marketplace/redhat-marketplace-4rw4n" Dec 10 10:48:20 crc kubenswrapper[4780]: I1210 10:48:19.854477 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a586027d-c0c6-4647-9318-23727f40a928-utilities\") pod \"redhat-marketplace-4rw4n\" (UID: \"a586027d-c0c6-4647-9318-23727f40a928\") " pod="openshift-marketplace/redhat-marketplace-4rw4n" Dec 10 10:48:20 crc kubenswrapper[4780]: I1210 10:48:19.854534 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hjf86\" (UniqueName: \"kubernetes.io/projected/eee88117-019c-44a5-8a7f-95a655e53a27-kube-api-access-hjf86\") pod \"redhat-marketplace-w2bd9\" (UID: \"eee88117-019c-44a5-8a7f-95a655e53a27\") " pod="openshift-marketplace/redhat-marketplace-w2bd9" Dec 10 10:48:20 crc kubenswrapper[4780]: I1210 10:48:19.854623 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/eee88117-019c-44a5-8a7f-95a655e53a27-catalog-content\") pod \"redhat-marketplace-w2bd9\" (UID: \"eee88117-019c-44a5-8a7f-95a655e53a27\") " pod="openshift-marketplace/redhat-marketplace-w2bd9" Dec 10 10:48:20 crc kubenswrapper[4780]: I1210 10:48:19.854659 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a586027d-c0c6-4647-9318-23727f40a928-catalog-content\") pod \"redhat-marketplace-4rw4n\" (UID: \"a586027d-c0c6-4647-9318-23727f40a928\") " 
pod="openshift-marketplace/redhat-marketplace-4rw4n" Dec 10 10:48:20 crc kubenswrapper[4780]: I1210 10:48:19.855752 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a586027d-c0c6-4647-9318-23727f40a928-catalog-content\") pod \"redhat-marketplace-4rw4n\" (UID: \"a586027d-c0c6-4647-9318-23727f40a928\") " pod="openshift-marketplace/redhat-marketplace-4rw4n" Dec 10 10:48:20 crc kubenswrapper[4780]: E1210 10:48:19.855873 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-10 10:48:20.355849933 +0000 UTC m=+205.209243366 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:48:20 crc kubenswrapper[4780]: I1210 10:48:19.859394 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a586027d-c0c6-4647-9318-23727f40a928-utilities\") pod \"redhat-marketplace-4rw4n\" (UID: \"a586027d-c0c6-4647-9318-23727f40a928\") " pod="openshift-marketplace/redhat-marketplace-4rw4n" Dec 10 10:48:20 crc kubenswrapper[4780]: I1210 10:48:19.860536 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/eee88117-019c-44a5-8a7f-95a655e53a27-catalog-content\") pod \"redhat-marketplace-w2bd9\" (UID: \"eee88117-019c-44a5-8a7f-95a655e53a27\") " pod="openshift-marketplace/redhat-marketplace-w2bd9" Dec 10 10:48:20 crc kubenswrapper[4780]: I1210 10:48:19.922777 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/eee88117-019c-44a5-8a7f-95a655e53a27-utilities\") pod \"redhat-marketplace-w2bd9\" (UID: \"eee88117-019c-44a5-8a7f-95a655e53a27\") " pod="openshift-marketplace/redhat-marketplace-w2bd9" Dec 10 10:48:20 crc kubenswrapper[4780]: I1210 10:48:20.536752 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 10 10:48:20 crc kubenswrapper[4780]: E1210 10:48:20.540117 4780 goroutinemap.go:150] Operation for "/var/lib/kubelet/plugins_registry/kubevirt.io.hostpath-provisioner-reg.sock" failed. No retries permitted until 2025-12-10 10:48:21.040084778 +0000 UTC m=+205.893478221 (durationBeforeRetry 500ms). 
Error: RegisterPlugin error -- failed to get plugin info using RPC GetInfo at socket /var/lib/kubelet/plugins_registry/kubevirt.io.hostpath-provisioner-reg.sock, err: rpc error: code = DeadlineExceeded desc = context deadline exceeded Dec 10 10:48:20 crc kubenswrapper[4780]: I1210 10:48:20.584320 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5kgr5\" (UniqueName: \"kubernetes.io/projected/983b01b2-448a-462b-a87c-8d66c7824940-kube-api-access-5kgr5\") pod \"community-operators-7sdgl\" (UID: \"983b01b2-448a-462b-a87c-8d66c7824940\") " pod="openshift-marketplace/community-operators-7sdgl" Dec 10 10:48:20 crc kubenswrapper[4780]: I1210 10:48:20.622108 4780 patch_prober.go:28] interesting pod/router-default-5444994796-qbmwm container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Dec 10 10:48:20 crc kubenswrapper[4780]: [-]has-synced failed: reason withheld Dec 10 10:48:20 crc kubenswrapper[4780]: [+]process-running ok Dec 10 10:48:20 crc kubenswrapper[4780]: healthz check failed Dec 10 10:48:20 crc kubenswrapper[4780]: I1210 10:48:20.622215 4780 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-qbmwm" podUID="1226614f-560f-40b9-81a2-595e79043653" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Dec 10 10:48:20 crc kubenswrapper[4780]: E1210 10:48:20.696574 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-10 10:48:21.696479355 +0000 UTC m=+206.549872788 (durationBeforeRetry 1s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:48:20 crc kubenswrapper[4780]: I1210 10:48:20.700087 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-zms8r" Dec 10 10:48:20 crc kubenswrapper[4780]: I1210 10:48:20.700245 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9a59ce5a-0c36-4120-be63-8f2051a58e78-catalog-content\") pod \"redhat-operators-jmx2c\" (UID: \"9a59ce5a-0c36-4120-be63-8f2051a58e78\") " pod="openshift-marketplace/redhat-operators-jmx2c" Dec 10 10:48:20 crc kubenswrapper[4780]: I1210 10:48:20.700402 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-25gsf\" (UID: \"b7abac51-adc5-42fa-9084-033e4e7e7acb\") " pod="openshift-image-registry/image-registry-697d97f7c8-25gsf" Dec 10 10:48:20 crc kubenswrapper[4780]: I1210 10:48:20.700591 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9a59ce5a-0c36-4120-be63-8f2051a58e78-utilities\") pod \"redhat-operators-jmx2c\" (UID: \"9a59ce5a-0c36-4120-be63-8f2051a58e78\") " pod="openshift-marketplace/redhat-operators-jmx2c" Dec 10 10:48:20 crc kubenswrapper[4780]: I1210 10:48:20.700633 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fr75z\" (UniqueName: \"kubernetes.io/projected/9a59ce5a-0c36-4120-be63-8f2051a58e78-kube-api-access-fr75z\") pod \"redhat-operators-jmx2c\" (UID: \"9a59ce5a-0c36-4120-be63-8f2051a58e78\") " pod="openshift-marketplace/redhat-operators-jmx2c" Dec 10 10:48:20 crc kubenswrapper[4780]: E1210 10:48:20.701727 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-10 10:48:21.201709293 +0000 UTC m=+206.055102736 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-25gsf" (UID: "b7abac51-adc5-42fa-9084-033e4e7e7acb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:48:20 crc kubenswrapper[4780]: I1210 10:48:20.728117 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-7sdgl" Dec 10 10:48:20 crc kubenswrapper[4780]: I1210 10:48:20.803055 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-dmtlf"] Dec 10 10:48:20 crc kubenswrapper[4780]: I1210 10:48:20.814651 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-dmtlf" Dec 10 10:48:20 crc kubenswrapper[4780]: I1210 10:48:20.825116 4780 patch_prober.go:28] interesting pod/router-default-5444994796-qbmwm container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Dec 10 10:48:20 crc kubenswrapper[4780]: [-]has-synced failed: reason withheld Dec 10 10:48:20 crc kubenswrapper[4780]: [+]process-running ok Dec 10 10:48:20 crc kubenswrapper[4780]: healthz check failed Dec 10 10:48:20 crc kubenswrapper[4780]: I1210 10:48:20.825372 4780 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-qbmwm" podUID="1226614f-560f-40b9-81a2-595e79043653" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Dec 10 10:48:20 crc kubenswrapper[4780]: I1210 10:48:20.836678 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 10 10:48:20 crc kubenswrapper[4780]: I1210 10:48:20.837595 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9a59ce5a-0c36-4120-be63-8f2051a58e78-utilities\") pod \"redhat-operators-jmx2c\" (UID: \"9a59ce5a-0c36-4120-be63-8f2051a58e78\") " pod="openshift-marketplace/redhat-operators-jmx2c" Dec 10 10:48:20 crc kubenswrapper[4780]: I1210 10:48:20.837653 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fr75z\" (UniqueName: \"kubernetes.io/projected/9a59ce5a-0c36-4120-be63-8f2051a58e78-kube-api-access-fr75z\") pod \"redhat-operators-jmx2c\" (UID: \"9a59ce5a-0c36-4120-be63-8f2051a58e78\") " pod="openshift-marketplace/redhat-operators-jmx2c" Dec 10 10:48:20 crc kubenswrapper[4780]: I1210 10:48:20.837696 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9a59ce5a-0c36-4120-be63-8f2051a58e78-catalog-content\") pod \"redhat-operators-jmx2c\" (UID: \"9a59ce5a-0c36-4120-be63-8f2051a58e78\") " pod="openshift-marketplace/redhat-operators-jmx2c" Dec 10 10:48:20 crc kubenswrapper[4780]: E1210 10:48:20.838590 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-10 10:48:21.338539014 +0000 UTC m=+206.191932447 (durationBeforeRetry 500ms). 
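The RegisterPlugin error a few lines above ("failed to get plugin info using RPC GetInfo at socket ... DeadlineExceeded") is the other side of the same handshake: the kubelet found the registration socket, but the driver's registrar did not answer GetInfo before the deadline, so registration is retried after durationBeforeRetry and the mount/unmount operations keep failing in the meantime. Below is a minimal sketch of what has to be listening on that socket, normally the CSI node-driver-registrar sidecar; the driver name and socket path come from the log, while the CSI endpoint path and supported version are assumptions.

// registration_server_sketch.go: a minimal stand-in for what must be serving
// the registration socket (normally the CSI node-driver-registrar sidecar) so
// the kubelet's GetInfo call does not hit DeadlineExceeded. Driver name and
// socket path are from the log; the CSI endpoint and version are assumptions.
package main

import (
	"context"
	"log"
	"net"
	"os"

	"google.golang.org/grpc"
	registerapi "k8s.io/kubelet/pkg/apis/pluginregistration/v1"
)

type registrar struct{}

// GetInfo tells the kubelet what kind of plugin this is and where its CSI
// endpoint lives.
func (registrar) GetInfo(ctx context.Context, _ *registerapi.InfoRequest) (*registerapi.PluginInfo, error) {
	return &registerapi.PluginInfo{
		Type:              registerapi.CSIPlugin,
		Name:              "kubevirt.io.hostpath-provisioner",
		Endpoint:          "/var/lib/kubelet/plugins/csi-hostpath/csi.sock", // assumed driver endpoint
		SupportedVersions: []string{"1.0.0"},                                // assumed
	}, nil
}

// NotifyRegistrationStatus is called back by the kubelet with the result.
func (registrar) NotifyRegistrationStatus(ctx context.Context, s *registerapi.RegistrationStatus) (*registerapi.RegistrationStatusResponse, error) {
	log.Printf("kubelet registration: registered=%v error=%q", s.PluginRegistered, s.Error)
	return &registerapi.RegistrationStatusResponse{}, nil
}

func main() {
	sock := "/var/lib/kubelet/plugins_registry/kubevirt.io.hostpath-provisioner-reg.sock"
	_ = os.Remove(sock) // clear a stale socket from a previous run
	l, err := net.Listen("unix", sock)
	if err != nil {
		log.Fatal(err)
	}
	srv := grpc.NewServer()
	registerapi.RegisterRegistrationServer(srv, registrar{})
	log.Fatal(srv.Serve(l))
}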
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:48:20 crc kubenswrapper[4780]: I1210 10:48:20.841869 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9a59ce5a-0c36-4120-be63-8f2051a58e78-catalog-content\") pod \"redhat-operators-jmx2c\" (UID: \"9a59ce5a-0c36-4120-be63-8f2051a58e78\") " pod="openshift-marketplace/redhat-operators-jmx2c" Dec 10 10:48:20 crc kubenswrapper[4780]: I1210 10:48:20.861535 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9a59ce5a-0c36-4120-be63-8f2051a58e78-utilities\") pod \"redhat-operators-jmx2c\" (UID: \"9a59ce5a-0c36-4120-be63-8f2051a58e78\") " pod="openshift-marketplace/redhat-operators-jmx2c" Dec 10 10:48:20 crc kubenswrapper[4780]: I1210 10:48:20.877492 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xppl9\" (UniqueName: \"kubernetes.io/projected/a586027d-c0c6-4647-9318-23727f40a928-kube-api-access-xppl9\") pod \"redhat-marketplace-4rw4n\" (UID: \"a586027d-c0c6-4647-9318-23727f40a928\") " pod="openshift-marketplace/redhat-marketplace-4rw4n" Dec 10 10:48:21 crc kubenswrapper[4780]: I1210 10:48:21.007287 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-25gsf\" (UID: \"b7abac51-adc5-42fa-9084-033e4e7e7acb\") " pod="openshift-image-registry/image-registry-697d97f7c8-25gsf" Dec 10 10:48:21 crc kubenswrapper[4780]: E1210 10:48:21.007938 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-10 10:48:21.507875532 +0000 UTC m=+206.361268975 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-25gsf" (UID: "b7abac51-adc5-42fa-9084-033e4e7e7acb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:48:21 crc kubenswrapper[4780]: I1210 10:48:21.014942 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-4rw4n" Dec 10 10:48:21 crc kubenswrapper[4780]: I1210 10:48:21.016985 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hjf86\" (UniqueName: \"kubernetes.io/projected/eee88117-019c-44a5-8a7f-95a655e53a27-kube-api-access-hjf86\") pod \"redhat-marketplace-w2bd9\" (UID: \"eee88117-019c-44a5-8a7f-95a655e53a27\") " pod="openshift-marketplace/redhat-marketplace-w2bd9" Dec 10 10:48:21 crc kubenswrapper[4780]: I1210 10:48:21.020215 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-dmtlf"] Dec 10 10:48:21 crc kubenswrapper[4780]: I1210 10:48:21.042368 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fr75z\" (UniqueName: \"kubernetes.io/projected/9a59ce5a-0c36-4120-be63-8f2051a58e78-kube-api-access-fr75z\") pod \"redhat-operators-jmx2c\" (UID: \"9a59ce5a-0c36-4120-be63-8f2051a58e78\") " pod="openshift-marketplace/redhat-operators-jmx2c" Dec 10 10:48:21 crc kubenswrapper[4780]: I1210 10:48:21.082826 4780 patch_prober.go:28] interesting pod/apiserver-76f77b778f-9hgsw container/openshift-apiserver namespace/openshift-apiserver: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[+]ping ok Dec 10 10:48:21 crc kubenswrapper[4780]: [+]log ok Dec 10 10:48:21 crc kubenswrapper[4780]: [+]etcd ok Dec 10 10:48:21 crc kubenswrapper[4780]: [+]poststarthook/start-apiserver-admission-initializer ok Dec 10 10:48:21 crc kubenswrapper[4780]: [+]poststarthook/generic-apiserver-start-informers ok Dec 10 10:48:21 crc kubenswrapper[4780]: [+]poststarthook/max-in-flight-filter ok Dec 10 10:48:21 crc kubenswrapper[4780]: [+]poststarthook/storage-object-count-tracker-hook ok Dec 10 10:48:21 crc kubenswrapper[4780]: [+]poststarthook/image.openshift.io-apiserver-caches ok Dec 10 10:48:21 crc kubenswrapper[4780]: [-]poststarthook/authorization.openshift.io-bootstrapclusterroles failed: reason withheld Dec 10 10:48:21 crc kubenswrapper[4780]: [-]poststarthook/authorization.openshift.io-ensurenodebootstrap-sa failed: reason withheld Dec 10 10:48:21 crc kubenswrapper[4780]: [+]poststarthook/project.openshift.io-projectcache ok Dec 10 10:48:21 crc kubenswrapper[4780]: [+]poststarthook/project.openshift.io-projectauthorizationcache ok Dec 10 10:48:21 crc kubenswrapper[4780]: [-]poststarthook/openshift.io-startinformers failed: reason withheld Dec 10 10:48:21 crc kubenswrapper[4780]: [+]poststarthook/openshift.io-restmapperupdater ok Dec 10 10:48:21 crc kubenswrapper[4780]: [+]poststarthook/quota.openshift.io-clusterquotamapping ok Dec 10 10:48:21 crc kubenswrapper[4780]: livez check failed Dec 10 10:48:21 crc kubenswrapper[4780]: I1210 10:48:21.082935 4780 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-apiserver/apiserver-76f77b778f-9hgsw" podUID="5464f7ae-9634-4208-a5f5-3e6299f72639" containerName="openshift-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 500" Dec 10 10:48:21 crc kubenswrapper[4780]: I1210 10:48:21.115429 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-jmx2c" Dec 10 10:48:21 crc kubenswrapper[4780]: I1210 10:48:21.116824 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 10 10:48:21 crc kubenswrapper[4780]: I1210 10:48:21.118154 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e941c8a4-98a7-48d2-9ec6-4e2dec741b54-utilities\") pod \"redhat-operators-dmtlf\" (UID: \"e941c8a4-98a7-48d2-9ec6-4e2dec741b54\") " pod="openshift-marketplace/redhat-operators-dmtlf" Dec 10 10:48:21 crc kubenswrapper[4780]: E1210 10:48:21.118357 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-10 10:48:21.618320547 +0000 UTC m=+206.471713990 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:48:21 crc kubenswrapper[4780]: I1210 10:48:21.118828 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dzxwn\" (UniqueName: \"kubernetes.io/projected/e941c8a4-98a7-48d2-9ec6-4e2dec741b54-kube-api-access-dzxwn\") pod \"redhat-operators-dmtlf\" (UID: \"e941c8a4-98a7-48d2-9ec6-4e2dec741b54\") " pod="openshift-marketplace/redhat-operators-dmtlf" Dec 10 10:48:21 crc kubenswrapper[4780]: I1210 10:48:21.119591 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-25gsf\" (UID: \"b7abac51-adc5-42fa-9084-033e4e7e7acb\") " pod="openshift-image-registry/image-registry-697d97f7c8-25gsf" Dec 10 10:48:21 crc kubenswrapper[4780]: I1210 10:48:21.119708 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e941c8a4-98a7-48d2-9ec6-4e2dec741b54-catalog-content\") pod \"redhat-operators-dmtlf\" (UID: \"e941c8a4-98a7-48d2-9ec6-4e2dec741b54\") " pod="openshift-marketplace/redhat-operators-dmtlf" Dec 10 10:48:21 crc kubenswrapper[4780]: E1210 10:48:21.120413 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-10 10:48:21.620395991 +0000 UTC m=+206.473789434 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-25gsf" (UID: "b7abac51-adc5-42fa-9084-033e4e7e7acb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:48:21 crc kubenswrapper[4780]: I1210 10:48:21.142144 4780 patch_prober.go:28] interesting pod/apiserver-76f77b778f-9hgsw container/openshift-apiserver namespace/openshift-apiserver: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[+]ping ok Dec 10 10:48:21 crc kubenswrapper[4780]: [+]log ok Dec 10 10:48:21 crc kubenswrapper[4780]: [+]etcd ok Dec 10 10:48:21 crc kubenswrapper[4780]: [+]poststarthook/start-apiserver-admission-initializer ok Dec 10 10:48:21 crc kubenswrapper[4780]: [+]poststarthook/generic-apiserver-start-informers ok Dec 10 10:48:21 crc kubenswrapper[4780]: [+]poststarthook/max-in-flight-filter ok Dec 10 10:48:21 crc kubenswrapper[4780]: [+]poststarthook/storage-object-count-tracker-hook ok Dec 10 10:48:21 crc kubenswrapper[4780]: [+]poststarthook/image.openshift.io-apiserver-caches ok Dec 10 10:48:21 crc kubenswrapper[4780]: [-]poststarthook/authorization.openshift.io-bootstrapclusterroles failed: reason withheld Dec 10 10:48:21 crc kubenswrapper[4780]: [-]poststarthook/authorization.openshift.io-ensurenodebootstrap-sa failed: reason withheld Dec 10 10:48:21 crc kubenswrapper[4780]: [+]poststarthook/project.openshift.io-projectcache ok Dec 10 10:48:21 crc kubenswrapper[4780]: [+]poststarthook/project.openshift.io-projectauthorizationcache ok Dec 10 10:48:21 crc kubenswrapper[4780]: [+]poststarthook/openshift.io-startinformers ok Dec 10 10:48:21 crc kubenswrapper[4780]: [+]poststarthook/openshift.io-restmapperupdater ok Dec 10 10:48:21 crc kubenswrapper[4780]: [+]poststarthook/quota.openshift.io-clusterquotamapping ok Dec 10 10:48:21 crc kubenswrapper[4780]: livez check failed Dec 10 10:48:21 crc kubenswrapper[4780]: I1210 10:48:21.142281 4780 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-apiserver/apiserver-76f77b778f-9hgsw" podUID="5464f7ae-9634-4208-a5f5-3e6299f72639" containerName="openshift-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 500" Dec 10 10:48:21 crc kubenswrapper[4780]: I1210 10:48:21.282182 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 10 10:48:21 crc kubenswrapper[4780]: I1210 10:48:21.282575 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e941c8a4-98a7-48d2-9ec6-4e2dec741b54-utilities\") pod \"redhat-operators-dmtlf\" (UID: \"e941c8a4-98a7-48d2-9ec6-4e2dec741b54\") " pod="openshift-marketplace/redhat-operators-dmtlf" Dec 10 10:48:21 crc kubenswrapper[4780]: I1210 10:48:21.282649 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dzxwn\" (UniqueName: \"kubernetes.io/projected/e941c8a4-98a7-48d2-9ec6-4e2dec741b54-kube-api-access-dzxwn\") pod \"redhat-operators-dmtlf\" (UID: 
\"e941c8a4-98a7-48d2-9ec6-4e2dec741b54\") " pod="openshift-marketplace/redhat-operators-dmtlf" Dec 10 10:48:21 crc kubenswrapper[4780]: I1210 10:48:21.282724 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e941c8a4-98a7-48d2-9ec6-4e2dec741b54-catalog-content\") pod \"redhat-operators-dmtlf\" (UID: \"e941c8a4-98a7-48d2-9ec6-4e2dec741b54\") " pod="openshift-marketplace/redhat-operators-dmtlf" Dec 10 10:48:21 crc kubenswrapper[4780]: E1210 10:48:21.283162 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-10 10:48:21.783139526 +0000 UTC m=+206.636532969 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:48:21 crc kubenswrapper[4780]: I1210 10:48:21.283360 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e941c8a4-98a7-48d2-9ec6-4e2dec741b54-catalog-content\") pod \"redhat-operators-dmtlf\" (UID: \"e941c8a4-98a7-48d2-9ec6-4e2dec741b54\") " pod="openshift-marketplace/redhat-operators-dmtlf" Dec 10 10:48:21 crc kubenswrapper[4780]: I1210 10:48:21.283427 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-w2bd9" Dec 10 10:48:21 crc kubenswrapper[4780]: I1210 10:48:21.284275 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e941c8a4-98a7-48d2-9ec6-4e2dec741b54-utilities\") pod \"redhat-operators-dmtlf\" (UID: \"e941c8a4-98a7-48d2-9ec6-4e2dec741b54\") " pod="openshift-marketplace/redhat-operators-dmtlf" Dec 10 10:48:21 crc kubenswrapper[4780]: I1210 10:48:21.395196 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-25gsf\" (UID: \"b7abac51-adc5-42fa-9084-033e4e7e7acb\") " pod="openshift-image-registry/image-registry-697d97f7c8-25gsf" Dec 10 10:48:21 crc kubenswrapper[4780]: E1210 10:48:21.395857 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-10 10:48:21.89583703 +0000 UTC m=+206.749230473 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-25gsf" (UID: "b7abac51-adc5-42fa-9084-033e4e7e7acb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:48:21 crc kubenswrapper[4780]: I1210 10:48:21.427006 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dzxwn\" (UniqueName: \"kubernetes.io/projected/e941c8a4-98a7-48d2-9ec6-4e2dec741b54-kube-api-access-dzxwn\") pod \"redhat-operators-dmtlf\" (UID: \"e941c8a4-98a7-48d2-9ec6-4e2dec741b54\") " pod="openshift-marketplace/redhat-operators-dmtlf" Dec 10 10:48:21 crc kubenswrapper[4780]: I1210 10:48:21.497265 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 10 10:48:21 crc kubenswrapper[4780]: E1210 10:48:21.660945 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-10 10:48:22.017302485 +0000 UTC m=+206.870695928 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:48:21 crc kubenswrapper[4780]: I1210 10:48:21.662514 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-dmtlf" Dec 10 10:48:21 crc kubenswrapper[4780]: I1210 10:48:21.663028 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-25gsf\" (UID: \"b7abac51-adc5-42fa-9084-033e4e7e7acb\") " pod="openshift-image-registry/image-registry-697d97f7c8-25gsf" Dec 10 10:48:21 crc kubenswrapper[4780]: E1210 10:48:21.663535 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-10 10:48:22.163520223 +0000 UTC m=+207.016913666 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-25gsf" (UID: "b7abac51-adc5-42fa-9084-033e4e7e7acb") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:48:21 crc kubenswrapper[4780]: I1210 10:48:21.686836 4780 generic.go:334] "Generic (PLEG): container finished" podID="d5885af1-d160-445e-9c10-fb97487ef916" containerID="6f4e91a6206467089e1dc855961936e434a6b0ea809cdba164e9b3d3ceb0f40f" exitCode=0 Dec 10 10:48:21 crc kubenswrapper[4780]: I1210 10:48:21.686930 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"d5885af1-d160-445e-9c10-fb97487ef916","Type":"ContainerDied","Data":"6f4e91a6206467089e1dc855961936e434a6b0ea809cdba164e9b3d3ceb0f40f"} Dec 10 10:48:21 crc kubenswrapper[4780]: I1210 10:48:21.697349 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"3ad0d4e6-e148-4fef-aeb8-c0370122c344","Type":"ContainerStarted","Data":"87b22867655208f29544f3617dfbc57579b407d3fc98425a1e2180f22b68fa5a"} Dec 10 10:48:21 crc kubenswrapper[4780]: I1210 10:48:21.718954 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-7vv54" event={"ID":"57848ad9-ff93-4d90-9eb5-0825e149694d","Type":"ContainerStarted","Data":"c4544921667b4827f580b76f3fce8ba1161fa8804bda2f3d2b58fca52e646e20"} Dec 10 10:48:21 crc kubenswrapper[4780]: I1210 10:48:21.728045 4780 reconciler.go:161] "OperationExecutor.RegisterPlugin started" plugin={"SocketPath":"/var/lib/kubelet/plugins_registry/kubevirt.io.hostpath-provisioner-reg.sock","Timestamp":"2025-12-10T10:48:17.069329405Z","Handler":null,"Name":""} Dec 10 10:48:21 crc kubenswrapper[4780]: I1210 10:48:21.762507 4780 csi_plugin.go:100] kubernetes.io/csi: Trying to validate a new CSI Driver with name: kubevirt.io.hostpath-provisioner endpoint: /var/lib/kubelet/plugins/csi-hostpath/csi.sock versions: 1.0.0 Dec 10 10:48:21 crc kubenswrapper[4780]: I1210 10:48:21.762568 4780 csi_plugin.go:113] kubernetes.io/csi: Register new plugin with name: kubevirt.io.hostpath-provisioner at endpoint: /var/lib/kubelet/plugins/csi-hostpath/csi.sock Dec 10 10:48:21 crc kubenswrapper[4780]: I1210 10:48:21.773860 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 10 10:48:22 crc kubenswrapper[4780]: I1210 10:48:22.270679 4780 patch_prober.go:28] interesting pod/openshift-config-operator-7777fb866f-q6q9q container/openshift-config-operator namespace/openshift-config-operator: Readiness probe status=failure output="Get \"https://10.217.0.20:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Dec 10 10:48:22 crc kubenswrapper[4780]: I1210 10:48:22.270818 4780 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-config-operator/openshift-config-operator-7777fb866f-q6q9q" podUID="027ecd1e-0802-4c3a-b42a-4e272ee3f6fc" 
containerName="openshift-config-operator" probeResult="failure" output="Get \"https://10.217.0.20:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Dec 10 10:48:22 crc kubenswrapper[4780]: I1210 10:48:22.288481 4780 patch_prober.go:28] interesting pod/router-default-5444994796-qbmwm container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Dec 10 10:48:22 crc kubenswrapper[4780]: [-]has-synced failed: reason withheld Dec 10 10:48:22 crc kubenswrapper[4780]: [+]process-running ok Dec 10 10:48:22 crc kubenswrapper[4780]: healthz check failed Dec 10 10:48:22 crc kubenswrapper[4780]: I1210 10:48:22.288540 4780 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-qbmwm" podUID="1226614f-560f-40b9-81a2-595e79043653" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Dec 10 10:48:22 crc kubenswrapper[4780]: I1210 10:48:22.431576 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (OuterVolumeSpecName: "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8". PluginName "kubernetes.io/csi", VolumeGidValue "" Dec 10 10:48:22 crc kubenswrapper[4780]: I1210 10:48:22.506875 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-25gsf\" (UID: \"b7abac51-adc5-42fa-9084-033e4e7e7acb\") " pod="openshift-image-registry/image-registry-697d97f7c8-25gsf" Dec 10 10:48:22 crc kubenswrapper[4780]: I1210 10:48:22.521369 4780 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... 
Dec 10 10:48:22 crc kubenswrapper[4780]: I1210 10:48:22.521451 4780 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-25gsf\" (UID: \"b7abac51-adc5-42fa-9084-033e4e7e7acb\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/globalmount\"" pod="openshift-image-registry/image-registry-697d97f7c8-25gsf" Dec 10 10:48:22 crc kubenswrapper[4780]: I1210 10:48:22.886151 4780 patch_prober.go:28] interesting pod/apiserver-76f77b778f-9hgsw container/openshift-apiserver namespace/openshift-apiserver: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[+]ping ok Dec 10 10:48:22 crc kubenswrapper[4780]: [+]log ok Dec 10 10:48:22 crc kubenswrapper[4780]: [+]etcd ok Dec 10 10:48:22 crc kubenswrapper[4780]: [+]poststarthook/start-apiserver-admission-initializer ok Dec 10 10:48:22 crc kubenswrapper[4780]: [+]poststarthook/generic-apiserver-start-informers ok Dec 10 10:48:22 crc kubenswrapper[4780]: [+]poststarthook/max-in-flight-filter ok Dec 10 10:48:22 crc kubenswrapper[4780]: [+]poststarthook/storage-object-count-tracker-hook ok Dec 10 10:48:22 crc kubenswrapper[4780]: [+]poststarthook/image.openshift.io-apiserver-caches ok Dec 10 10:48:22 crc kubenswrapper[4780]: [-]poststarthook/authorization.openshift.io-bootstrapclusterroles failed: reason withheld Dec 10 10:48:22 crc kubenswrapper[4780]: [+]poststarthook/authorization.openshift.io-ensurenodebootstrap-sa ok Dec 10 10:48:22 crc kubenswrapper[4780]: [+]poststarthook/project.openshift.io-projectcache ok Dec 10 10:48:22 crc kubenswrapper[4780]: [+]poststarthook/project.openshift.io-projectauthorizationcache ok Dec 10 10:48:22 crc kubenswrapper[4780]: [+]poststarthook/openshift.io-startinformers ok Dec 10 10:48:22 crc kubenswrapper[4780]: [+]poststarthook/openshift.io-restmapperupdater ok Dec 10 10:48:22 crc kubenswrapper[4780]: [+]poststarthook/quota.openshift.io-clusterquotamapping ok Dec 10 10:48:22 crc kubenswrapper[4780]: livez check failed Dec 10 10:48:22 crc kubenswrapper[4780]: I1210 10:48:22.886255 4780 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-apiserver/apiserver-76f77b778f-9hgsw" podUID="5464f7ae-9634-4208-a5f5-3e6299f72639" containerName="openshift-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 500" Dec 10 10:48:22 crc kubenswrapper[4780]: I1210 10:48:22.886269 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-25gsf\" (UID: \"b7abac51-adc5-42fa-9084-033e4e7e7acb\") " pod="openshift-image-registry/image-registry-697d97f7c8-25gsf" Dec 10 10:48:23 crc kubenswrapper[4780]: I1210 10:48:23.011086 4780 patch_prober.go:28] interesting pod/router-default-5444994796-qbmwm container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Dec 10 10:48:23 crc kubenswrapper[4780]: [-]has-synced failed: reason withheld Dec 10 10:48:23 crc kubenswrapper[4780]: [+]process-running ok Dec 10 10:48:23 crc kubenswrapper[4780]: healthz check 
failed Dec 10 10:48:23 crc kubenswrapper[4780]: I1210 10:48:23.011166 4780 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-qbmwm" podUID="1226614f-560f-40b9-81a2-595e79043653" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Dec 10 10:48:23 crc kubenswrapper[4780]: I1210 10:48:23.615385 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"registry-dockercfg-kzzsd" Dec 10 10:48:23 crc kubenswrapper[4780]: I1210 10:48:23.616968 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-25gsf" Dec 10 10:48:24 crc kubenswrapper[4780]: I1210 10:48:24.439423 4780 patch_prober.go:28] interesting pod/openshift-config-operator-7777fb866f-q6q9q container/openshift-config-operator namespace/openshift-config-operator: Readiness probe status=failure output="Get \"https://10.217.0.20:8443/healthz\": dial tcp 10.217.0.20:8443: connect: connection refused" start-of-body= Dec 10 10:48:24 crc kubenswrapper[4780]: I1210 10:48:24.439537 4780 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-config-operator/openshift-config-operator-7777fb866f-q6q9q" podUID="027ecd1e-0802-4c3a-b42a-4e272ee3f6fc" containerName="openshift-config-operator" probeResult="failure" output="Get \"https://10.217.0.20:8443/healthz\": dial tcp 10.217.0.20:8443: connect: connection refused" Dec 10 10:48:25 crc kubenswrapper[4780]: I1210 10:48:25.152580 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-config-operator_openshift-config-operator-7777fb866f-q6q9q_027ecd1e-0802-4c3a-b42a-4e272ee3f6fc/openshift-config-operator/1.log" Dec 10 10:48:25 crc kubenswrapper[4780]: I1210 10:48:25.155161 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-config-operator_openshift-config-operator-7777fb866f-q6q9q_027ecd1e-0802-4c3a-b42a-4e272ee3f6fc/openshift-config-operator/0.log" Dec 10 10:48:25 crc kubenswrapper[4780]: I1210 10:48:25.156198 4780 generic.go:334] "Generic (PLEG): container finished" podID="027ecd1e-0802-4c3a-b42a-4e272ee3f6fc" containerID="4b99ed9caedd844fcba1558ac0010a5dc7cb1606cc278dc5b59046d103db1697" exitCode=255 Dec 10 10:48:25 crc kubenswrapper[4780]: I1210 10:48:25.163985 4780 patch_prober.go:28] interesting pod/router-default-5444994796-qbmwm container/router namespace/openshift-ingress: Startup probe status=failure output="Get \"http://localhost:1936/healthz/ready\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body= Dec 10 10:48:25 crc kubenswrapper[4780]: I1210 10:48:25.164061 4780 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-qbmwm" podUID="1226614f-560f-40b9-81a2-595e79043653" containerName="router" probeResult="failure" output="Get \"http://localhost:1936/healthz/ready\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Dec 10 10:48:25 crc kubenswrapper[4780]: I1210 10:48:25.195126 4780 patch_prober.go:28] interesting pod/downloads-7954f5f757-fxxzq container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.10:8080/\": dial tcp 10.217.0.10:8080: connect: connection refused" start-of-body= Dec 10 10:48:25 crc kubenswrapper[4780]: I1210 10:48:25.195221 4780 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-fxxzq" 
podUID="09fe7cda-3948-484d-bcd9-e83d1ac0610a" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.10:8080/\": dial tcp 10.217.0.10:8080: connect: connection refused" Dec 10 10:48:25 crc kubenswrapper[4780]: I1210 10:48:25.195264 4780 patch_prober.go:28] interesting pod/downloads-7954f5f757-fxxzq container/download-server namespace/openshift-console: Liveness probe status=failure output="Get \"http://10.217.0.10:8080/\": dial tcp 10.217.0.10:8080: connect: connection refused" start-of-body= Dec 10 10:48:25 crc kubenswrapper[4780]: I1210 10:48:25.195338 4780 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console/downloads-7954f5f757-fxxzq" podUID="09fe7cda-3948-484d-bcd9-e83d1ac0610a" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.10:8080/\": dial tcp 10.217.0.10:8080: connect: connection refused" Dec 10 10:48:25 crc kubenswrapper[4780]: I1210 10:48:25.242906 4780 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8f668bae-612b-4b75-9490-919e737c6a3b" path="/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes" Dec 10 10:48:25 crc kubenswrapper[4780]: I1210 10:48:25.250747 4780 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-console/downloads-7954f5f757-fxxzq" Dec 10 10:48:25 crc kubenswrapper[4780]: I1210 10:48:25.250851 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-q6q9q" event={"ID":"027ecd1e-0802-4c3a-b42a-4e272ee3f6fc","Type":"ContainerDied","Data":"4b99ed9caedd844fcba1558ac0010a5dc7cb1606cc278dc5b59046d103db1697"} Dec 10 10:48:25 crc kubenswrapper[4780]: I1210 10:48:25.250972 4780 scope.go:117] "RemoveContainer" containerID="5af94e3d01ed1885e1fac4fa2df29f95bf242797062a1e13799f3ef62e005536" Dec 10 10:48:25 crc kubenswrapper[4780]: I1210 10:48:25.252184 4780 patch_prober.go:28] interesting pod/downloads-7954f5f757-fxxzq container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.10:8080/\": dial tcp 10.217.0.10:8080: connect: connection refused" start-of-body= Dec 10 10:48:25 crc kubenswrapper[4780]: I1210 10:48:25.252235 4780 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-fxxzq" podUID="09fe7cda-3948-484d-bcd9-e83d1ac0610a" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.10:8080/\": dial tcp 10.217.0.10:8080: connect: connection refused" Dec 10 10:48:25 crc kubenswrapper[4780]: I1210 10:48:25.259373 4780 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="download-server" containerStatusID={"Type":"cri-o","ID":"deb50716f11ade378405f94e5dfb23374756041d9247c96b96f862d026d8a63e"} pod="openshift-console/downloads-7954f5f757-fxxzq" containerMessage="Container download-server failed liveness probe, will be restarted" Dec 10 10:48:25 crc kubenswrapper[4780]: I1210 10:48:25.259439 4780 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-console/downloads-7954f5f757-fxxzq" podUID="09fe7cda-3948-484d-bcd9-e83d1ac0610a" containerName="download-server" containerID="cri-o://deb50716f11ade378405f94e5dfb23374756041d9247c96b96f862d026d8a63e" gracePeriod=2 Dec 10 10:48:26 crc kubenswrapper[4780]: I1210 10:48:25.850362 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="hostpath-provisioner/csi-hostpathplugin-7vv54" podStartSLOduration=43.850236852 
podStartE2EDuration="43.850236852s" podCreationTimestamp="2025-12-10 10:47:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 10:48:25.272472903 +0000 UTC m=+210.125866346" watchObservedRunningTime="2025-12-10 10:48:25.850236852 +0000 UTC m=+210.703630295" Dec 10 10:48:26 crc kubenswrapper[4780]: I1210 10:48:25.861361 4780 patch_prober.go:28] interesting pod/console-f9d7485db-2dwc9 container/console namespace/openshift-console: Startup probe status=failure output="Get \"https://10.217.0.35:8443/health\": dial tcp 10.217.0.35:8443: connect: connection refused" start-of-body= Dec 10 10:48:26 crc kubenswrapper[4780]: I1210 10:48:25.861468 4780 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-console/console-f9d7485db-2dwc9" podUID="c79bfa07-4a71-4560-b706-ac6c81b10ddc" containerName="console" probeResult="failure" output="Get \"https://10.217.0.35:8443/health\": dial tcp 10.217.0.35:8443: connect: connection refused" Dec 10 10:48:26 crc kubenswrapper[4780]: I1210 10:48:25.876591 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-authentication/oauth-openshift-558db77b4-qllmj" Dec 10 10:48:26 crc kubenswrapper[4780]: I1210 10:48:25.981672 4780 patch_prober.go:28] interesting pod/router-default-5444994796-qbmwm container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Dec 10 10:48:26 crc kubenswrapper[4780]: [-]has-synced failed: reason withheld Dec 10 10:48:26 crc kubenswrapper[4780]: [+]process-running ok Dec 10 10:48:26 crc kubenswrapper[4780]: healthz check failed Dec 10 10:48:26 crc kubenswrapper[4780]: I1210 10:48:25.982285 4780 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-qbmwm" podUID="1226614f-560f-40b9-81a2-595e79043653" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Dec 10 10:48:26 crc kubenswrapper[4780]: I1210 10:48:26.000252 4780 patch_prober.go:28] interesting pod/router-default-5444994796-qbmwm container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Dec 10 10:48:26 crc kubenswrapper[4780]: [-]has-synced failed: reason withheld Dec 10 10:48:26 crc kubenswrapper[4780]: [+]process-running ok Dec 10 10:48:26 crc kubenswrapper[4780]: healthz check failed Dec 10 10:48:26 crc kubenswrapper[4780]: I1210 10:48:26.000334 4780 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-qbmwm" podUID="1226614f-560f-40b9-81a2-595e79043653" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Dec 10 10:48:26 crc kubenswrapper[4780]: I1210 10:48:26.129639 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-controller-manager/revision-pruner-9-crc" podStartSLOduration=19.12960314 podStartE2EDuration="19.12960314s" podCreationTimestamp="2025-12-10 10:48:07 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 10:48:25.998790854 +0000 UTC m=+210.852184297" watchObservedRunningTime="2025-12-10 10:48:26.12960314 +0000 UTC m=+210.982996593" Dec 10 10:48:26 crc kubenswrapper[4780]: I1210 10:48:26.815544 4780 
patch_prober.go:28] interesting pod/router-default-5444994796-qbmwm container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Dec 10 10:48:26 crc kubenswrapper[4780]: [-]has-synced failed: reason withheld Dec 10 10:48:26 crc kubenswrapper[4780]: [+]process-running ok Dec 10 10:48:26 crc kubenswrapper[4780]: healthz check failed Dec 10 10:48:26 crc kubenswrapper[4780]: I1210 10:48:26.816168 4780 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-qbmwm" podUID="1226614f-560f-40b9-81a2-595e79043653" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Dec 10 10:48:27 crc kubenswrapper[4780]: I1210 10:48:27.762423 4780 patch_prober.go:28] interesting pod/openshift-config-operator-7777fb866f-q6q9q container/openshift-config-operator namespace/openshift-config-operator: Readiness probe status=failure output="Get \"https://10.217.0.20:8443/healthz\": dial tcp 10.217.0.20:8443: connect: connection refused" start-of-body= Dec 10 10:48:27 crc kubenswrapper[4780]: I1210 10:48:27.762530 4780 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-config-operator/openshift-config-operator-7777fb866f-q6q9q" podUID="027ecd1e-0802-4c3a-b42a-4e272ee3f6fc" containerName="openshift-config-operator" probeResult="failure" output="Get \"https://10.217.0.20:8443/healthz\": dial tcp 10.217.0.20:8443: connect: connection refused" Dec 10 10:48:27 crc kubenswrapper[4780]: I1210 10:48:27.770871 4780 patch_prober.go:28] interesting pod/machine-config-daemon-xhdr5 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 10 10:48:27 crc kubenswrapper[4780]: I1210 10:48:27.770969 4780 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" podUID="6bf1dca1-b191-4796-b326-baac53e84045" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 10 10:48:27 crc kubenswrapper[4780]: I1210 10:48:27.832724 4780 patch_prober.go:28] interesting pod/router-default-5444994796-qbmwm container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Dec 10 10:48:27 crc kubenswrapper[4780]: [-]has-synced failed: reason withheld Dec 10 10:48:27 crc kubenswrapper[4780]: [+]process-running ok Dec 10 10:48:27 crc kubenswrapper[4780]: healthz check failed Dec 10 10:48:27 crc kubenswrapper[4780]: I1210 10:48:27.832801 4780 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-qbmwm" podUID="1226614f-560f-40b9-81a2-595e79043653" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Dec 10 10:48:27 crc kubenswrapper[4780]: I1210 10:48:27.950543 4780 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-apiserver/apiserver-76f77b778f-9hgsw" Dec 10 10:48:28 crc kubenswrapper[4780]: I1210 10:48:28.096631 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-7sdgl"] Dec 10 10:48:28 crc kubenswrapper[4780]: I1210 10:48:28.096780 4780 
kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-apiserver/apiserver-76f77b778f-9hgsw" Dec 10 10:48:28 crc kubenswrapper[4780]: I1210 10:48:28.138876 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-pdmqg"] Dec 10 10:48:28 crc kubenswrapper[4780]: I1210 10:48:28.148173 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-dmtlf"] Dec 10 10:48:28 crc kubenswrapper[4780]: I1210 10:48:28.170021 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-jmx2c"] Dec 10 10:48:28 crc kubenswrapper[4780]: I1210 10:48:28.174138 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-4rw4n"] Dec 10 10:48:28 crc kubenswrapper[4780]: I1210 10:48:28.181759 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-zms8r"] Dec 10 10:48:28 crc kubenswrapper[4780]: I1210 10:48:28.189168 4780 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc" Dec 10 10:48:28 crc kubenswrapper[4780]: I1210 10:48:28.200468 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-w2bd9"] Dec 10 10:48:28 crc kubenswrapper[4780]: I1210 10:48:28.203999 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-z8b9w"] Dec 10 10:48:28 crc kubenswrapper[4780]: W1210 10:48:28.322948 4780 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pode941c8a4_98a7_48d2_9ec6_4e2dec741b54.slice/crio-719eb0a09273bf8233aaa8e9727236f0ee0fac0587cbacd4b88f79b41238376e WatchSource:0}: Error finding container 719eb0a09273bf8233aaa8e9727236f0ee0fac0587cbacd4b88f79b41238376e: Status 404 returned error can't find the container with id 719eb0a09273bf8233aaa8e9727236f0ee0fac0587cbacd4b88f79b41238376e Dec 10 10:48:28 crc kubenswrapper[4780]: I1210 10:48:28.379861 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/d5885af1-d160-445e-9c10-fb97487ef916-kubelet-dir\") pod \"d5885af1-d160-445e-9c10-fb97487ef916\" (UID: \"d5885af1-d160-445e-9c10-fb97487ef916\") " Dec 10 10:48:28 crc kubenswrapper[4780]: I1210 10:48:28.380410 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/d5885af1-d160-445e-9c10-fb97487ef916-kube-api-access\") pod \"d5885af1-d160-445e-9c10-fb97487ef916\" (UID: \"d5885af1-d160-445e-9c10-fb97487ef916\") " Dec 10 10:48:28 crc kubenswrapper[4780]: I1210 10:48:28.381965 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/d5885af1-d160-445e-9c10-fb97487ef916-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "d5885af1-d160-445e-9c10-fb97487ef916" (UID: "d5885af1-d160-445e-9c10-fb97487ef916"). InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 10 10:48:28 crc kubenswrapper[4780]: I1210 10:48:28.396338 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d5885af1-d160-445e-9c10-fb97487ef916-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "d5885af1-d160-445e-9c10-fb97487ef916" (UID: "d5885af1-d160-445e-9c10-fb97487ef916"). 
InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 10:48:28 crc kubenswrapper[4780]: I1210 10:48:28.446571 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-25gsf"] Dec 10 10:48:28 crc kubenswrapper[4780]: I1210 10:48:28.488241 4780 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/d5885af1-d160-445e-9c10-fb97487ef916-kubelet-dir\") on node \"crc\" DevicePath \"\"" Dec 10 10:48:28 crc kubenswrapper[4780]: I1210 10:48:28.488311 4780 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/d5885af1-d160-445e-9c10-fb97487ef916-kube-api-access\") on node \"crc\" DevicePath \"\"" Dec 10 10:48:28 crc kubenswrapper[4780]: I1210 10:48:28.818252 4780 patch_prober.go:28] interesting pod/router-default-5444994796-qbmwm container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Dec 10 10:48:28 crc kubenswrapper[4780]: [-]has-synced failed: reason withheld Dec 10 10:48:28 crc kubenswrapper[4780]: [+]process-running ok Dec 10 10:48:28 crc kubenswrapper[4780]: healthz check failed Dec 10 10:48:28 crc kubenswrapper[4780]: I1210 10:48:28.818347 4780 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-qbmwm" podUID="1226614f-560f-40b9-81a2-595e79043653" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Dec 10 10:48:28 crc kubenswrapper[4780]: I1210 10:48:28.842440 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-jmx2c" event={"ID":"9a59ce5a-0c36-4120-be63-8f2051a58e78","Type":"ContainerStarted","Data":"a4a19ad29eebef4b448fcc67a3babccd410651fbc9bdaaba619ab6f5d10e4620"} Dec 10 10:48:28 crc kubenswrapper[4780]: I1210 10:48:28.870373 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-pdmqg" event={"ID":"59f1ed36-eccd-4cd4-af95-f32539d40314","Type":"ContainerStarted","Data":"ccd9ed93ea4e0f49a1291b90acee2e0b4004e8272a46b83c143bf1d23ade9333"} Dec 10 10:48:28 crc kubenswrapper[4780]: I1210 10:48:28.875254 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-w2bd9" event={"ID":"eee88117-019c-44a5-8a7f-95a655e53a27","Type":"ContainerStarted","Data":"82594c8c7c6e340f24019b36a4664a1857b60b07ac93f817dba36141d331e5a4"} Dec 10 10:48:28 crc kubenswrapper[4780]: I1210 10:48:28.907856 4780 generic.go:334] "Generic (PLEG): container finished" podID="09fe7cda-3948-484d-bcd9-e83d1ac0610a" containerID="deb50716f11ade378405f94e5dfb23374756041d9247c96b96f862d026d8a63e" exitCode=0 Dec 10 10:48:28 crc kubenswrapper[4780]: I1210 10:48:28.908007 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-7954f5f757-fxxzq" event={"ID":"09fe7cda-3948-484d-bcd9-e83d1ac0610a","Type":"ContainerDied","Data":"deb50716f11ade378405f94e5dfb23374756041d9247c96b96f862d026d8a63e"} Dec 10 10:48:28 crc kubenswrapper[4780]: I1210 10:48:28.913591 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-dmtlf" event={"ID":"e941c8a4-98a7-48d2-9ec6-4e2dec741b54","Type":"ContainerStarted","Data":"719eb0a09273bf8233aaa8e9727236f0ee0fac0587cbacd4b88f79b41238376e"} Dec 10 10:48:28 crc kubenswrapper[4780]: I1210 
10:48:28.921806 4780 generic.go:334] "Generic (PLEG): container finished" podID="3ad0d4e6-e148-4fef-aeb8-c0370122c344" containerID="87b22867655208f29544f3617dfbc57579b407d3fc98425a1e2180f22b68fa5a" exitCode=0 Dec 10 10:48:28 crc kubenswrapper[4780]: I1210 10:48:28.921963 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"3ad0d4e6-e148-4fef-aeb8-c0370122c344","Type":"ContainerDied","Data":"87b22867655208f29544f3617dfbc57579b407d3fc98425a1e2180f22b68fa5a"} Dec 10 10:48:28 crc kubenswrapper[4780]: I1210 10:48:28.950006 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-config-operator_openshift-config-operator-7777fb866f-q6q9q_027ecd1e-0802-4c3a-b42a-4e272ee3f6fc/openshift-config-operator/1.log" Dec 10 10:48:28 crc kubenswrapper[4780]: I1210 10:48:28.956032 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-q6q9q" event={"ID":"027ecd1e-0802-4c3a-b42a-4e272ee3f6fc","Type":"ContainerStarted","Data":"ec5fd02ea17715debf5cf19773107a2cb72b38e5cce2145ced4992b3350b1cb8"} Dec 10 10:48:28 crc kubenswrapper[4780]: I1210 10:48:28.956147 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-config-operator/openshift-config-operator-7777fb866f-q6q9q" Dec 10 10:48:28 crc kubenswrapper[4780]: I1210 10:48:28.971481 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-4rw4n" event={"ID":"a586027d-c0c6-4647-9318-23727f40a928","Type":"ContainerStarted","Data":"467c1620a254e16d0c023a9eff579ca083791d523bbf2f043a936680ff0f97b3"} Dec 10 10:48:28 crc kubenswrapper[4780]: I1210 10:48:28.985722 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"d5885af1-d160-445e-9c10-fb97487ef916","Type":"ContainerDied","Data":"989048f8991592f53e74997c80b7d4484c1d1c19f4537e13e85079d2a6288456"} Dec 10 10:48:28 crc kubenswrapper[4780]: I1210 10:48:28.985791 4780 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="989048f8991592f53e74997c80b7d4484c1d1c19f4537e13e85079d2a6288456" Dec 10 10:48:28 crc kubenswrapper[4780]: I1210 10:48:28.985941 4780 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc" Dec 10 10:48:28 crc kubenswrapper[4780]: I1210 10:48:28.990707 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-z8b9w" event={"ID":"3234cf0e-6206-4a41-8474-f1893163954f","Type":"ContainerStarted","Data":"dcefa9125bd9f98a7cfeb73ecf19d603323ecd2f3a802f8e76ae2be265a9f29f"} Dec 10 10:48:28 crc kubenswrapper[4780]: I1210 10:48:28.999242 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-zms8r" event={"ID":"73eb2180-ca1d-4860-9306-982a9b3930b9","Type":"ContainerStarted","Data":"301711943ea4535a8eaeb1986744bf60809f1cae1e308d120c426f2c5121f91c"} Dec 10 10:48:29 crc kubenswrapper[4780]: I1210 10:48:29.013835 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-25gsf" event={"ID":"b7abac51-adc5-42fa-9084-033e4e7e7acb","Type":"ContainerStarted","Data":"8741fac9d147b7aa199416b76d1e641d7a6af31295564264120a8d9720d1d0b3"} Dec 10 10:48:29 crc kubenswrapper[4780]: I1210 10:48:29.035525 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-7sdgl" event={"ID":"983b01b2-448a-462b-a87c-8d66c7824940","Type":"ContainerStarted","Data":"f3bf79a4f10bc9e0ec93b6bf81163f03bc8eaa24ccd42bce6ee80e87707fb3e0"} Dec 10 10:48:29 crc kubenswrapper[4780]: I1210 10:48:29.846499 4780 patch_prober.go:28] interesting pod/router-default-5444994796-qbmwm container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Dec 10 10:48:29 crc kubenswrapper[4780]: [-]has-synced failed: reason withheld Dec 10 10:48:29 crc kubenswrapper[4780]: [+]process-running ok Dec 10 10:48:29 crc kubenswrapper[4780]: healthz check failed Dec 10 10:48:29 crc kubenswrapper[4780]: I1210 10:48:29.847241 4780 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-qbmwm" podUID="1226614f-560f-40b9-81a2-595e79043653" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Dec 10 10:48:30 crc kubenswrapper[4780]: I1210 10:48:30.046052 4780 generic.go:334] "Generic (PLEG): container finished" podID="a586027d-c0c6-4647-9318-23727f40a928" containerID="05abb5bae6a8325e7175a6322ba9238784e131dbf884fbb638440b479afd0894" exitCode=0 Dec 10 10:48:30 crc kubenswrapper[4780]: I1210 10:48:30.046171 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-4rw4n" event={"ID":"a586027d-c0c6-4647-9318-23727f40a928","Type":"ContainerDied","Data":"05abb5bae6a8325e7175a6322ba9238784e131dbf884fbb638440b479afd0894"} Dec 10 10:48:30 crc kubenswrapper[4780]: I1210 10:48:30.049011 4780 generic.go:334] "Generic (PLEG): container finished" podID="73eb2180-ca1d-4860-9306-982a9b3930b9" containerID="cce45565e624e60bcce24122b2e5905b07c6dacda21fd7a2a4204e55acdeb6d5" exitCode=0 Dec 10 10:48:30 crc kubenswrapper[4780]: I1210 10:48:30.049223 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-zms8r" event={"ID":"73eb2180-ca1d-4860-9306-982a9b3930b9","Type":"ContainerDied","Data":"cce45565e624e60bcce24122b2e5905b07c6dacda21fd7a2a4204e55acdeb6d5"} Dec 10 10:48:30 crc kubenswrapper[4780]: I1210 10:48:30.050883 4780 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Dec 10 10:48:30 crc 
kubenswrapper[4780]: I1210 10:48:30.055477 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-25gsf" event={"ID":"b7abac51-adc5-42fa-9084-033e4e7e7acb","Type":"ContainerStarted","Data":"a94d644d3444419d081379b16721f67e94d462caab5fc48fc02aff3a2f18b54d"} Dec 10 10:48:30 crc kubenswrapper[4780]: I1210 10:48:30.055663 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-image-registry/image-registry-697d97f7c8-25gsf" Dec 10 10:48:30 crc kubenswrapper[4780]: I1210 10:48:30.059396 4780 generic.go:334] "Generic (PLEG): container finished" podID="983b01b2-448a-462b-a87c-8d66c7824940" containerID="824b8cfa81539bbf8eb4c4cf207a185d22c32d0f0fba83be23f33e8020532592" exitCode=0 Dec 10 10:48:30 crc kubenswrapper[4780]: I1210 10:48:30.059870 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-7sdgl" event={"ID":"983b01b2-448a-462b-a87c-8d66c7824940","Type":"ContainerDied","Data":"824b8cfa81539bbf8eb4c4cf207a185d22c32d0f0fba83be23f33e8020532592"} Dec 10 10:48:30 crc kubenswrapper[4780]: I1210 10:48:30.067023 4780 generic.go:334] "Generic (PLEG): container finished" podID="59f1ed36-eccd-4cd4-af95-f32539d40314" containerID="80d3de265f9a2be3c2dbe0286029e1f007b739e5f65470db6f17a6bb88c273ff" exitCode=0 Dec 10 10:48:30 crc kubenswrapper[4780]: I1210 10:48:30.067120 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-pdmqg" event={"ID":"59f1ed36-eccd-4cd4-af95-f32539d40314","Type":"ContainerDied","Data":"80d3de265f9a2be3c2dbe0286029e1f007b739e5f65470db6f17a6bb88c273ff"} Dec 10 10:48:30 crc kubenswrapper[4780]: I1210 10:48:30.071435 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-7954f5f757-fxxzq" event={"ID":"09fe7cda-3948-484d-bcd9-e83d1ac0610a","Type":"ContainerStarted","Data":"1690e66a74588d4a884eff7b7319d746b5fff6d769254efe5c23cb68ca93559e"} Dec 10 10:48:30 crc kubenswrapper[4780]: I1210 10:48:30.071826 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/downloads-7954f5f757-fxxzq" Dec 10 10:48:30 crc kubenswrapper[4780]: I1210 10:48:30.074564 4780 patch_prober.go:28] interesting pod/downloads-7954f5f757-fxxzq container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.10:8080/\": dial tcp 10.217.0.10:8080: connect: connection refused" start-of-body= Dec 10 10:48:30 crc kubenswrapper[4780]: I1210 10:48:30.074696 4780 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-fxxzq" podUID="09fe7cda-3948-484d-bcd9-e83d1ac0610a" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.10:8080/\": dial tcp 10.217.0.10:8080: connect: connection refused" Dec 10 10:48:30 crc kubenswrapper[4780]: I1210 10:48:30.082958 4780 generic.go:334] "Generic (PLEG): container finished" podID="eee88117-019c-44a5-8a7f-95a655e53a27" containerID="2f330049483197fc3fc8845f679b7b8a2b5d6924516d31269a45d57241452e27" exitCode=0 Dec 10 10:48:30 crc kubenswrapper[4780]: I1210 10:48:30.083068 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-w2bd9" event={"ID":"eee88117-019c-44a5-8a7f-95a655e53a27","Type":"ContainerDied","Data":"2f330049483197fc3fc8845f679b7b8a2b5d6924516d31269a45d57241452e27"} Dec 10 10:48:30 crc kubenswrapper[4780]: I1210 10:48:30.089159 4780 generic.go:334] "Generic (PLEG): container 
finished" podID="e941c8a4-98a7-48d2-9ec6-4e2dec741b54" containerID="bcb09aca48c3719eedd5ae3b18893f834e9b0de00d6405d0dcec6f54cb60bfcf" exitCode=0 Dec 10 10:48:30 crc kubenswrapper[4780]: I1210 10:48:30.089272 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-dmtlf" event={"ID":"e941c8a4-98a7-48d2-9ec6-4e2dec741b54","Type":"ContainerDied","Data":"bcb09aca48c3719eedd5ae3b18893f834e9b0de00d6405d0dcec6f54cb60bfcf"} Dec 10 10:48:30 crc kubenswrapper[4780]: I1210 10:48:30.118278 4780 generic.go:334] "Generic (PLEG): container finished" podID="9a59ce5a-0c36-4120-be63-8f2051a58e78" containerID="16efd2c094162ffd9427ac555ed1a726c6c3caec8549fa2d103e09372301d48a" exitCode=0 Dec 10 10:48:30 crc kubenswrapper[4780]: I1210 10:48:30.118371 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-jmx2c" event={"ID":"9a59ce5a-0c36-4120-be63-8f2051a58e78","Type":"ContainerDied","Data":"16efd2c094162ffd9427ac555ed1a726c6c3caec8549fa2d103e09372301d48a"} Dec 10 10:48:30 crc kubenswrapper[4780]: I1210 10:48:30.128581 4780 generic.go:334] "Generic (PLEG): container finished" podID="3234cf0e-6206-4a41-8474-f1893163954f" containerID="6d525e6dca5c42e82d262ae4b4375a3b7649063384c24d0217f2b182b2c71036" exitCode=0 Dec 10 10:48:30 crc kubenswrapper[4780]: I1210 10:48:30.128942 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-z8b9w" event={"ID":"3234cf0e-6206-4a41-8474-f1893163954f","Type":"ContainerDied","Data":"6d525e6dca5c42e82d262ae4b4375a3b7649063384c24d0217f2b182b2c71036"} Dec 10 10:48:30 crc kubenswrapper[4780]: I1210 10:48:30.910728 4780 patch_prober.go:28] interesting pod/router-default-5444994796-qbmwm container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Dec 10 10:48:30 crc kubenswrapper[4780]: [-]has-synced failed: reason withheld Dec 10 10:48:30 crc kubenswrapper[4780]: [+]process-running ok Dec 10 10:48:30 crc kubenswrapper[4780]: healthz check failed Dec 10 10:48:30 crc kubenswrapper[4780]: I1210 10:48:30.911215 4780 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-qbmwm" podUID="1226614f-560f-40b9-81a2-595e79043653" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Dec 10 10:48:31 crc kubenswrapper[4780]: I1210 10:48:31.064748 4780 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc" Dec 10 10:48:31 crc kubenswrapper[4780]: I1210 10:48:31.092013 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/image-registry-697d97f7c8-25gsf" podStartSLOduration=188.091983484 podStartE2EDuration="3m8.091983484s" podCreationTimestamp="2025-12-10 10:45:23 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 10:48:30.567394041 +0000 UTC m=+215.420787504" watchObservedRunningTime="2025-12-10 10:48:31.091983484 +0000 UTC m=+215.945376937" Dec 10 10:48:31 crc kubenswrapper[4780]: I1210 10:48:31.142189 4780 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc" Dec 10 10:48:31 crc kubenswrapper[4780]: I1210 10:48:31.142323 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"3ad0d4e6-e148-4fef-aeb8-c0370122c344","Type":"ContainerDied","Data":"2ef6b6c4f4723968585689a82b14ef814d3e0c607184923dc4c17eaff9f48c43"} Dec 10 10:48:31 crc kubenswrapper[4780]: I1210 10:48:31.142348 4780 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="2ef6b6c4f4723968585689a82b14ef814d3e0c607184923dc4c17eaff9f48c43" Dec 10 10:48:31 crc kubenswrapper[4780]: I1210 10:48:31.143394 4780 patch_prober.go:28] interesting pod/downloads-7954f5f757-fxxzq container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.10:8080/\": dial tcp 10.217.0.10:8080: connect: connection refused" start-of-body= Dec 10 10:48:31 crc kubenswrapper[4780]: I1210 10:48:31.143472 4780 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-fxxzq" podUID="09fe7cda-3948-484d-bcd9-e83d1ac0610a" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.10:8080/\": dial tcp 10.217.0.10:8080: connect: connection refused" Dec 10 10:48:31 crc kubenswrapper[4780]: I1210 10:48:31.213661 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/3ad0d4e6-e148-4fef-aeb8-c0370122c344-kubelet-dir\") pod \"3ad0d4e6-e148-4fef-aeb8-c0370122c344\" (UID: \"3ad0d4e6-e148-4fef-aeb8-c0370122c344\") " Dec 10 10:48:31 crc kubenswrapper[4780]: I1210 10:48:31.213789 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/3ad0d4e6-e148-4fef-aeb8-c0370122c344-kube-api-access\") pod \"3ad0d4e6-e148-4fef-aeb8-c0370122c344\" (UID: \"3ad0d4e6-e148-4fef-aeb8-c0370122c344\") " Dec 10 10:48:31 crc kubenswrapper[4780]: I1210 10:48:31.215163 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/3ad0d4e6-e148-4fef-aeb8-c0370122c344-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "3ad0d4e6-e148-4fef-aeb8-c0370122c344" (UID: "3ad0d4e6-e148-4fef-aeb8-c0370122c344"). InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 10 10:48:31 crc kubenswrapper[4780]: I1210 10:48:31.317725 4780 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/3ad0d4e6-e148-4fef-aeb8-c0370122c344-kubelet-dir\") on node \"crc\" DevicePath \"\"" Dec 10 10:48:31 crc kubenswrapper[4780]: I1210 10:48:31.404408 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3ad0d4e6-e148-4fef-aeb8-c0370122c344-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "3ad0d4e6-e148-4fef-aeb8-c0370122c344" (UID: "3ad0d4e6-e148-4fef-aeb8-c0370122c344"). InnerVolumeSpecName "kube-api-access". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 10:48:31 crc kubenswrapper[4780]: I1210 10:48:31.422398 4780 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/3ad0d4e6-e148-4fef-aeb8-c0370122c344-kube-api-access\") on node \"crc\" DevicePath \"\"" Dec 10 10:48:31 crc kubenswrapper[4780]: I1210 10:48:31.496715 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-hx2j2" Dec 10 10:48:31 crc kubenswrapper[4780]: I1210 10:48:31.829965 4780 patch_prober.go:28] interesting pod/router-default-5444994796-qbmwm container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Dec 10 10:48:31 crc kubenswrapper[4780]: [-]has-synced failed: reason withheld Dec 10 10:48:31 crc kubenswrapper[4780]: [+]process-running ok Dec 10 10:48:31 crc kubenswrapper[4780]: healthz check failed Dec 10 10:48:31 crc kubenswrapper[4780]: I1210 10:48:31.830184 4780 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-qbmwm" podUID="1226614f-560f-40b9-81a2-595e79043653" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Dec 10 10:48:32 crc kubenswrapper[4780]: I1210 10:48:32.802538 4780 patch_prober.go:28] interesting pod/router-default-5444994796-qbmwm container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Dec 10 10:48:32 crc kubenswrapper[4780]: [-]has-synced failed: reason withheld Dec 10 10:48:32 crc kubenswrapper[4780]: [+]process-running ok Dec 10 10:48:32 crc kubenswrapper[4780]: healthz check failed Dec 10 10:48:32 crc kubenswrapper[4780]: I1210 10:48:32.802666 4780 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-qbmwm" podUID="1226614f-560f-40b9-81a2-595e79043653" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Dec 10 10:48:33 crc kubenswrapper[4780]: I1210 10:48:33.169469 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-config-operator/openshift-config-operator-7777fb866f-q6q9q" Dec 10 10:48:33 crc kubenswrapper[4780]: I1210 10:48:33.964285 4780 patch_prober.go:28] interesting pod/router-default-5444994796-qbmwm container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Dec 10 10:48:33 crc kubenswrapper[4780]: [-]has-synced failed: reason withheld Dec 10 10:48:33 crc kubenswrapper[4780]: [+]process-running ok Dec 10 10:48:33 crc kubenswrapper[4780]: healthz check failed Dec 10 10:48:33 crc kubenswrapper[4780]: I1210 10:48:33.964402 4780 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-qbmwm" podUID="1226614f-560f-40b9-81a2-595e79043653" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Dec 10 10:48:35 crc kubenswrapper[4780]: I1210 10:48:35.005613 4780 patch_prober.go:28] interesting pod/router-default-5444994796-qbmwm container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason 
withheld Dec 10 10:48:35 crc kubenswrapper[4780]: [-]has-synced failed: reason withheld Dec 10 10:48:35 crc kubenswrapper[4780]: [+]process-running ok Dec 10 10:48:35 crc kubenswrapper[4780]: healthz check failed Dec 10 10:48:35 crc kubenswrapper[4780]: I1210 10:48:35.006294 4780 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-qbmwm" podUID="1226614f-560f-40b9-81a2-595e79043653" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Dec 10 10:48:35 crc kubenswrapper[4780]: I1210 10:48:35.235363 4780 patch_prober.go:28] interesting pod/downloads-7954f5f757-fxxzq container/download-server namespace/openshift-console: Liveness probe status=failure output="Get \"http://10.217.0.10:8080/\": dial tcp 10.217.0.10:8080: connect: connection refused" start-of-body= Dec 10 10:48:35 crc kubenswrapper[4780]: I1210 10:48:35.235436 4780 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console/downloads-7954f5f757-fxxzq" podUID="09fe7cda-3948-484d-bcd9-e83d1ac0610a" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.10:8080/\": dial tcp 10.217.0.10:8080: connect: connection refused" Dec 10 10:48:35 crc kubenswrapper[4780]: I1210 10:48:35.235577 4780 patch_prober.go:28] interesting pod/downloads-7954f5f757-fxxzq container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.10:8080/\": dial tcp 10.217.0.10:8080: connect: connection refused" start-of-body= Dec 10 10:48:35 crc kubenswrapper[4780]: I1210 10:48:35.235654 4780 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-fxxzq" podUID="09fe7cda-3948-484d-bcd9-e83d1ac0610a" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.10:8080/\": dial tcp 10.217.0.10:8080: connect: connection refused" Dec 10 10:48:35 crc kubenswrapper[4780]: I1210 10:48:35.815907 4780 patch_prober.go:28] interesting pod/router-default-5444994796-qbmwm container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Dec 10 10:48:35 crc kubenswrapper[4780]: [-]has-synced failed: reason withheld Dec 10 10:48:35 crc kubenswrapper[4780]: [+]process-running ok Dec 10 10:48:35 crc kubenswrapper[4780]: healthz check failed Dec 10 10:48:35 crc kubenswrapper[4780]: I1210 10:48:35.816042 4780 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-qbmwm" podUID="1226614f-560f-40b9-81a2-595e79043653" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Dec 10 10:48:35 crc kubenswrapper[4780]: I1210 10:48:35.833808 4780 patch_prober.go:28] interesting pod/console-f9d7485db-2dwc9 container/console namespace/openshift-console: Startup probe status=failure output="Get \"https://10.217.0.35:8443/health\": dial tcp 10.217.0.35:8443: connect: connection refused" start-of-body= Dec 10 10:48:35 crc kubenswrapper[4780]: I1210 10:48:35.833974 4780 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-console/console-f9d7485db-2dwc9" podUID="c79bfa07-4a71-4560-b706-ac6c81b10ddc" containerName="console" probeResult="failure" output="Get \"https://10.217.0.35:8443/health\": dial tcp 10.217.0.35:8443: connect: connection refused" Dec 10 10:48:36 crc kubenswrapper[4780]: I1210 10:48:36.831134 4780 patch_prober.go:28] interesting 
pod/router-default-5444994796-qbmwm container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Dec 10 10:48:36 crc kubenswrapper[4780]: [+]has-synced ok Dec 10 10:48:36 crc kubenswrapper[4780]: [+]process-running ok Dec 10 10:48:36 crc kubenswrapper[4780]: healthz check failed Dec 10 10:48:36 crc kubenswrapper[4780]: I1210 10:48:36.831241 4780 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-qbmwm" podUID="1226614f-560f-40b9-81a2-595e79043653" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Dec 10 10:48:38 crc kubenswrapper[4780]: I1210 10:48:38.493264 4780 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-ingress/router-default-5444994796-qbmwm" Dec 10 10:48:38 crc kubenswrapper[4780]: I1210 10:48:38.518931 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ingress/router-default-5444994796-qbmwm" Dec 10 10:48:45 crc kubenswrapper[4780]: I1210 10:48:45.202500 4780 patch_prober.go:28] interesting pod/downloads-7954f5f757-fxxzq container/download-server namespace/openshift-console: Liveness probe status=failure output="Get \"http://10.217.0.10:8080/\": dial tcp 10.217.0.10:8080: connect: connection refused" start-of-body= Dec 10 10:48:45 crc kubenswrapper[4780]: I1210 10:48:45.208090 4780 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console/downloads-7954f5f757-fxxzq" podUID="09fe7cda-3948-484d-bcd9-e83d1ac0610a" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.10:8080/\": dial tcp 10.217.0.10:8080: connect: connection refused" Dec 10 10:48:45 crc kubenswrapper[4780]: I1210 10:48:45.206131 4780 patch_prober.go:28] interesting pod/downloads-7954f5f757-fxxzq container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.10:8080/\": dial tcp 10.217.0.10:8080: connect: connection refused" start-of-body= Dec 10 10:48:45 crc kubenswrapper[4780]: I1210 10:48:45.210970 4780 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-fxxzq" podUID="09fe7cda-3948-484d-bcd9-e83d1ac0610a" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.10:8080/\": dial tcp 10.217.0.10:8080: connect: connection refused" Dec 10 10:48:46 crc kubenswrapper[4780]: I1210 10:48:46.131671 4780 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-console/console-f9d7485db-2dwc9" Dec 10 10:48:46 crc kubenswrapper[4780]: I1210 10:48:46.156949 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/console-f9d7485db-2dwc9" Dec 10 10:48:53 crc kubenswrapper[4780]: I1210 10:48:53.827075 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-image-registry/image-registry-697d97f7c8-25gsf" Dec 10 10:48:55 crc kubenswrapper[4780]: I1210 10:48:55.195220 4780 patch_prober.go:28] interesting pod/downloads-7954f5f757-fxxzq container/download-server namespace/openshift-console: Liveness probe status=failure output="Get \"http://10.217.0.10:8080/\": dial tcp 10.217.0.10:8080: connect: connection refused" start-of-body= Dec 10 10:48:55 crc kubenswrapper[4780]: I1210 10:48:55.195823 4780 prober.go:107] "Probe failed" probeType="Liveness" 
pod="openshift-console/downloads-7954f5f757-fxxzq" podUID="09fe7cda-3948-484d-bcd9-e83d1ac0610a" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.10:8080/\": dial tcp 10.217.0.10:8080: connect: connection refused" Dec 10 10:48:55 crc kubenswrapper[4780]: I1210 10:48:55.195955 4780 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-console/downloads-7954f5f757-fxxzq" Dec 10 10:48:55 crc kubenswrapper[4780]: I1210 10:48:55.195263 4780 patch_prober.go:28] interesting pod/downloads-7954f5f757-fxxzq container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.10:8080/\": dial tcp 10.217.0.10:8080: connect: connection refused" start-of-body= Dec 10 10:48:55 crc kubenswrapper[4780]: I1210 10:48:55.196071 4780 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-fxxzq" podUID="09fe7cda-3948-484d-bcd9-e83d1ac0610a" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.10:8080/\": dial tcp 10.217.0.10:8080: connect: connection refused" Dec 10 10:48:55 crc kubenswrapper[4780]: I1210 10:48:55.196504 4780 patch_prober.go:28] interesting pod/downloads-7954f5f757-fxxzq container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.10:8080/\": dial tcp 10.217.0.10:8080: connect: connection refused" start-of-body= Dec 10 10:48:55 crc kubenswrapper[4780]: I1210 10:48:55.196531 4780 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-fxxzq" podUID="09fe7cda-3948-484d-bcd9-e83d1ac0610a" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.10:8080/\": dial tcp 10.217.0.10:8080: connect: connection refused" Dec 10 10:48:55 crc kubenswrapper[4780]: I1210 10:48:55.196869 4780 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="download-server" containerStatusID={"Type":"cri-o","ID":"1690e66a74588d4a884eff7b7319d746b5fff6d769254efe5c23cb68ca93559e"} pod="openshift-console/downloads-7954f5f757-fxxzq" containerMessage="Container download-server failed liveness probe, will be restarted" Dec 10 10:48:55 crc kubenswrapper[4780]: I1210 10:48:55.196988 4780 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-console/downloads-7954f5f757-fxxzq" podUID="09fe7cda-3948-484d-bcd9-e83d1ac0610a" containerName="download-server" containerID="cri-o://1690e66a74588d4a884eff7b7319d746b5fff6d769254efe5c23cb68ca93559e" gracePeriod=2 Dec 10 10:48:57 crc kubenswrapper[4780]: I1210 10:48:57.476200 4780 patch_prober.go:28] interesting pod/machine-config-daemon-xhdr5 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 10 10:48:57 crc kubenswrapper[4780]: I1210 10:48:57.476293 4780 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" podUID="6bf1dca1-b191-4796-b326-baac53e84045" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 10 10:48:57 crc kubenswrapper[4780]: I1210 10:48:57.476365 4780 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" 
pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" Dec 10 10:48:57 crc kubenswrapper[4780]: I1210 10:48:57.477252 4780 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"57ec3f3a4da992b4288db7d6633da18e479fdc56b10c503a2a44368671882f0d"} pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Dec 10 10:48:57 crc kubenswrapper[4780]: I1210 10:48:57.477325 4780 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" podUID="6bf1dca1-b191-4796-b326-baac53e84045" containerName="machine-config-daemon" containerID="cri-o://57ec3f3a4da992b4288db7d6633da18e479fdc56b10c503a2a44368671882f0d" gracePeriod=600 Dec 10 10:48:58 crc kubenswrapper[4780]: I1210 10:48:58.205500 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/revision-pruner-9-crc"] Dec 10 10:48:58 crc kubenswrapper[4780]: E1210 10:48:58.206677 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3ad0d4e6-e148-4fef-aeb8-c0370122c344" containerName="pruner" Dec 10 10:48:58 crc kubenswrapper[4780]: I1210 10:48:58.206718 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="3ad0d4e6-e148-4fef-aeb8-c0370122c344" containerName="pruner" Dec 10 10:48:58 crc kubenswrapper[4780]: E1210 10:48:58.206784 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d5885af1-d160-445e-9c10-fb97487ef916" containerName="pruner" Dec 10 10:48:58 crc kubenswrapper[4780]: I1210 10:48:58.206795 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="d5885af1-d160-445e-9c10-fb97487ef916" containerName="pruner" Dec 10 10:48:58 crc kubenswrapper[4780]: I1210 10:48:58.207076 4780 memory_manager.go:354] "RemoveStaleState removing state" podUID="3ad0d4e6-e148-4fef-aeb8-c0370122c344" containerName="pruner" Dec 10 10:48:58 crc kubenswrapper[4780]: I1210 10:48:58.207121 4780 memory_manager.go:354] "RemoveStaleState removing state" podUID="d5885af1-d160-445e-9c10-fb97487ef916" containerName="pruner" Dec 10 10:48:58 crc kubenswrapper[4780]: I1210 10:48:58.208478 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-9-crc" Dec 10 10:48:58 crc kubenswrapper[4780]: I1210 10:48:58.213499 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver"/"kube-root-ca.crt" Dec 10 10:48:58 crc kubenswrapper[4780]: I1210 10:48:58.215599 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver"/"installer-sa-dockercfg-5pr6n" Dec 10 10:48:58 crc kubenswrapper[4780]: I1210 10:48:58.230967 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-9-crc"] Dec 10 10:48:58 crc kubenswrapper[4780]: I1210 10:48:58.270882 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/6e92a5ce-d8d1-4368-9c76-78d3dcf9471b-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"6e92a5ce-d8d1-4368-9c76-78d3dcf9471b\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Dec 10 10:48:58 crc kubenswrapper[4780]: I1210 10:48:58.271059 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/6e92a5ce-d8d1-4368-9c76-78d3dcf9471b-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"6e92a5ce-d8d1-4368-9c76-78d3dcf9471b\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Dec 10 10:48:58 crc kubenswrapper[4780]: I1210 10:48:58.373550 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/6e92a5ce-d8d1-4368-9c76-78d3dcf9471b-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"6e92a5ce-d8d1-4368-9c76-78d3dcf9471b\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Dec 10 10:48:58 crc kubenswrapper[4780]: I1210 10:48:58.373676 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/6e92a5ce-d8d1-4368-9c76-78d3dcf9471b-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"6e92a5ce-d8d1-4368-9c76-78d3dcf9471b\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Dec 10 10:48:58 crc kubenswrapper[4780]: I1210 10:48:58.373906 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/6e92a5ce-d8d1-4368-9c76-78d3dcf9471b-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"6e92a5ce-d8d1-4368-9c76-78d3dcf9471b\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Dec 10 10:48:58 crc kubenswrapper[4780]: I1210 10:48:58.412969 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/6e92a5ce-d8d1-4368-9c76-78d3dcf9471b-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"6e92a5ce-d8d1-4368-9c76-78d3dcf9471b\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Dec 10 10:48:58 crc kubenswrapper[4780]: I1210 10:48:58.684665 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-9-crc" Dec 10 10:48:59 crc kubenswrapper[4780]: I1210 10:48:59.197677 4780 generic.go:334] "Generic (PLEG): container finished" podID="09fe7cda-3948-484d-bcd9-e83d1ac0610a" containerID="1690e66a74588d4a884eff7b7319d746b5fff6d769254efe5c23cb68ca93559e" exitCode=0 Dec 10 10:48:59 crc kubenswrapper[4780]: I1210 10:48:59.197745 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-7954f5f757-fxxzq" event={"ID":"09fe7cda-3948-484d-bcd9-e83d1ac0610a","Type":"ContainerDied","Data":"1690e66a74588d4a884eff7b7319d746b5fff6d769254efe5c23cb68ca93559e"} Dec 10 10:48:59 crc kubenswrapper[4780]: I1210 10:48:59.197838 4780 scope.go:117] "RemoveContainer" containerID="deb50716f11ade378405f94e5dfb23374756041d9247c96b96f862d026d8a63e" Dec 10 10:49:03 crc kubenswrapper[4780]: I1210 10:49:03.905557 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/installer-9-crc"] Dec 10 10:49:03 crc kubenswrapper[4780]: I1210 10:49:03.907741 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/installer-9-crc" Dec 10 10:49:03 crc kubenswrapper[4780]: I1210 10:49:03.920455 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/installer-9-crc"] Dec 10 10:49:04 crc kubenswrapper[4780]: I1210 10:49:04.017768 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/c7b993a1-1915-40e6-b88a-3606990443e1-kubelet-dir\") pod \"installer-9-crc\" (UID: \"c7b993a1-1915-40e6-b88a-3606990443e1\") " pod="openshift-kube-apiserver/installer-9-crc" Dec 10 10:49:04 crc kubenswrapper[4780]: I1210 10:49:04.018053 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/c7b993a1-1915-40e6-b88a-3606990443e1-kube-api-access\") pod \"installer-9-crc\" (UID: \"c7b993a1-1915-40e6-b88a-3606990443e1\") " pod="openshift-kube-apiserver/installer-9-crc" Dec 10 10:49:04 crc kubenswrapper[4780]: I1210 10:49:04.018154 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/c7b993a1-1915-40e6-b88a-3606990443e1-var-lock\") pod \"installer-9-crc\" (UID: \"c7b993a1-1915-40e6-b88a-3606990443e1\") " pod="openshift-kube-apiserver/installer-9-crc" Dec 10 10:49:04 crc kubenswrapper[4780]: I1210 10:49:04.119656 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/c7b993a1-1915-40e6-b88a-3606990443e1-kube-api-access\") pod \"installer-9-crc\" (UID: \"c7b993a1-1915-40e6-b88a-3606990443e1\") " pod="openshift-kube-apiserver/installer-9-crc" Dec 10 10:49:04 crc kubenswrapper[4780]: I1210 10:49:04.120056 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/c7b993a1-1915-40e6-b88a-3606990443e1-var-lock\") pod \"installer-9-crc\" (UID: \"c7b993a1-1915-40e6-b88a-3606990443e1\") " pod="openshift-kube-apiserver/installer-9-crc" Dec 10 10:49:04 crc kubenswrapper[4780]: I1210 10:49:04.120177 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/c7b993a1-1915-40e6-b88a-3606990443e1-kubelet-dir\") pod \"installer-9-crc\" (UID: 
\"c7b993a1-1915-40e6-b88a-3606990443e1\") " pod="openshift-kube-apiserver/installer-9-crc" Dec 10 10:49:04 crc kubenswrapper[4780]: I1210 10:49:04.120172 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/c7b993a1-1915-40e6-b88a-3606990443e1-var-lock\") pod \"installer-9-crc\" (UID: \"c7b993a1-1915-40e6-b88a-3606990443e1\") " pod="openshift-kube-apiserver/installer-9-crc" Dec 10 10:49:04 crc kubenswrapper[4780]: I1210 10:49:04.120220 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/c7b993a1-1915-40e6-b88a-3606990443e1-kubelet-dir\") pod \"installer-9-crc\" (UID: \"c7b993a1-1915-40e6-b88a-3606990443e1\") " pod="openshift-kube-apiserver/installer-9-crc" Dec 10 10:49:04 crc kubenswrapper[4780]: I1210 10:49:04.740766 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/c7b993a1-1915-40e6-b88a-3606990443e1-kube-api-access\") pod \"installer-9-crc\" (UID: \"c7b993a1-1915-40e6-b88a-3606990443e1\") " pod="openshift-kube-apiserver/installer-9-crc" Dec 10 10:49:04 crc kubenswrapper[4780]: I1210 10:49:04.841139 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/installer-9-crc" Dec 10 10:49:05 crc kubenswrapper[4780]: I1210 10:49:05.195369 4780 patch_prober.go:28] interesting pod/downloads-7954f5f757-fxxzq container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.10:8080/\": dial tcp 10.217.0.10:8080: connect: connection refused" start-of-body= Dec 10 10:49:05 crc kubenswrapper[4780]: I1210 10:49:05.195495 4780 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-fxxzq" podUID="09fe7cda-3948-484d-bcd9-e83d1ac0610a" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.10:8080/\": dial tcp 10.217.0.10:8080: connect: connection refused" Dec 10 10:49:10 crc kubenswrapper[4780]: I1210 10:49:10.826752 4780 generic.go:334] "Generic (PLEG): container finished" podID="6bf1dca1-b191-4796-b326-baac53e84045" containerID="57ec3f3a4da992b4288db7d6633da18e479fdc56b10c503a2a44368671882f0d" exitCode=0 Dec 10 10:49:10 crc kubenswrapper[4780]: I1210 10:49:10.826858 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" event={"ID":"6bf1dca1-b191-4796-b326-baac53e84045","Type":"ContainerDied","Data":"57ec3f3a4da992b4288db7d6633da18e479fdc56b10c503a2a44368671882f0d"} Dec 10 10:49:15 crc kubenswrapper[4780]: I1210 10:49:15.199894 4780 patch_prober.go:28] interesting pod/downloads-7954f5f757-fxxzq container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.10:8080/\": dial tcp 10.217.0.10:8080: connect: connection refused" start-of-body= Dec 10 10:49:15 crc kubenswrapper[4780]: I1210 10:49:15.202503 4780 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-fxxzq" podUID="09fe7cda-3948-484d-bcd9-e83d1ac0610a" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.10:8080/\": dial tcp 10.217.0.10:8080: connect: connection refused" Dec 10 10:49:25 crc kubenswrapper[4780]: I1210 10:49:25.195387 4780 patch_prober.go:28] interesting pod/downloads-7954f5f757-fxxzq container/download-server namespace/openshift-console: 
Readiness probe status=failure output="Get \"http://10.217.0.10:8080/\": dial tcp 10.217.0.10:8080: connect: connection refused" start-of-body= Dec 10 10:49:25 crc kubenswrapper[4780]: I1210 10:49:25.196271 4780 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-fxxzq" podUID="09fe7cda-3948-484d-bcd9-e83d1ac0610a" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.10:8080/\": dial tcp 10.217.0.10:8080: connect: connection refused" Dec 10 10:49:25 crc kubenswrapper[4780]: E1210 10:49:25.437479 4780 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/certified-operator-index:v4.18" Dec 10 10:49:25 crc kubenswrapper[4780]: E1210 10:49:25.438187 4780 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/certified-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-czq5m,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod certified-operators-pdmqg_openshift-marketplace(59f1ed36-eccd-4cd4-af95-f32539d40314): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Dec 10 10:49:25 crc kubenswrapper[4780]: E1210 10:49:25.439536 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/certified-operators-pdmqg" podUID="59f1ed36-eccd-4cd4-af95-f32539d40314" Dec 10 10:49:27 crc kubenswrapper[4780]: E1210 10:49:27.617778 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"\"" pod="openshift-marketplace/certified-operators-pdmqg" podUID="59f1ed36-eccd-4cd4-af95-f32539d40314" Dec 10 10:49:27 crc 
kubenswrapper[4780]: E1210 10:49:27.771957 4780 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/redhat-marketplace-index:v4.18" Dec 10 10:49:27 crc kubenswrapper[4780]: E1210 10:49:27.772212 4780 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-marketplace-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-xppl9,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-marketplace-4rw4n_openshift-marketplace(a586027d-c0c6-4647-9318-23727f40a928): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Dec 10 10:49:27 crc kubenswrapper[4780]: E1210 10:49:27.773738 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/redhat-marketplace-4rw4n" podUID="a586027d-c0c6-4647-9318-23727f40a928" Dec 10 10:49:27 crc kubenswrapper[4780]: E1210 10:49:27.786401 4780 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/certified-operator-index:v4.18" Dec 10 10:49:27 crc kubenswrapper[4780]: E1210 10:49:27.786657 4780 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/certified-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache 
--cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-7k8m7,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod certified-operators-zms8r_openshift-marketplace(73eb2180-ca1d-4860-9306-982a9b3930b9): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Dec 10 10:49:27 crc kubenswrapper[4780]: E1210 10:49:27.788109 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/certified-operators-zms8r" podUID="73eb2180-ca1d-4860-9306-982a9b3930b9" Dec 10 10:49:30 crc kubenswrapper[4780]: I1210 10:49:30.469493 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-qllmj"] Dec 10 10:49:32 crc kubenswrapper[4780]: E1210 10:49:32.222742 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-marketplace-4rw4n" podUID="a586027d-c0c6-4647-9318-23727f40a928" Dec 10 10:49:32 crc kubenswrapper[4780]: E1210 10:49:32.222774 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"\"" pod="openshift-marketplace/certified-operators-zms8r" podUID="73eb2180-ca1d-4860-9306-982a9b3930b9" Dec 10 10:49:32 crc kubenswrapper[4780]: E1210 10:49:32.301449 4780 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/redhat-operator-index:v4.18" Dec 10 10:49:32 crc kubenswrapper[4780]: E1210 10:49:32.301651 4780 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs 
--catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-fr75z,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-operators-jmx2c_openshift-marketplace(9a59ce5a-0c36-4120-be63-8f2051a58e78): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Dec 10 10:49:32 crc kubenswrapper[4780]: E1210 10:49:32.302897 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/redhat-operators-jmx2c" podUID="9a59ce5a-0c36-4120-be63-8f2051a58e78" Dec 10 10:49:34 crc kubenswrapper[4780]: E1210 10:49:34.007873 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-operators-jmx2c" podUID="9a59ce5a-0c36-4120-be63-8f2051a58e78" Dec 10 10:49:34 crc kubenswrapper[4780]: E1210 10:49:34.091221 4780 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/community-operator-index:v4.18" Dec 10 10:49:34 crc kubenswrapper[4780]: E1210 10:49:34.091679 4780 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/community-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache 
--cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-5kgr5,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod community-operators-7sdgl_openshift-marketplace(983b01b2-448a-462b-a87c-8d66c7824940): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Dec 10 10:49:34 crc kubenswrapper[4780]: E1210 10:49:34.093375 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/community-operators-7sdgl" podUID="983b01b2-448a-462b-a87c-8d66c7824940" Dec 10 10:49:34 crc kubenswrapper[4780]: E1210 10:49:34.164482 4780 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/redhat-marketplace-index:v4.18" Dec 10 10:49:34 crc kubenswrapper[4780]: E1210 10:49:34.164681 4780 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-marketplace-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache 
--cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-hjf86,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-marketplace-w2bd9_openshift-marketplace(eee88117-019c-44a5-8a7f-95a655e53a27): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Dec 10 10:49:34 crc kubenswrapper[4780]: E1210 10:49:34.166148 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/redhat-marketplace-w2bd9" podUID="eee88117-019c-44a5-8a7f-95a655e53a27" Dec 10 10:49:34 crc kubenswrapper[4780]: E1210 10:49:34.183355 4780 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/redhat-operator-index:v4.18" Dec 10 10:49:34 crc kubenswrapper[4780]: E1210 10:49:34.183700 4780 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache 
--cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-dzxwn,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-operators-dmtlf_openshift-marketplace(e941c8a4-98a7-48d2-9ec6-4e2dec741b54): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Dec 10 10:49:34 crc kubenswrapper[4780]: E1210 10:49:34.185180 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/redhat-operators-dmtlf" podUID="e941c8a4-98a7-48d2-9ec6-4e2dec741b54" Dec 10 10:49:34 crc kubenswrapper[4780]: E1210 10:49:34.188393 4780 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/community-operator-index:v4.18" Dec 10 10:49:34 crc kubenswrapper[4780]: E1210 10:49:34.189428 4780 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/community-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache 
--cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-kql5b,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod community-operators-z8b9w_openshift-marketplace(3234cf0e-6206-4a41-8474-f1893163954f): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Dec 10 10:49:34 crc kubenswrapper[4780]: E1210 10:49:34.191006 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/community-operators-z8b9w" podUID="3234cf0e-6206-4a41-8474-f1893163954f" Dec 10 10:49:34 crc kubenswrapper[4780]: E1210 10:49:34.258653 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"\"" pod="openshift-marketplace/community-operators-7sdgl" podUID="983b01b2-448a-462b-a87c-8d66c7824940" Dec 10 10:49:34 crc kubenswrapper[4780]: E1210 10:49:34.260075 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-marketplace-w2bd9" podUID="eee88117-019c-44a5-8a7f-95a655e53a27" Dec 10 10:49:34 crc kubenswrapper[4780]: E1210 10:49:34.260436 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"\"" pod="openshift-marketplace/community-operators-z8b9w" podUID="3234cf0e-6206-4a41-8474-f1893163954f" Dec 10 10:49:34 crc kubenswrapper[4780]: E1210 10:49:34.261564 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-operators-dmtlf" podUID="e941c8a4-98a7-48d2-9ec6-4e2dec741b54" Dec 10 10:49:34 
crc kubenswrapper[4780]: I1210 10:49:34.411424 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/installer-9-crc"] Dec 10 10:49:34 crc kubenswrapper[4780]: I1210 10:49:34.715628 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-9-crc"] Dec 10 10:49:34 crc kubenswrapper[4780]: W1210 10:49:34.735905 4780 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-pod6e92a5ce_d8d1_4368_9c76_78d3dcf9471b.slice/crio-a34b5fca928e376a78b2d092db111f9ab5f0831416be379c1a748db912c67ac7 WatchSource:0}: Error finding container a34b5fca928e376a78b2d092db111f9ab5f0831416be379c1a748db912c67ac7: Status 404 returned error can't find the container with id a34b5fca928e376a78b2d092db111f9ab5f0831416be379c1a748db912c67ac7 Dec 10 10:49:35 crc kubenswrapper[4780]: I1210 10:49:35.194876 4780 patch_prober.go:28] interesting pod/downloads-7954f5f757-fxxzq container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.10:8080/\": dial tcp 10.217.0.10:8080: connect: connection refused" start-of-body= Dec 10 10:49:35 crc kubenswrapper[4780]: I1210 10:49:35.195982 4780 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-fxxzq" podUID="09fe7cda-3948-484d-bcd9-e83d1ac0610a" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.10:8080/\": dial tcp 10.217.0.10:8080: connect: connection refused" Dec 10 10:49:35 crc kubenswrapper[4780]: I1210 10:49:35.276102 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" event={"ID":"6bf1dca1-b191-4796-b326-baac53e84045","Type":"ContainerStarted","Data":"6ebc39bea1992f54cd24fdfecca195ad14903b8ade84ac83330f2cc7cf317153"} Dec 10 10:49:35 crc kubenswrapper[4780]: I1210 10:49:35.279173 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-9-crc" event={"ID":"c7b993a1-1915-40e6-b88a-3606990443e1","Type":"ContainerStarted","Data":"4ec0cbaf267c5b2f3da41f014542631517bbabab47493dc895511c5e4eee8f6b"} Dec 10 10:49:35 crc kubenswrapper[4780]: I1210 10:49:35.279248 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-9-crc" event={"ID":"c7b993a1-1915-40e6-b88a-3606990443e1","Type":"ContainerStarted","Data":"ff3ac85a29cdd608f5ed81bacfd648100576ce2d129c6b330a0c6ec767637ca8"} Dec 10 10:49:35 crc kubenswrapper[4780]: I1210 10:49:35.283814 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-7954f5f757-fxxzq" event={"ID":"09fe7cda-3948-484d-bcd9-e83d1ac0610a","Type":"ContainerStarted","Data":"9a97a1b18b2510e2c34ff0c03bee95c3bd8ee156ceb78ab5376300e29327db34"} Dec 10 10:49:35 crc kubenswrapper[4780]: I1210 10:49:35.284390 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/downloads-7954f5f757-fxxzq" Dec 10 10:49:35 crc kubenswrapper[4780]: I1210 10:49:35.284479 4780 patch_prober.go:28] interesting pod/downloads-7954f5f757-fxxzq container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.10:8080/\": dial tcp 10.217.0.10:8080: connect: connection refused" start-of-body= Dec 10 10:49:35 crc kubenswrapper[4780]: I1210 10:49:35.284530 4780 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-fxxzq" podUID="09fe7cda-3948-484d-bcd9-e83d1ac0610a" 
containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.10:8080/\": dial tcp 10.217.0.10:8080: connect: connection refused" Dec 10 10:49:35 crc kubenswrapper[4780]: I1210 10:49:35.286878 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-9-crc" event={"ID":"6e92a5ce-d8d1-4368-9c76-78d3dcf9471b","Type":"ContainerStarted","Data":"b8591b0d855874709a694d66a75250ef924b5cd3a2fe75275388aaed063beaf3"} Dec 10 10:49:35 crc kubenswrapper[4780]: I1210 10:49:35.287004 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-9-crc" event={"ID":"6e92a5ce-d8d1-4368-9c76-78d3dcf9471b","Type":"ContainerStarted","Data":"a34b5fca928e376a78b2d092db111f9ab5f0831416be379c1a748db912c67ac7"} Dec 10 10:49:35 crc kubenswrapper[4780]: I1210 10:49:35.357304 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/installer-9-crc" podStartSLOduration=32.357230841 podStartE2EDuration="32.357230841s" podCreationTimestamp="2025-12-10 10:49:03 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 10:49:35.32917911 +0000 UTC m=+280.182572563" watchObservedRunningTime="2025-12-10 10:49:35.357230841 +0000 UTC m=+280.210624284" Dec 10 10:49:35 crc kubenswrapper[4780]: I1210 10:49:35.377323 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/revision-pruner-9-crc" podStartSLOduration=37.37729534 podStartE2EDuration="37.37729534s" podCreationTimestamp="2025-12-10 10:48:58 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 10:49:35.371404445 +0000 UTC m=+280.224797908" watchObservedRunningTime="2025-12-10 10:49:35.37729534 +0000 UTC m=+280.230688783" Dec 10 10:49:36 crc kubenswrapper[4780]: I1210 10:49:36.301190 4780 generic.go:334] "Generic (PLEG): container finished" podID="6e92a5ce-d8d1-4368-9c76-78d3dcf9471b" containerID="b8591b0d855874709a694d66a75250ef924b5cd3a2fe75275388aaed063beaf3" exitCode=0 Dec 10 10:49:36 crc kubenswrapper[4780]: I1210 10:49:36.301453 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-9-crc" event={"ID":"6e92a5ce-d8d1-4368-9c76-78d3dcf9471b","Type":"ContainerDied","Data":"b8591b0d855874709a694d66a75250ef924b5cd3a2fe75275388aaed063beaf3"} Dec 10 10:49:37 crc kubenswrapper[4780]: I1210 10:49:36.303146 4780 patch_prober.go:28] interesting pod/downloads-7954f5f757-fxxzq container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.10:8080/\": dial tcp 10.217.0.10:8080: connect: connection refused" start-of-body= Dec 10 10:49:37 crc kubenswrapper[4780]: I1210 10:49:36.303252 4780 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-fxxzq" podUID="09fe7cda-3948-484d-bcd9-e83d1ac0610a" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.10:8080/\": dial tcp 10.217.0.10:8080: connect: connection refused" Dec 10 10:49:37 crc kubenswrapper[4780]: I1210 10:49:37.603647 4780 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-9-crc" Dec 10 10:49:37 crc kubenswrapper[4780]: I1210 10:49:37.662840 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 10 10:49:37 crc kubenswrapper[4780]: I1210 10:49:37.662983 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 10 10:49:37 crc kubenswrapper[4780]: I1210 10:49:37.663064 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 10 10:49:37 crc kubenswrapper[4780]: I1210 10:49:37.663180 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 10 10:49:37 crc kubenswrapper[4780]: I1210 10:49:37.666408 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-console"/"networking-console-plugin" Dec 10 10:49:37 crc kubenswrapper[4780]: I1210 10:49:37.667149 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"kube-root-ca.crt" Dec 10 10:49:37 crc kubenswrapper[4780]: I1210 10:49:37.668272 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-console"/"networking-console-plugin-cert" Dec 10 10:49:37 crc kubenswrapper[4780]: I1210 10:49:37.677142 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"openshift-service-ca.crt" Dec 10 10:49:37 crc kubenswrapper[4780]: I1210 10:49:37.685404 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 10 10:49:37 crc kubenswrapper[4780]: I1210 10:49:37.688856 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 10 10:49:37 crc kubenswrapper[4780]: I1210 10:49:37.690343 4780 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 10 10:49:37 crc kubenswrapper[4780]: I1210 10:49:37.693053 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 10 10:49:37 crc kubenswrapper[4780]: I1210 10:49:37.693451 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 10 10:49:37 crc kubenswrapper[4780]: I1210 10:49:37.764456 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/6e92a5ce-d8d1-4368-9c76-78d3dcf9471b-kube-api-access\") pod \"6e92a5ce-d8d1-4368-9c76-78d3dcf9471b\" (UID: \"6e92a5ce-d8d1-4368-9c76-78d3dcf9471b\") " Dec 10 10:49:37 crc kubenswrapper[4780]: I1210 10:49:37.764535 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/6e92a5ce-d8d1-4368-9c76-78d3dcf9471b-kubelet-dir\") pod \"6e92a5ce-d8d1-4368-9c76-78d3dcf9471b\" (UID: \"6e92a5ce-d8d1-4368-9c76-78d3dcf9471b\") " Dec 10 10:49:37 crc kubenswrapper[4780]: I1210 10:49:37.764836 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/6e92a5ce-d8d1-4368-9c76-78d3dcf9471b-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "6e92a5ce-d8d1-4368-9c76-78d3dcf9471b" (UID: "6e92a5ce-d8d1-4368-9c76-78d3dcf9471b"). InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 10 10:49:37 crc kubenswrapper[4780]: I1210 10:49:37.769259 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6e92a5ce-d8d1-4368-9c76-78d3dcf9471b-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "6e92a5ce-d8d1-4368-9c76-78d3dcf9471b" (UID: "6e92a5ce-d8d1-4368-9c76-78d3dcf9471b"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 10:49:37 crc kubenswrapper[4780]: I1210 10:49:37.869033 4780 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/6e92a5ce-d8d1-4368-9c76-78d3dcf9471b-kube-api-access\") on node \"crc\" DevicePath \"\"" Dec 10 10:49:37 crc kubenswrapper[4780]: I1210 10:49:37.869097 4780 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/6e92a5ce-d8d1-4368-9c76-78d3dcf9471b-kubelet-dir\") on node \"crc\" DevicePath \"\"" Dec 10 10:49:37 crc kubenswrapper[4780]: I1210 10:49:37.977467 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 10 10:49:37 crc kubenswrapper[4780]: I1210 10:49:37.986541 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 10 10:49:38 crc kubenswrapper[4780]: I1210 10:49:38.336116 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" event={"ID":"3b6479f0-333b-4a96-9adf-2099afdc2447","Type":"ContainerStarted","Data":"8b92d1034f8b155b65c7a3541abfd10165748acf99d2fa21956037ec67e7b6b9"} Dec 10 10:49:38 crc kubenswrapper[4780]: I1210 10:49:38.357463 4780 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-9-crc" Dec 10 10:49:38 crc kubenswrapper[4780]: I1210 10:49:38.357315 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-9-crc" event={"ID":"6e92a5ce-d8d1-4368-9c76-78d3dcf9471b","Type":"ContainerDied","Data":"a34b5fca928e376a78b2d092db111f9ab5f0831416be379c1a748db912c67ac7"} Dec 10 10:49:38 crc kubenswrapper[4780]: I1210 10:49:38.358201 4780 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a34b5fca928e376a78b2d092db111f9ab5f0831416be379c1a748db912c67ac7" Dec 10 10:49:38 crc kubenswrapper[4780]: W1210 10:49:38.484041 4780 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod9d751cbb_f2e2_430d_9754_c882a5e924a5.slice/crio-7bdca5261c4ec6e9b90e59c346036bc368ab9dbe6c8591efb6ca3e14273639a4 WatchSource:0}: Error finding container 7bdca5261c4ec6e9b90e59c346036bc368ab9dbe6c8591efb6ca3e14273639a4: Status 404 returned error can't find the container with id 7bdca5261c4ec6e9b90e59c346036bc368ab9dbe6c8591efb6ca3e14273639a4 Dec 10 10:49:39 crc kubenswrapper[4780]: I1210 10:49:39.368973 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" event={"ID":"9d751cbb-f2e2-430d-9754-c882a5e924a5","Type":"ContainerStarted","Data":"7bdca5261c4ec6e9b90e59c346036bc368ab9dbe6c8591efb6ca3e14273639a4"} Dec 10 10:49:39 crc kubenswrapper[4780]: I1210 10:49:39.372978 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" event={"ID":"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8","Type":"ContainerStarted","Data":"642cc6a92e4518a71f9c91c66b5963a87185b82356f19a4d04b8b47f8879dfff"} Dec 10 10:49:40 crc kubenswrapper[4780]: I1210 10:49:40.380638 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" event={"ID":"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8","Type":"ContainerStarted","Data":"87390b8e092bdcefc82a2970d89639b66d5c0d67ddbb77108a56c098391ea0ba"} Dec 10 10:49:40 crc kubenswrapper[4780]: I1210 10:49:40.382304 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" event={"ID":"9d751cbb-f2e2-430d-9754-c882a5e924a5","Type":"ContainerStarted","Data":"3a5d616788d275f0b0aa6afa84f646388a8fa7b5c8534b2b33a54370201a007d"} Dec 10 10:49:40 crc kubenswrapper[4780]: I1210 10:49:40.384314 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" event={"ID":"3b6479f0-333b-4a96-9adf-2099afdc2447","Type":"ContainerStarted","Data":"e1854f74baf69e583bf27ea3c4efc41facc3dc5f88bcec00a3444f46a151f601"} Dec 10 10:49:40 crc kubenswrapper[4780]: I1210 10:49:40.385020 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" 
pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 10 10:49:45 crc kubenswrapper[4780]: I1210 10:49:45.194796 4780 patch_prober.go:28] interesting pod/downloads-7954f5f757-fxxzq container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.10:8080/\": dial tcp 10.217.0.10:8080: connect: connection refused" start-of-body= Dec 10 10:49:45 crc kubenswrapper[4780]: I1210 10:49:45.195684 4780 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-fxxzq" podUID="09fe7cda-3948-484d-bcd9-e83d1ac0610a" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.10:8080/\": dial tcp 10.217.0.10:8080: connect: connection refused" Dec 10 10:49:45 crc kubenswrapper[4780]: I1210 10:49:45.195889 4780 patch_prober.go:28] interesting pod/downloads-7954f5f757-fxxzq container/download-server namespace/openshift-console: Liveness probe status=failure output="Get \"http://10.217.0.10:8080/\": dial tcp 10.217.0.10:8080: connect: connection refused" start-of-body= Dec 10 10:49:45 crc kubenswrapper[4780]: I1210 10:49:45.196039 4780 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console/downloads-7954f5f757-fxxzq" podUID="09fe7cda-3948-484d-bcd9-e83d1ac0610a" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.10:8080/\": dial tcp 10.217.0.10:8080: connect: connection refused" Dec 10 10:49:55 crc kubenswrapper[4780]: I1210 10:49:55.195405 4780 patch_prober.go:28] interesting pod/downloads-7954f5f757-fxxzq container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.10:8080/\": dial tcp 10.217.0.10:8080: connect: connection refused" start-of-body= Dec 10 10:49:55 crc kubenswrapper[4780]: I1210 10:49:55.195404 4780 patch_prober.go:28] interesting pod/downloads-7954f5f757-fxxzq container/download-server namespace/openshift-console: Liveness probe status=failure output="Get \"http://10.217.0.10:8080/\": dial tcp 10.217.0.10:8080: connect: connection refused" start-of-body= Dec 10 10:49:55 crc kubenswrapper[4780]: I1210 10:49:55.196415 4780 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-fxxzq" podUID="09fe7cda-3948-484d-bcd9-e83d1ac0610a" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.10:8080/\": dial tcp 10.217.0.10:8080: connect: connection refused" Dec 10 10:49:55 crc kubenswrapper[4780]: I1210 10:49:55.196487 4780 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console/downloads-7954f5f757-fxxzq" podUID="09fe7cda-3948-484d-bcd9-e83d1ac0610a" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.10:8080/\": dial tcp 10.217.0.10:8080: connect: connection refused" Dec 10 10:49:55 crc kubenswrapper[4780]: I1210 10:49:55.527545 4780 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-authentication/oauth-openshift-558db77b4-qllmj" podUID="6e560d85-bf0c-4604-9f52-d46fe96b6fe7" containerName="oauth-openshift" containerID="cri-o://c4baa2f445f3ead4e029b7dd021df8730d2bd5ca75018ef42bd3db67cff605f7" gracePeriod=15 Dec 10 10:49:55 crc kubenswrapper[4780]: I1210 10:49:55.976222 4780 patch_prober.go:28] interesting pod/oauth-openshift-558db77b4-qllmj container/oauth-openshift namespace/openshift-authentication: Readiness probe status=failure output="Get \"https://10.217.0.41:6443/healthz\": dial tcp 10.217.0.41:6443: connect: 
connection refused" start-of-body= Dec 10 10:49:55 crc kubenswrapper[4780]: I1210 10:49:55.976303 4780 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-authentication/oauth-openshift-558db77b4-qllmj" podUID="6e560d85-bf0c-4604-9f52-d46fe96b6fe7" containerName="oauth-openshift" probeResult="failure" output="Get \"https://10.217.0.41:6443/healthz\": dial tcp 10.217.0.41:6443: connect: connection refused" Dec 10 10:49:58 crc kubenswrapper[4780]: I1210 10:49:58.893865 4780 generic.go:334] "Generic (PLEG): container finished" podID="6e560d85-bf0c-4604-9f52-d46fe96b6fe7" containerID="c4baa2f445f3ead4e029b7dd021df8730d2bd5ca75018ef42bd3db67cff605f7" exitCode=0 Dec 10 10:49:58 crc kubenswrapper[4780]: I1210 10:49:58.894011 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-qllmj" event={"ID":"6e560d85-bf0c-4604-9f52-d46fe96b6fe7","Type":"ContainerDied","Data":"c4baa2f445f3ead4e029b7dd021df8730d2bd5ca75018ef42bd3db67cff605f7"} Dec 10 10:50:05 crc kubenswrapper[4780]: I1210 10:50:05.319854 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/downloads-7954f5f757-fxxzq" Dec 10 10:50:05 crc kubenswrapper[4780]: I1210 10:50:05.959103 4780 patch_prober.go:28] interesting pod/oauth-openshift-558db77b4-qllmj container/oauth-openshift namespace/openshift-authentication: Readiness probe status=failure output="Get \"https://10.217.0.41:6443/healthz\": dial tcp 10.217.0.41:6443: connect: connection refused" start-of-body= Dec 10 10:50:05 crc kubenswrapper[4780]: I1210 10:50:05.959172 4780 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-authentication/oauth-openshift-558db77b4-qllmj" podUID="6e560d85-bf0c-4604-9f52-d46fe96b6fe7" containerName="oauth-openshift" probeResult="failure" output="Get \"https://10.217.0.41:6443/healthz\": dial tcp 10.217.0.41:6443: connect: connection refused" Dec 10 10:50:06 crc kubenswrapper[4780]: E1210 10:50:06.595140 4780 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/certified-operator-index:v4.18" Dec 10 10:50:06 crc kubenswrapper[4780]: E1210 10:50:06.595455 4780 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/certified-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache 
--cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-czq5m,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod certified-operators-pdmqg_openshift-marketplace(59f1ed36-eccd-4cd4-af95-f32539d40314): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Dec 10 10:50:06 crc kubenswrapper[4780]: E1210 10:50:06.596665 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/certified-operators-pdmqg" podUID="59f1ed36-eccd-4cd4-af95-f32539d40314" Dec 10 10:50:08 crc kubenswrapper[4780]: I1210 10:50:08.951125 4780 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-qllmj" Dec 10 10:50:08 crc kubenswrapper[4780]: I1210 10:50:08.995599 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-qllmj" event={"ID":"6e560d85-bf0c-4604-9f52-d46fe96b6fe7","Type":"ContainerDied","Data":"758d20cb39318e970e5705358100b24f05aedc609cc5eae03c2cef346be887bc"} Dec 10 10:50:08 crc kubenswrapper[4780]: I1210 10:50:08.995696 4780 scope.go:117] "RemoveContainer" containerID="c4baa2f445f3ead4e029b7dd021df8730d2bd5ca75018ef42bd3db67cff605f7" Dec 10 10:50:08 crc kubenswrapper[4780]: I1210 10:50:08.995893 4780 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-qllmj" Dec 10 10:50:09 crc kubenswrapper[4780]: I1210 10:50:09.000038 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-authentication/oauth-openshift-69bcbbd7f8-c7vjp"] Dec 10 10:50:09 crc kubenswrapper[4780]: E1210 10:50:09.000769 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6e92a5ce-d8d1-4368-9c76-78d3dcf9471b" containerName="pruner" Dec 10 10:50:09 crc kubenswrapper[4780]: I1210 10:50:09.000792 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="6e92a5ce-d8d1-4368-9c76-78d3dcf9471b" containerName="pruner" Dec 10 10:50:09 crc kubenswrapper[4780]: E1210 10:50:09.000826 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6e560d85-bf0c-4604-9f52-d46fe96b6fe7" containerName="oauth-openshift" Dec 10 10:50:09 crc kubenswrapper[4780]: I1210 10:50:09.000835 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="6e560d85-bf0c-4604-9f52-d46fe96b6fe7" containerName="oauth-openshift" Dec 10 10:50:09 crc kubenswrapper[4780]: I1210 10:50:09.001050 4780 memory_manager.go:354] "RemoveStaleState removing state" podUID="6e560d85-bf0c-4604-9f52-d46fe96b6fe7" containerName="oauth-openshift" Dec 10 10:50:09 crc kubenswrapper[4780]: I1210 10:50:09.001071 4780 memory_manager.go:354] "RemoveStaleState removing state" podUID="6e92a5ce-d8d1-4368-9c76-78d3dcf9471b" containerName="pruner" Dec 10 10:50:09 crc kubenswrapper[4780]: I1210 10:50:09.001886 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-69bcbbd7f8-c7vjp" Dec 10 10:50:09 crc kubenswrapper[4780]: I1210 10:50:09.003905 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/6e560d85-bf0c-4604-9f52-d46fe96b6fe7-audit-dir\") pod \"6e560d85-bf0c-4604-9f52-d46fe96b6fe7\" (UID: \"6e560d85-bf0c-4604-9f52-d46fe96b6fe7\") " Dec 10 10:50:09 crc kubenswrapper[4780]: I1210 10:50:09.003971 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/6e560d85-bf0c-4604-9f52-d46fe96b6fe7-v4-0-config-system-session\") pod \"6e560d85-bf0c-4604-9f52-d46fe96b6fe7\" (UID: \"6e560d85-bf0c-4604-9f52-d46fe96b6fe7\") " Dec 10 10:50:09 crc kubenswrapper[4780]: I1210 10:50:09.004004 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/6e560d85-bf0c-4604-9f52-d46fe96b6fe7-v4-0-config-system-cliconfig\") pod \"6e560d85-bf0c-4604-9f52-d46fe96b6fe7\" (UID: \"6e560d85-bf0c-4604-9f52-d46fe96b6fe7\") " Dec 10 10:50:09 crc kubenswrapper[4780]: I1210 10:50:09.004039 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/6e560d85-bf0c-4604-9f52-d46fe96b6fe7-v4-0-config-user-idp-0-file-data\") pod \"6e560d85-bf0c-4604-9f52-d46fe96b6fe7\" (UID: \"6e560d85-bf0c-4604-9f52-d46fe96b6fe7\") " Dec 10 10:50:09 crc kubenswrapper[4780]: I1210 10:50:09.004071 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/6e560d85-bf0c-4604-9f52-d46fe96b6fe7-v4-0-config-user-template-provider-selection\") pod \"6e560d85-bf0c-4604-9f52-d46fe96b6fe7\" (UID: \"6e560d85-bf0c-4604-9f52-d46fe96b6fe7\") " Dec 10 
10:50:09 crc kubenswrapper[4780]: I1210 10:50:09.004093 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/6e560d85-bf0c-4604-9f52-d46fe96b6fe7-v4-0-config-user-template-login\") pod \"6e560d85-bf0c-4604-9f52-d46fe96b6fe7\" (UID: \"6e560d85-bf0c-4604-9f52-d46fe96b6fe7\") " Dec 10 10:50:09 crc kubenswrapper[4780]: I1210 10:50:09.004116 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/6e560d85-bf0c-4604-9f52-d46fe96b6fe7-v4-0-config-system-serving-cert\") pod \"6e560d85-bf0c-4604-9f52-d46fe96b6fe7\" (UID: \"6e560d85-bf0c-4604-9f52-d46fe96b6fe7\") " Dec 10 10:50:09 crc kubenswrapper[4780]: I1210 10:50:09.004144 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6gfvl\" (UniqueName: \"kubernetes.io/projected/6e560d85-bf0c-4604-9f52-d46fe96b6fe7-kube-api-access-6gfvl\") pod \"6e560d85-bf0c-4604-9f52-d46fe96b6fe7\" (UID: \"6e560d85-bf0c-4604-9f52-d46fe96b6fe7\") " Dec 10 10:50:09 crc kubenswrapper[4780]: I1210 10:50:09.004167 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/6e560d85-bf0c-4604-9f52-d46fe96b6fe7-v4-0-config-user-template-error\") pod \"6e560d85-bf0c-4604-9f52-d46fe96b6fe7\" (UID: \"6e560d85-bf0c-4604-9f52-d46fe96b6fe7\") " Dec 10 10:50:09 crc kubenswrapper[4780]: I1210 10:50:09.004192 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/6e560d85-bf0c-4604-9f52-d46fe96b6fe7-audit-policies\") pod \"6e560d85-bf0c-4604-9f52-d46fe96b6fe7\" (UID: \"6e560d85-bf0c-4604-9f52-d46fe96b6fe7\") " Dec 10 10:50:09 crc kubenswrapper[4780]: I1210 10:50:09.004242 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6e560d85-bf0c-4604-9f52-d46fe96b6fe7-v4-0-config-system-trusted-ca-bundle\") pod \"6e560d85-bf0c-4604-9f52-d46fe96b6fe7\" (UID: \"6e560d85-bf0c-4604-9f52-d46fe96b6fe7\") " Dec 10 10:50:09 crc kubenswrapper[4780]: I1210 10:50:09.004264 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/6e560d85-bf0c-4604-9f52-d46fe96b6fe7-v4-0-config-system-service-ca\") pod \"6e560d85-bf0c-4604-9f52-d46fe96b6fe7\" (UID: \"6e560d85-bf0c-4604-9f52-d46fe96b6fe7\") " Dec 10 10:50:09 crc kubenswrapper[4780]: I1210 10:50:09.004312 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/6e560d85-bf0c-4604-9f52-d46fe96b6fe7-v4-0-config-system-ocp-branding-template\") pod \"6e560d85-bf0c-4604-9f52-d46fe96b6fe7\" (UID: \"6e560d85-bf0c-4604-9f52-d46fe96b6fe7\") " Dec 10 10:50:09 crc kubenswrapper[4780]: I1210 10:50:09.004337 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/6e560d85-bf0c-4604-9f52-d46fe96b6fe7-v4-0-config-system-router-certs\") pod \"6e560d85-bf0c-4604-9f52-d46fe96b6fe7\" (UID: \"6e560d85-bf0c-4604-9f52-d46fe96b6fe7\") " Dec 10 10:50:09 crc kubenswrapper[4780]: I1210 10:50:09.004469 4780 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/3e69e700-b580-4621-bb66-97f89254224d-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-69bcbbd7f8-c7vjp\" (UID: \"3e69e700-b580-4621-bb66-97f89254224d\") " pod="openshift-authentication/oauth-openshift-69bcbbd7f8-c7vjp" Dec 10 10:50:09 crc kubenswrapper[4780]: I1210 10:50:09.004494 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/3e69e700-b580-4621-bb66-97f89254224d-v4-0-config-system-service-ca\") pod \"oauth-openshift-69bcbbd7f8-c7vjp\" (UID: \"3e69e700-b580-4621-bb66-97f89254224d\") " pod="openshift-authentication/oauth-openshift-69bcbbd7f8-c7vjp" Dec 10 10:50:09 crc kubenswrapper[4780]: I1210 10:50:09.004529 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kqhw2\" (UniqueName: \"kubernetes.io/projected/3e69e700-b580-4621-bb66-97f89254224d-kube-api-access-kqhw2\") pod \"oauth-openshift-69bcbbd7f8-c7vjp\" (UID: \"3e69e700-b580-4621-bb66-97f89254224d\") " pod="openshift-authentication/oauth-openshift-69bcbbd7f8-c7vjp" Dec 10 10:50:09 crc kubenswrapper[4780]: I1210 10:50:09.004562 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/3e69e700-b580-4621-bb66-97f89254224d-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-69bcbbd7f8-c7vjp\" (UID: \"3e69e700-b580-4621-bb66-97f89254224d\") " pod="openshift-authentication/oauth-openshift-69bcbbd7f8-c7vjp" Dec 10 10:50:09 crc kubenswrapper[4780]: I1210 10:50:09.004603 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/3e69e700-b580-4621-bb66-97f89254224d-audit-dir\") pod \"oauth-openshift-69bcbbd7f8-c7vjp\" (UID: \"3e69e700-b580-4621-bb66-97f89254224d\") " pod="openshift-authentication/oauth-openshift-69bcbbd7f8-c7vjp" Dec 10 10:50:09 crc kubenswrapper[4780]: I1210 10:50:09.004634 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/3e69e700-b580-4621-bb66-97f89254224d-v4-0-config-system-serving-cert\") pod \"oauth-openshift-69bcbbd7f8-c7vjp\" (UID: \"3e69e700-b580-4621-bb66-97f89254224d\") " pod="openshift-authentication/oauth-openshift-69bcbbd7f8-c7vjp" Dec 10 10:50:09 crc kubenswrapper[4780]: I1210 10:50:09.004659 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/3e69e700-b580-4621-bb66-97f89254224d-v4-0-config-system-cliconfig\") pod \"oauth-openshift-69bcbbd7f8-c7vjp\" (UID: \"3e69e700-b580-4621-bb66-97f89254224d\") " pod="openshift-authentication/oauth-openshift-69bcbbd7f8-c7vjp" Dec 10 10:50:09 crc kubenswrapper[4780]: I1210 10:50:09.004692 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/3e69e700-b580-4621-bb66-97f89254224d-audit-policies\") pod \"oauth-openshift-69bcbbd7f8-c7vjp\" (UID: \"3e69e700-b580-4621-bb66-97f89254224d\") " pod="openshift-authentication/oauth-openshift-69bcbbd7f8-c7vjp" Dec 10 10:50:09 crc 
kubenswrapper[4780]: I1210 10:50:09.004712 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/3e69e700-b580-4621-bb66-97f89254224d-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-69bcbbd7f8-c7vjp\" (UID: \"3e69e700-b580-4621-bb66-97f89254224d\") " pod="openshift-authentication/oauth-openshift-69bcbbd7f8-c7vjp" Dec 10 10:50:09 crc kubenswrapper[4780]: I1210 10:50:09.004748 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/3e69e700-b580-4621-bb66-97f89254224d-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-69bcbbd7f8-c7vjp\" (UID: \"3e69e700-b580-4621-bb66-97f89254224d\") " pod="openshift-authentication/oauth-openshift-69bcbbd7f8-c7vjp" Dec 10 10:50:09 crc kubenswrapper[4780]: I1210 10:50:09.004796 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/3e69e700-b580-4621-bb66-97f89254224d-v4-0-config-system-session\") pod \"oauth-openshift-69bcbbd7f8-c7vjp\" (UID: \"3e69e700-b580-4621-bb66-97f89254224d\") " pod="openshift-authentication/oauth-openshift-69bcbbd7f8-c7vjp" Dec 10 10:50:09 crc kubenswrapper[4780]: I1210 10:50:09.004813 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/3e69e700-b580-4621-bb66-97f89254224d-v4-0-config-system-router-certs\") pod \"oauth-openshift-69bcbbd7f8-c7vjp\" (UID: \"3e69e700-b580-4621-bb66-97f89254224d\") " pod="openshift-authentication/oauth-openshift-69bcbbd7f8-c7vjp" Dec 10 10:50:09 crc kubenswrapper[4780]: I1210 10:50:09.004835 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/3e69e700-b580-4621-bb66-97f89254224d-v4-0-config-user-template-error\") pod \"oauth-openshift-69bcbbd7f8-c7vjp\" (UID: \"3e69e700-b580-4621-bb66-97f89254224d\") " pod="openshift-authentication/oauth-openshift-69bcbbd7f8-c7vjp" Dec 10 10:50:09 crc kubenswrapper[4780]: I1210 10:50:09.004855 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/3e69e700-b580-4621-bb66-97f89254224d-v4-0-config-user-template-login\") pod \"oauth-openshift-69bcbbd7f8-c7vjp\" (UID: \"3e69e700-b580-4621-bb66-97f89254224d\") " pod="openshift-authentication/oauth-openshift-69bcbbd7f8-c7vjp" Dec 10 10:50:09 crc kubenswrapper[4780]: I1210 10:50:09.006400 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/6e560d85-bf0c-4604-9f52-d46fe96b6fe7-audit-dir" (OuterVolumeSpecName: "audit-dir") pod "6e560d85-bf0c-4604-9f52-d46fe96b6fe7" (UID: "6e560d85-bf0c-4604-9f52-d46fe96b6fe7"). InnerVolumeSpecName "audit-dir". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 10 10:50:09 crc kubenswrapper[4780]: I1210 10:50:09.006738 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6e560d85-bf0c-4604-9f52-d46fe96b6fe7-v4-0-config-system-cliconfig" (OuterVolumeSpecName: "v4-0-config-system-cliconfig") pod "6e560d85-bf0c-4604-9f52-d46fe96b6fe7" (UID: "6e560d85-bf0c-4604-9f52-d46fe96b6fe7"). InnerVolumeSpecName "v4-0-config-system-cliconfig". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 10:50:09 crc kubenswrapper[4780]: I1210 10:50:09.008102 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6e560d85-bf0c-4604-9f52-d46fe96b6fe7-v4-0-config-system-trusted-ca-bundle" (OuterVolumeSpecName: "v4-0-config-system-trusted-ca-bundle") pod "6e560d85-bf0c-4604-9f52-d46fe96b6fe7" (UID: "6e560d85-bf0c-4604-9f52-d46fe96b6fe7"). InnerVolumeSpecName "v4-0-config-system-trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 10:50:09 crc kubenswrapper[4780]: I1210 10:50:09.009690 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6e560d85-bf0c-4604-9f52-d46fe96b6fe7-audit-policies" (OuterVolumeSpecName: "audit-policies") pod "6e560d85-bf0c-4604-9f52-d46fe96b6fe7" (UID: "6e560d85-bf0c-4604-9f52-d46fe96b6fe7"). InnerVolumeSpecName "audit-policies". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 10:50:09 crc kubenswrapper[4780]: I1210 10:50:09.009756 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6e560d85-bf0c-4604-9f52-d46fe96b6fe7-v4-0-config-system-service-ca" (OuterVolumeSpecName: "v4-0-config-system-service-ca") pod "6e560d85-bf0c-4604-9f52-d46fe96b6fe7" (UID: "6e560d85-bf0c-4604-9f52-d46fe96b6fe7"). InnerVolumeSpecName "v4-0-config-system-service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 10:50:09 crc kubenswrapper[4780]: I1210 10:50:09.019022 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6e560d85-bf0c-4604-9f52-d46fe96b6fe7-v4-0-config-system-session" (OuterVolumeSpecName: "v4-0-config-system-session") pod "6e560d85-bf0c-4604-9f52-d46fe96b6fe7" (UID: "6e560d85-bf0c-4604-9f52-d46fe96b6fe7"). InnerVolumeSpecName "v4-0-config-system-session". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 10:50:09 crc kubenswrapper[4780]: I1210 10:50:09.019686 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6e560d85-bf0c-4604-9f52-d46fe96b6fe7-v4-0-config-system-router-certs" (OuterVolumeSpecName: "v4-0-config-system-router-certs") pod "6e560d85-bf0c-4604-9f52-d46fe96b6fe7" (UID: "6e560d85-bf0c-4604-9f52-d46fe96b6fe7"). InnerVolumeSpecName "v4-0-config-system-router-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 10:50:09 crc kubenswrapper[4780]: I1210 10:50:09.019976 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6e560d85-bf0c-4604-9f52-d46fe96b6fe7-v4-0-config-system-serving-cert" (OuterVolumeSpecName: "v4-0-config-system-serving-cert") pod "6e560d85-bf0c-4604-9f52-d46fe96b6fe7" (UID: "6e560d85-bf0c-4604-9f52-d46fe96b6fe7"). InnerVolumeSpecName "v4-0-config-system-serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 10:50:09 crc kubenswrapper[4780]: I1210 10:50:09.018514 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6e560d85-bf0c-4604-9f52-d46fe96b6fe7-v4-0-config-system-ocp-branding-template" (OuterVolumeSpecName: "v4-0-config-system-ocp-branding-template") pod "6e560d85-bf0c-4604-9f52-d46fe96b6fe7" (UID: "6e560d85-bf0c-4604-9f52-d46fe96b6fe7"). InnerVolumeSpecName "v4-0-config-system-ocp-branding-template". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 10:50:09 crc kubenswrapper[4780]: I1210 10:50:09.025880 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-69bcbbd7f8-c7vjp"] Dec 10 10:50:09 crc kubenswrapper[4780]: I1210 10:50:09.022490 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6e560d85-bf0c-4604-9f52-d46fe96b6fe7-v4-0-config-user-idp-0-file-data" (OuterVolumeSpecName: "v4-0-config-user-idp-0-file-data") pod "6e560d85-bf0c-4604-9f52-d46fe96b6fe7" (UID: "6e560d85-bf0c-4604-9f52-d46fe96b6fe7"). InnerVolumeSpecName "v4-0-config-user-idp-0-file-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 10:50:09 crc kubenswrapper[4780]: I1210 10:50:09.027899 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6e560d85-bf0c-4604-9f52-d46fe96b6fe7-v4-0-config-user-template-provider-selection" (OuterVolumeSpecName: "v4-0-config-user-template-provider-selection") pod "6e560d85-bf0c-4604-9f52-d46fe96b6fe7" (UID: "6e560d85-bf0c-4604-9f52-d46fe96b6fe7"). InnerVolumeSpecName "v4-0-config-user-template-provider-selection". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 10:50:09 crc kubenswrapper[4780]: I1210 10:50:09.029708 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6e560d85-bf0c-4604-9f52-d46fe96b6fe7-v4-0-config-user-template-error" (OuterVolumeSpecName: "v4-0-config-user-template-error") pod "6e560d85-bf0c-4604-9f52-d46fe96b6fe7" (UID: "6e560d85-bf0c-4604-9f52-d46fe96b6fe7"). InnerVolumeSpecName "v4-0-config-user-template-error". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 10:50:09 crc kubenswrapper[4780]: I1210 10:50:09.030419 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6e560d85-bf0c-4604-9f52-d46fe96b6fe7-v4-0-config-user-template-login" (OuterVolumeSpecName: "v4-0-config-user-template-login") pod "6e560d85-bf0c-4604-9f52-d46fe96b6fe7" (UID: "6e560d85-bf0c-4604-9f52-d46fe96b6fe7"). InnerVolumeSpecName "v4-0-config-user-template-login". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 10:50:09 crc kubenswrapper[4780]: I1210 10:50:09.031364 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6e560d85-bf0c-4604-9f52-d46fe96b6fe7-kube-api-access-6gfvl" (OuterVolumeSpecName: "kube-api-access-6gfvl") pod "6e560d85-bf0c-4604-9f52-d46fe96b6fe7" (UID: "6e560d85-bf0c-4604-9f52-d46fe96b6fe7"). InnerVolumeSpecName "kube-api-access-6gfvl". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 10:50:09 crc kubenswrapper[4780]: I1210 10:50:09.105304 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kqhw2\" (UniqueName: \"kubernetes.io/projected/3e69e700-b580-4621-bb66-97f89254224d-kube-api-access-kqhw2\") pod \"oauth-openshift-69bcbbd7f8-c7vjp\" (UID: \"3e69e700-b580-4621-bb66-97f89254224d\") " pod="openshift-authentication/oauth-openshift-69bcbbd7f8-c7vjp" Dec 10 10:50:09 crc kubenswrapper[4780]: I1210 10:50:09.105821 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/3e69e700-b580-4621-bb66-97f89254224d-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-69bcbbd7f8-c7vjp\" (UID: \"3e69e700-b580-4621-bb66-97f89254224d\") " pod="openshift-authentication/oauth-openshift-69bcbbd7f8-c7vjp" Dec 10 10:50:09 crc kubenswrapper[4780]: I1210 10:50:09.105978 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/3e69e700-b580-4621-bb66-97f89254224d-audit-dir\") pod \"oauth-openshift-69bcbbd7f8-c7vjp\" (UID: \"3e69e700-b580-4621-bb66-97f89254224d\") " pod="openshift-authentication/oauth-openshift-69bcbbd7f8-c7vjp" Dec 10 10:50:09 crc kubenswrapper[4780]: I1210 10:50:09.106130 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/3e69e700-b580-4621-bb66-97f89254224d-v4-0-config-system-serving-cert\") pod \"oauth-openshift-69bcbbd7f8-c7vjp\" (UID: \"3e69e700-b580-4621-bb66-97f89254224d\") " pod="openshift-authentication/oauth-openshift-69bcbbd7f8-c7vjp" Dec 10 10:50:09 crc kubenswrapper[4780]: I1210 10:50:09.106261 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/3e69e700-b580-4621-bb66-97f89254224d-v4-0-config-system-cliconfig\") pod \"oauth-openshift-69bcbbd7f8-c7vjp\" (UID: \"3e69e700-b580-4621-bb66-97f89254224d\") " pod="openshift-authentication/oauth-openshift-69bcbbd7f8-c7vjp" Dec 10 10:50:09 crc kubenswrapper[4780]: I1210 10:50:09.106391 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/3e69e700-b580-4621-bb66-97f89254224d-audit-policies\") pod \"oauth-openshift-69bcbbd7f8-c7vjp\" (UID: \"3e69e700-b580-4621-bb66-97f89254224d\") " pod="openshift-authentication/oauth-openshift-69bcbbd7f8-c7vjp" Dec 10 10:50:09 crc kubenswrapper[4780]: I1210 10:50:09.106559 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/3e69e700-b580-4621-bb66-97f89254224d-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-69bcbbd7f8-c7vjp\" (UID: \"3e69e700-b580-4621-bb66-97f89254224d\") " pod="openshift-authentication/oauth-openshift-69bcbbd7f8-c7vjp" Dec 10 10:50:09 crc kubenswrapper[4780]: I1210 10:50:09.106689 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/3e69e700-b580-4621-bb66-97f89254224d-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-69bcbbd7f8-c7vjp\" (UID: \"3e69e700-b580-4621-bb66-97f89254224d\") " pod="openshift-authentication/oauth-openshift-69bcbbd7f8-c7vjp" 
Dec 10 10:50:09 crc kubenswrapper[4780]: I1210 10:50:09.106841 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/3e69e700-b580-4621-bb66-97f89254224d-v4-0-config-system-session\") pod \"oauth-openshift-69bcbbd7f8-c7vjp\" (UID: \"3e69e700-b580-4621-bb66-97f89254224d\") " pod="openshift-authentication/oauth-openshift-69bcbbd7f8-c7vjp" Dec 10 10:50:09 crc kubenswrapper[4780]: I1210 10:50:09.107001 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/3e69e700-b580-4621-bb66-97f89254224d-v4-0-config-system-router-certs\") pod \"oauth-openshift-69bcbbd7f8-c7vjp\" (UID: \"3e69e700-b580-4621-bb66-97f89254224d\") " pod="openshift-authentication/oauth-openshift-69bcbbd7f8-c7vjp" Dec 10 10:50:09 crc kubenswrapper[4780]: I1210 10:50:09.107111 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/3e69e700-b580-4621-bb66-97f89254224d-v4-0-config-user-template-error\") pod \"oauth-openshift-69bcbbd7f8-c7vjp\" (UID: \"3e69e700-b580-4621-bb66-97f89254224d\") " pod="openshift-authentication/oauth-openshift-69bcbbd7f8-c7vjp" Dec 10 10:50:09 crc kubenswrapper[4780]: I1210 10:50:09.107231 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/3e69e700-b580-4621-bb66-97f89254224d-v4-0-config-user-template-login\") pod \"oauth-openshift-69bcbbd7f8-c7vjp\" (UID: \"3e69e700-b580-4621-bb66-97f89254224d\") " pod="openshift-authentication/oauth-openshift-69bcbbd7f8-c7vjp" Dec 10 10:50:09 crc kubenswrapper[4780]: I1210 10:50:09.107372 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/3e69e700-b580-4621-bb66-97f89254224d-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-69bcbbd7f8-c7vjp\" (UID: \"3e69e700-b580-4621-bb66-97f89254224d\") " pod="openshift-authentication/oauth-openshift-69bcbbd7f8-c7vjp" Dec 10 10:50:09 crc kubenswrapper[4780]: I1210 10:50:09.107524 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/3e69e700-b580-4621-bb66-97f89254224d-v4-0-config-system-service-ca\") pod \"oauth-openshift-69bcbbd7f8-c7vjp\" (UID: \"3e69e700-b580-4621-bb66-97f89254224d\") " pod="openshift-authentication/oauth-openshift-69bcbbd7f8-c7vjp" Dec 10 10:50:09 crc kubenswrapper[4780]: I1210 10:50:09.107670 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/3e69e700-b580-4621-bb66-97f89254224d-v4-0-config-system-cliconfig\") pod \"oauth-openshift-69bcbbd7f8-c7vjp\" (UID: \"3e69e700-b580-4621-bb66-97f89254224d\") " pod="openshift-authentication/oauth-openshift-69bcbbd7f8-c7vjp" Dec 10 10:50:09 crc kubenswrapper[4780]: I1210 10:50:09.107681 4780 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/6e560d85-bf0c-4604-9f52-d46fe96b6fe7-v4-0-config-system-router-certs\") on node \"crc\" DevicePath \"\"" Dec 10 10:50:09 crc kubenswrapper[4780]: I1210 10:50:09.107763 4780 reconciler_common.go:293] "Volume detached for volume \"audit-dir\" (UniqueName: 
\"kubernetes.io/host-path/6e560d85-bf0c-4604-9f52-d46fe96b6fe7-audit-dir\") on node \"crc\" DevicePath \"\"" Dec 10 10:50:09 crc kubenswrapper[4780]: I1210 10:50:09.107779 4780 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/6e560d85-bf0c-4604-9f52-d46fe96b6fe7-v4-0-config-system-session\") on node \"crc\" DevicePath \"\"" Dec 10 10:50:09 crc kubenswrapper[4780]: I1210 10:50:09.107796 4780 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/6e560d85-bf0c-4604-9f52-d46fe96b6fe7-v4-0-config-system-cliconfig\") on node \"crc\" DevicePath \"\"" Dec 10 10:50:09 crc kubenswrapper[4780]: I1210 10:50:09.107817 4780 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/6e560d85-bf0c-4604-9f52-d46fe96b6fe7-v4-0-config-user-idp-0-file-data\") on node \"crc\" DevicePath \"\"" Dec 10 10:50:09 crc kubenswrapper[4780]: I1210 10:50:09.107832 4780 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/6e560d85-bf0c-4604-9f52-d46fe96b6fe7-v4-0-config-user-template-provider-selection\") on node \"crc\" DevicePath \"\"" Dec 10 10:50:09 crc kubenswrapper[4780]: I1210 10:50:09.107846 4780 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/6e560d85-bf0c-4604-9f52-d46fe96b6fe7-v4-0-config-system-serving-cert\") on node \"crc\" DevicePath \"\"" Dec 10 10:50:09 crc kubenswrapper[4780]: I1210 10:50:09.107858 4780 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/6e560d85-bf0c-4604-9f52-d46fe96b6fe7-v4-0-config-user-template-login\") on node \"crc\" DevicePath \"\"" Dec 10 10:50:09 crc kubenswrapper[4780]: I1210 10:50:09.107870 4780 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6gfvl\" (UniqueName: \"kubernetes.io/projected/6e560d85-bf0c-4604-9f52-d46fe96b6fe7-kube-api-access-6gfvl\") on node \"crc\" DevicePath \"\"" Dec 10 10:50:09 crc kubenswrapper[4780]: I1210 10:50:09.107882 4780 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/6e560d85-bf0c-4604-9f52-d46fe96b6fe7-v4-0-config-user-template-error\") on node \"crc\" DevicePath \"\"" Dec 10 10:50:09 crc kubenswrapper[4780]: I1210 10:50:09.107895 4780 reconciler_common.go:293] "Volume detached for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/6e560d85-bf0c-4604-9f52-d46fe96b6fe7-audit-policies\") on node \"crc\" DevicePath \"\"" Dec 10 10:50:09 crc kubenswrapper[4780]: I1210 10:50:09.107907 4780 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6e560d85-bf0c-4604-9f52-d46fe96b6fe7-v4-0-config-system-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 10 10:50:09 crc kubenswrapper[4780]: I1210 10:50:09.107947 4780 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/6e560d85-bf0c-4604-9f52-d46fe96b6fe7-v4-0-config-system-service-ca\") on node \"crc\" DevicePath \"\"" Dec 10 10:50:09 crc kubenswrapper[4780]: I1210 10:50:09.107961 4780 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-ocp-branding-template\" 
(UniqueName: \"kubernetes.io/secret/6e560d85-bf0c-4604-9f52-d46fe96b6fe7-v4-0-config-system-ocp-branding-template\") on node \"crc\" DevicePath \"\"" Dec 10 10:50:09 crc kubenswrapper[4780]: I1210 10:50:09.108533 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/3e69e700-b580-4621-bb66-97f89254224d-audit-policies\") pod \"oauth-openshift-69bcbbd7f8-c7vjp\" (UID: \"3e69e700-b580-4621-bb66-97f89254224d\") " pod="openshift-authentication/oauth-openshift-69bcbbd7f8-c7vjp" Dec 10 10:50:09 crc kubenswrapper[4780]: I1210 10:50:09.109105 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/3e69e700-b580-4621-bb66-97f89254224d-v4-0-config-system-service-ca\") pod \"oauth-openshift-69bcbbd7f8-c7vjp\" (UID: \"3e69e700-b580-4621-bb66-97f89254224d\") " pod="openshift-authentication/oauth-openshift-69bcbbd7f8-c7vjp" Dec 10 10:50:09 crc kubenswrapper[4780]: I1210 10:50:09.109144 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/3e69e700-b580-4621-bb66-97f89254224d-audit-dir\") pod \"oauth-openshift-69bcbbd7f8-c7vjp\" (UID: \"3e69e700-b580-4621-bb66-97f89254224d\") " pod="openshift-authentication/oauth-openshift-69bcbbd7f8-c7vjp" Dec 10 10:50:09 crc kubenswrapper[4780]: I1210 10:50:09.110379 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/3e69e700-b580-4621-bb66-97f89254224d-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-69bcbbd7f8-c7vjp\" (UID: \"3e69e700-b580-4621-bb66-97f89254224d\") " pod="openshift-authentication/oauth-openshift-69bcbbd7f8-c7vjp" Dec 10 10:50:09 crc kubenswrapper[4780]: I1210 10:50:09.113673 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/3e69e700-b580-4621-bb66-97f89254224d-v4-0-config-system-serving-cert\") pod \"oauth-openshift-69bcbbd7f8-c7vjp\" (UID: \"3e69e700-b580-4621-bb66-97f89254224d\") " pod="openshift-authentication/oauth-openshift-69bcbbd7f8-c7vjp" Dec 10 10:50:09 crc kubenswrapper[4780]: I1210 10:50:09.113874 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/3e69e700-b580-4621-bb66-97f89254224d-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-69bcbbd7f8-c7vjp\" (UID: \"3e69e700-b580-4621-bb66-97f89254224d\") " pod="openshift-authentication/oauth-openshift-69bcbbd7f8-c7vjp" Dec 10 10:50:09 crc kubenswrapper[4780]: I1210 10:50:09.114015 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/3e69e700-b580-4621-bb66-97f89254224d-v4-0-config-system-session\") pod \"oauth-openshift-69bcbbd7f8-c7vjp\" (UID: \"3e69e700-b580-4621-bb66-97f89254224d\") " pod="openshift-authentication/oauth-openshift-69bcbbd7f8-c7vjp" Dec 10 10:50:09 crc kubenswrapper[4780]: I1210 10:50:09.116043 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/3e69e700-b580-4621-bb66-97f89254224d-v4-0-config-user-template-error\") pod \"oauth-openshift-69bcbbd7f8-c7vjp\" (UID: \"3e69e700-b580-4621-bb66-97f89254224d\") " 
pod="openshift-authentication/oauth-openshift-69bcbbd7f8-c7vjp" Dec 10 10:50:09 crc kubenswrapper[4780]: I1210 10:50:09.117874 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/3e69e700-b580-4621-bb66-97f89254224d-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-69bcbbd7f8-c7vjp\" (UID: \"3e69e700-b580-4621-bb66-97f89254224d\") " pod="openshift-authentication/oauth-openshift-69bcbbd7f8-c7vjp" Dec 10 10:50:09 crc kubenswrapper[4780]: I1210 10:50:09.120679 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/3e69e700-b580-4621-bb66-97f89254224d-v4-0-config-system-router-certs\") pod \"oauth-openshift-69bcbbd7f8-c7vjp\" (UID: \"3e69e700-b580-4621-bb66-97f89254224d\") " pod="openshift-authentication/oauth-openshift-69bcbbd7f8-c7vjp" Dec 10 10:50:09 crc kubenswrapper[4780]: I1210 10:50:09.122363 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/3e69e700-b580-4621-bb66-97f89254224d-v4-0-config-user-template-login\") pod \"oauth-openshift-69bcbbd7f8-c7vjp\" (UID: \"3e69e700-b580-4621-bb66-97f89254224d\") " pod="openshift-authentication/oauth-openshift-69bcbbd7f8-c7vjp" Dec 10 10:50:09 crc kubenswrapper[4780]: I1210 10:50:09.125684 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/3e69e700-b580-4621-bb66-97f89254224d-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-69bcbbd7f8-c7vjp\" (UID: \"3e69e700-b580-4621-bb66-97f89254224d\") " pod="openshift-authentication/oauth-openshift-69bcbbd7f8-c7vjp" Dec 10 10:50:09 crc kubenswrapper[4780]: I1210 10:50:09.126638 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kqhw2\" (UniqueName: \"kubernetes.io/projected/3e69e700-b580-4621-bb66-97f89254224d-kube-api-access-kqhw2\") pod \"oauth-openshift-69bcbbd7f8-c7vjp\" (UID: \"3e69e700-b580-4621-bb66-97f89254224d\") " pod="openshift-authentication/oauth-openshift-69bcbbd7f8-c7vjp" Dec 10 10:50:09 crc kubenswrapper[4780]: I1210 10:50:09.344358 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-qllmj"] Dec 10 10:50:09 crc kubenswrapper[4780]: I1210 10:50:09.349372 4780 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-qllmj"] Dec 10 10:50:09 crc kubenswrapper[4780]: I1210 10:50:09.362454 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-authentication/oauth-openshift-69bcbbd7f8-c7vjp" Dec 10 10:50:10 crc kubenswrapper[4780]: I1210 10:50:10.217596 4780 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6e560d85-bf0c-4604-9f52-d46fe96b6fe7" path="/var/lib/kubelet/pods/6e560d85-bf0c-4604-9f52-d46fe96b6fe7/volumes" Dec 10 10:50:10 crc kubenswrapper[4780]: I1210 10:50:10.219810 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-jmx2c" event={"ID":"9a59ce5a-0c36-4120-be63-8f2051a58e78","Type":"ContainerStarted","Data":"43eec9971bacaad00c607a19ece407215403cd93bd140c4b0f600212e3197d8b"} Dec 10 10:50:10 crc kubenswrapper[4780]: I1210 10:50:10.874000 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-69bcbbd7f8-c7vjp"] Dec 10 10:50:10 crc kubenswrapper[4780]: W1210 10:50:10.893699 4780 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3e69e700_b580_4621_bb66_97f89254224d.slice/crio-1c057f97ae389de52cce7855a6c538f8c5c0c6c39eb37092ad35882e3a483282 WatchSource:0}: Error finding container 1c057f97ae389de52cce7855a6c538f8c5c0c6c39eb37092ad35882e3a483282: Status 404 returned error can't find the container with id 1c057f97ae389de52cce7855a6c538f8c5c0c6c39eb37092ad35882e3a483282 Dec 10 10:50:11 crc kubenswrapper[4780]: I1210 10:50:11.293837 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-w2bd9" event={"ID":"eee88117-019c-44a5-8a7f-95a655e53a27","Type":"ContainerStarted","Data":"bc27cdadd2bfedee7361ec350a79b5901084b5a9b00023388f5ce4060d5778ad"} Dec 10 10:50:11 crc kubenswrapper[4780]: I1210 10:50:11.323181 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-zms8r" event={"ID":"73eb2180-ca1d-4860-9306-982a9b3930b9","Type":"ContainerStarted","Data":"35a434c9893c108c4eb1ee0971c19bbc7d0bca566408260a579c9f845ff1d73e"} Dec 10 10:50:11 crc kubenswrapper[4780]: I1210 10:50:11.406109 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-dmtlf" event={"ID":"e941c8a4-98a7-48d2-9ec6-4e2dec741b54","Type":"ContainerStarted","Data":"94d624d83d382d29308fc93a0eb3ff9355627d86e2595703b15b7c9097e78fd8"} Dec 10 10:50:11 crc kubenswrapper[4780]: I1210 10:50:11.409136 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-7sdgl" event={"ID":"983b01b2-448a-462b-a87c-8d66c7824940","Type":"ContainerStarted","Data":"6d99b04e3ee62edfaad174148bb8dd2abbc46b1ce82328809a96d4e689f4ab6a"} Dec 10 10:50:11 crc kubenswrapper[4780]: I1210 10:50:11.410883 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-69bcbbd7f8-c7vjp" event={"ID":"3e69e700-b580-4621-bb66-97f89254224d","Type":"ContainerStarted","Data":"1c057f97ae389de52cce7855a6c538f8c5c0c6c39eb37092ad35882e3a483282"} Dec 10 10:50:11 crc kubenswrapper[4780]: I1210 10:50:11.412193 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-4rw4n" event={"ID":"a586027d-c0c6-4647-9318-23727f40a928","Type":"ContainerStarted","Data":"b18b8647d364019d1aad35eea124d1a1f8c0da781361da8b7384e94ccc4662b1"} Dec 10 10:50:11 crc kubenswrapper[4780]: I1210 10:50:11.414370 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-z8b9w" 
event={"ID":"3234cf0e-6206-4a41-8474-f1893163954f","Type":"ContainerStarted","Data":"cd21d171f98d7d75bd19036bf11078aca3d3b53a7b74ce3f019d76669a9c58f5"} Dec 10 10:50:12 crc kubenswrapper[4780]: I1210 10:50:12.485278 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-69bcbbd7f8-c7vjp" event={"ID":"3e69e700-b580-4621-bb66-97f89254224d","Type":"ContainerStarted","Data":"72051fa225a384a4ec48c261ea3f0b00d47be8f0d3d4d8cade85f94def127c39"} Dec 10 10:50:12 crc kubenswrapper[4780]: I1210 10:50:12.493899 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-authentication/oauth-openshift-69bcbbd7f8-c7vjp" Dec 10 10:50:12 crc kubenswrapper[4780]: I1210 10:50:12.513188 4780 patch_prober.go:28] interesting pod/oauth-openshift-69bcbbd7f8-c7vjp container/oauth-openshift namespace/openshift-authentication: Readiness probe status=failure output="Get \"https://10.217.0.56:6443/healthz\": dial tcp 10.217.0.56:6443: connect: connection refused" start-of-body= Dec 10 10:50:12 crc kubenswrapper[4780]: I1210 10:50:12.513293 4780 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-authentication/oauth-openshift-69bcbbd7f8-c7vjp" podUID="3e69e700-b580-4621-bb66-97f89254224d" containerName="oauth-openshift" probeResult="failure" output="Get \"https://10.217.0.56:6443/healthz\": dial tcp 10.217.0.56:6443: connect: connection refused" Dec 10 10:50:12 crc kubenswrapper[4780]: I1210 10:50:12.530307 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-authentication/oauth-openshift-69bcbbd7f8-c7vjp" podStartSLOduration=42.53026064 podStartE2EDuration="42.53026064s" podCreationTimestamp="2025-12-10 10:49:30 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 10:50:12.529907571 +0000 UTC m=+317.383301034" watchObservedRunningTime="2025-12-10 10:50:12.53026064 +0000 UTC m=+317.383654083" Dec 10 10:50:13 crc kubenswrapper[4780]: I1210 10:50:13.504792 4780 generic.go:334] "Generic (PLEG): container finished" podID="a586027d-c0c6-4647-9318-23727f40a928" containerID="b18b8647d364019d1aad35eea124d1a1f8c0da781361da8b7384e94ccc4662b1" exitCode=0 Dec 10 10:50:13 crc kubenswrapper[4780]: I1210 10:50:13.504876 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-4rw4n" event={"ID":"a586027d-c0c6-4647-9318-23727f40a928","Type":"ContainerDied","Data":"b18b8647d364019d1aad35eea124d1a1f8c0da781361da8b7384e94ccc4662b1"} Dec 10 10:50:13 crc kubenswrapper[4780]: I1210 10:50:13.508736 4780 generic.go:334] "Generic (PLEG): container finished" podID="eee88117-019c-44a5-8a7f-95a655e53a27" containerID="bc27cdadd2bfedee7361ec350a79b5901084b5a9b00023388f5ce4060d5778ad" exitCode=0 Dec 10 10:50:13 crc kubenswrapper[4780]: I1210 10:50:13.510263 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-w2bd9" event={"ID":"eee88117-019c-44a5-8a7f-95a655e53a27","Type":"ContainerDied","Data":"bc27cdadd2bfedee7361ec350a79b5901084b5a9b00023388f5ce4060d5778ad"} Dec 10 10:50:13 crc kubenswrapper[4780]: I1210 10:50:13.608386 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-authentication/oauth-openshift-69bcbbd7f8-c7vjp" Dec 10 10:50:14 crc kubenswrapper[4780]: I1210 10:50:14.531808 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-w2bd9" 
event={"ID":"eee88117-019c-44a5-8a7f-95a655e53a27","Type":"ContainerStarted","Data":"d54ce9b7069d21114f30d42a63e22ec3f0f98c073aacbcb441efcd851dd7be1e"} Dec 10 10:50:14 crc kubenswrapper[4780]: I1210 10:50:14.536814 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-z8b9w" event={"ID":"3234cf0e-6206-4a41-8474-f1893163954f","Type":"ContainerDied","Data":"cd21d171f98d7d75bd19036bf11078aca3d3b53a7b74ce3f019d76669a9c58f5"} Dec 10 10:50:14 crc kubenswrapper[4780]: I1210 10:50:14.536691 4780 generic.go:334] "Generic (PLEG): container finished" podID="3234cf0e-6206-4a41-8474-f1893163954f" containerID="cd21d171f98d7d75bd19036bf11078aca3d3b53a7b74ce3f019d76669a9c58f5" exitCode=0 Dec 10 10:50:14 crc kubenswrapper[4780]: I1210 10:50:14.546509 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-zms8r" event={"ID":"73eb2180-ca1d-4860-9306-982a9b3930b9","Type":"ContainerDied","Data":"35a434c9893c108c4eb1ee0971c19bbc7d0bca566408260a579c9f845ff1d73e"} Dec 10 10:50:14 crc kubenswrapper[4780]: I1210 10:50:14.548142 4780 generic.go:334] "Generic (PLEG): container finished" podID="73eb2180-ca1d-4860-9306-982a9b3930b9" containerID="35a434c9893c108c4eb1ee0971c19bbc7d0bca566408260a579c9f845ff1d73e" exitCode=0 Dec 10 10:50:14 crc kubenswrapper[4780]: I1210 10:50:14.573502 4780 generic.go:334] "Generic (PLEG): container finished" podID="983b01b2-448a-462b-a87c-8d66c7824940" containerID="6d99b04e3ee62edfaad174148bb8dd2abbc46b1ce82328809a96d4e689f4ab6a" exitCode=0 Dec 10 10:50:14 crc kubenswrapper[4780]: I1210 10:50:14.573605 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-7sdgl" event={"ID":"983b01b2-448a-462b-a87c-8d66c7824940","Type":"ContainerDied","Data":"6d99b04e3ee62edfaad174148bb8dd2abbc46b1ce82328809a96d4e689f4ab6a"} Dec 10 10:50:14 crc kubenswrapper[4780]: I1210 10:50:14.590870 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-w2bd9" podStartSLOduration=13.618353266 podStartE2EDuration="1m57.590844413s" podCreationTimestamp="2025-12-10 10:48:17 +0000 UTC" firstStartedPulling="2025-12-10 10:48:30.087612571 +0000 UTC m=+214.941006014" lastFinishedPulling="2025-12-10 10:50:14.060103718 +0000 UTC m=+318.913497161" observedRunningTime="2025-12-10 10:50:14.569587752 +0000 UTC m=+319.422981215" watchObservedRunningTime="2025-12-10 10:50:14.590844413 +0000 UTC m=+319.444237856" Dec 10 10:50:15 crc kubenswrapper[4780]: I1210 10:50:15.581149 4780 generic.go:334] "Generic (PLEG): container finished" podID="e941c8a4-98a7-48d2-9ec6-4e2dec741b54" containerID="94d624d83d382d29308fc93a0eb3ff9355627d86e2595703b15b7c9097e78fd8" exitCode=0 Dec 10 10:50:15 crc kubenswrapper[4780]: I1210 10:50:15.581232 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-dmtlf" event={"ID":"e941c8a4-98a7-48d2-9ec6-4e2dec741b54","Type":"ContainerDied","Data":"94d624d83d382d29308fc93a0eb3ff9355627d86e2595703b15b7c9097e78fd8"} Dec 10 10:50:15 crc kubenswrapper[4780]: I1210 10:50:15.583281 4780 generic.go:334] "Generic (PLEG): container finished" podID="9a59ce5a-0c36-4120-be63-8f2051a58e78" containerID="43eec9971bacaad00c607a19ece407215403cd93bd140c4b0f600212e3197d8b" exitCode=0 Dec 10 10:50:15 crc kubenswrapper[4780]: I1210 10:50:15.583341 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-jmx2c" 
event={"ID":"9a59ce5a-0c36-4120-be63-8f2051a58e78","Type":"ContainerDied","Data":"43eec9971bacaad00c607a19ece407215403cd93bd140c4b0f600212e3197d8b"} Dec 10 10:50:16 crc kubenswrapper[4780]: I1210 10:50:16.594170 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-4rw4n" event={"ID":"a586027d-c0c6-4647-9318-23727f40a928","Type":"ContainerStarted","Data":"1c8a00fcf0d6efda62ca9d5a029eda15b72ac370cf5ce7a32b36351de84f1229"} Dec 10 10:50:16 crc kubenswrapper[4780]: I1210 10:50:16.794360 4780 kubelet.go:2431] "SyncLoop REMOVE" source="file" pods=["openshift-kube-apiserver/kube-apiserver-crc"] Dec 10 10:50:16 crc kubenswrapper[4780]: I1210 10:50:16.794734 4780 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" containerID="cri-o://7f0f1f88e79f81b145ad50cc2b0a6e66de12f33c6819aaa356c8e6f7808384f8" gracePeriod=15 Dec 10 10:50:16 crc kubenswrapper[4780]: I1210 10:50:16.794835 4780 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" containerID="cri-o://960f2d7c617f5d0b3281d431308985bb7419af77581a404554f849d22ffa1687" gracePeriod=15 Dec 10 10:50:16 crc kubenswrapper[4780]: I1210 10:50:16.794940 4780 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-insecure-readyz" containerID="cri-o://531ca3c69106ee33211f11eba4a6fcbcf6cf5b8b6ee16193b5a118999537d2e0" gracePeriod=15 Dec 10 10:50:16 crc kubenswrapper[4780]: I1210 10:50:16.794906 4780 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-syncer" containerID="cri-o://a352e1e0c2a6fcb27ed2d53b1952586d56239ac801471f41d319d4b9390f7334" gracePeriod=15 Dec 10 10:50:16 crc kubenswrapper[4780]: I1210 10:50:16.794856 4780 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-regeneration-controller" containerID="cri-o://d6c4b099607f36efe13488a53c66715ba756493eaa11612cef18b8174b7b48ef" gracePeriod=15 Dec 10 10:50:16 crc kubenswrapper[4780]: I1210 10:50:16.796235 4780 kubelet.go:2421] "SyncLoop ADD" source="file" pods=["openshift-kube-apiserver/kube-apiserver-crc"] Dec 10 10:50:16 crc kubenswrapper[4780]: E1210 10:50:16.796558 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Dec 10 10:50:16 crc kubenswrapper[4780]: I1210 10:50:16.796575 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Dec 10 10:50:16 crc kubenswrapper[4780]: E1210 10:50:16.796590 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Dec 10 10:50:16 crc kubenswrapper[4780]: I1210 10:50:16.796599 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Dec 10 10:50:16 crc 
kubenswrapper[4780]: E1210 10:50:16.796617 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" Dec 10 10:50:16 crc kubenswrapper[4780]: I1210 10:50:16.796626 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" Dec 10 10:50:16 crc kubenswrapper[4780]: E1210 10:50:16.796640 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-syncer" Dec 10 10:50:16 crc kubenswrapper[4780]: I1210 10:50:16.796647 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-syncer" Dec 10 10:50:16 crc kubenswrapper[4780]: E1210 10:50:16.796660 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-insecure-readyz" Dec 10 10:50:16 crc kubenswrapper[4780]: I1210 10:50:16.796667 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-insecure-readyz" Dec 10 10:50:16 crc kubenswrapper[4780]: E1210 10:50:16.796679 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="setup" Dec 10 10:50:16 crc kubenswrapper[4780]: I1210 10:50:16.796688 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="setup" Dec 10 10:50:16 crc kubenswrapper[4780]: E1210 10:50:16.796699 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-regeneration-controller" Dec 10 10:50:16 crc kubenswrapper[4780]: I1210 10:50:16.796707 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-regeneration-controller" Dec 10 10:50:16 crc kubenswrapper[4780]: I1210 10:50:16.796851 4780 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-syncer" Dec 10 10:50:16 crc kubenswrapper[4780]: I1210 10:50:16.796868 4780 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Dec 10 10:50:16 crc kubenswrapper[4780]: I1210 10:50:16.796879 4780 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Dec 10 10:50:16 crc kubenswrapper[4780]: I1210 10:50:16.796890 4780 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" Dec 10 10:50:16 crc kubenswrapper[4780]: I1210 10:50:16.796902 4780 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-insecure-readyz" Dec 10 10:50:16 crc kubenswrapper[4780]: I1210 10:50:16.796911 4780 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-regeneration-controller" Dec 10 10:50:16 crc kubenswrapper[4780]: E1210 10:50:16.797073 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Dec 10 10:50:16 crc kubenswrapper[4780]: I1210 
10:50:16.797086 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Dec 10 10:50:16 crc kubenswrapper[4780]: I1210 10:50:16.797222 4780 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Dec 10 10:50:16 crc kubenswrapper[4780]: I1210 10:50:16.798631 4780 kubelet.go:2421] "SyncLoop ADD" source="file" pods=["openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"] Dec 10 10:50:16 crc kubenswrapper[4780]: I1210 10:50:16.799221 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Dec 10 10:50:16 crc kubenswrapper[4780]: I1210 10:50:16.814583 4780 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openshift-kube-apiserver/kube-apiserver-crc" oldPodUID="f4b27818a5e8e43d0dc095d08835c792" podUID="71bb4a3aecc4ba5b26c4b7318770ce13" Dec 10 10:50:16 crc kubenswrapper[4780]: I1210 10:50:16.902760 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 10 10:50:16 crc kubenswrapper[4780]: I1210 10:50:16.903195 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Dec 10 10:50:16 crc kubenswrapper[4780]: I1210 10:50:16.903224 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Dec 10 10:50:16 crc kubenswrapper[4780]: I1210 10:50:16.903243 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Dec 10 10:50:16 crc kubenswrapper[4780]: I1210 10:50:16.903274 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Dec 10 10:50:16 crc kubenswrapper[4780]: I1210 10:50:16.903420 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Dec 10 10:50:16 
crc kubenswrapper[4780]: I1210 10:50:16.903788 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 10 10:50:16 crc kubenswrapper[4780]: I1210 10:50:16.903874 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 10 10:50:17 crc kubenswrapper[4780]: I1210 10:50:17.005275 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Dec 10 10:50:17 crc kubenswrapper[4780]: I1210 10:50:17.005350 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Dec 10 10:50:17 crc kubenswrapper[4780]: I1210 10:50:17.005382 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Dec 10 10:50:17 crc kubenswrapper[4780]: I1210 10:50:17.005421 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Dec 10 10:50:17 crc kubenswrapper[4780]: I1210 10:50:17.005442 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Dec 10 10:50:17 crc kubenswrapper[4780]: I1210 10:50:17.005465 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Dec 10 10:50:17 crc kubenswrapper[4780]: I1210 10:50:17.005516 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 10 10:50:17 crc kubenswrapper[4780]: 
I1210 10:50:17.005538 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Dec 10 10:50:17 crc kubenswrapper[4780]: I1210 10:50:17.005551 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 10 10:50:17 crc kubenswrapper[4780]: I1210 10:50:17.005567 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 10 10:50:17 crc kubenswrapper[4780]: I1210 10:50:17.005574 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 10 10:50:17 crc kubenswrapper[4780]: I1210 10:50:17.005591 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 10 10:50:17 crc kubenswrapper[4780]: I1210 10:50:17.005619 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Dec 10 10:50:17 crc kubenswrapper[4780]: I1210 10:50:17.005681 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Dec 10 10:50:17 crc kubenswrapper[4780]: I1210 10:50:17.005705 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 10 10:50:17 crc kubenswrapper[4780]: I1210 10:50:17.005760 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Dec 10 10:50:17 crc kubenswrapper[4780]: I1210 10:50:17.041489 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Dec 10 10:50:17 crc kubenswrapper[4780]: I1210 10:50:17.046464 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"] Dec 10 10:50:17 crc kubenswrapper[4780]: E1210 10:50:17.168184 4780 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/events\": dial tcp 38.102.83.51:6443: connect: connection refused" event="&Event{ObjectMeta:{certified-operators-zms8r.187fd509d55a19aa openshift-marketplace 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-marketplace,Name:certified-operators-zms8r,UID:73eb2180-ca1d-4860-9306-982a9b3930b9,APIVersion:v1,ResourceVersion:28299,FieldPath:spec.containers{registry-server},},Reason:Pulled,Message:Successfully pulled image \"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\" in 2.601s (2.601s including waiting). Image size: 907837715 bytes.,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2025-12-10 10:50:17.163397546 +0000 UTC m=+322.016790999,LastTimestamp:2025-12-10 10:50:17.163397546 +0000 UTC m=+322.016790999,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Dec 10 10:50:17 crc kubenswrapper[4780]: I1210 10:50:17.603552 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" event={"ID":"f85e55b1a89d02b0cb034b1ea31ed45a","Type":"ContainerStarted","Data":"403026326b90b5d5b9bbbe25ddbe37acd92e1187348b08c2e7d83ed25981c8c1"} Dec 10 10:50:17 crc kubenswrapper[4780]: I1210 10:50:17.605176 4780 generic.go:334] "Generic (PLEG): container finished" podID="c7b993a1-1915-40e6-b88a-3606990443e1" containerID="4ec0cbaf267c5b2f3da41f014542631517bbabab47493dc895511c5e4eee8f6b" exitCode=0 Dec 10 10:50:17 crc kubenswrapper[4780]: I1210 10:50:17.605256 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-9-crc" event={"ID":"c7b993a1-1915-40e6-b88a-3606990443e1","Type":"ContainerDied","Data":"4ec0cbaf267c5b2f3da41f014542631517bbabab47493dc895511c5e4eee8f6b"} Dec 10 10:50:17 crc kubenswrapper[4780]: I1210 10:50:17.606307 4780 status_manager.go:851] "Failed to get status for pod" podUID="c7b993a1-1915-40e6-b88a-3606990443e1" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.51:6443: connect: connection refused" Dec 10 10:50:17 crc kubenswrapper[4780]: I1210 10:50:17.607051 4780 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.51:6443: connect: connection refused" Dec 10 10:50:17 crc kubenswrapper[4780]: I1210 10:50:17.610044 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/1.log" Dec 10 10:50:17 crc kubenswrapper[4780]: I1210 10:50:17.611852 4780 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log" Dec 10 10:50:17 crc kubenswrapper[4780]: I1210 10:50:17.613037 4780 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="960f2d7c617f5d0b3281d431308985bb7419af77581a404554f849d22ffa1687" exitCode=0 Dec 10 10:50:17 crc kubenswrapper[4780]: I1210 10:50:17.613083 4780 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="531ca3c69106ee33211f11eba4a6fcbcf6cf5b8b6ee16193b5a118999537d2e0" exitCode=0 Dec 10 10:50:17 crc kubenswrapper[4780]: I1210 10:50:17.613099 4780 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="d6c4b099607f36efe13488a53c66715ba756493eaa11612cef18b8174b7b48ef" exitCode=0 Dec 10 10:50:17 crc kubenswrapper[4780]: I1210 10:50:17.613110 4780 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="a352e1e0c2a6fcb27ed2d53b1952586d56239ac801471f41d319d4b9390f7334" exitCode=2 Dec 10 10:50:17 crc kubenswrapper[4780]: I1210 10:50:17.614324 4780 scope.go:117] "RemoveContainer" containerID="f71a2483b5040f737133b92baa6ea162f365c492a0d860a50ba8eabd7ca23519" Dec 10 10:50:17 crc kubenswrapper[4780]: I1210 10:50:17.615063 4780 status_manager.go:851] "Failed to get status for pod" podUID="c7b993a1-1915-40e6-b88a-3606990443e1" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.51:6443: connect: connection refused" Dec 10 10:50:17 crc kubenswrapper[4780]: I1210 10:50:17.615373 4780 status_manager.go:851] "Failed to get status for pod" podUID="a586027d-c0c6-4647-9318-23727f40a928" pod="openshift-marketplace/redhat-marketplace-4rw4n" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-4rw4n\": dial tcp 38.102.83.51:6443: connect: connection refused" Dec 10 10:50:17 crc kubenswrapper[4780]: I1210 10:50:17.615676 4780 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.51:6443: connect: connection refused" Dec 10 10:50:17 crc kubenswrapper[4780]: E1210 10:50:17.650325 4780 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/events\": dial tcp 38.102.83.51:6443: connect: connection refused" event="&Event{ObjectMeta:{certified-operators-zms8r.187fd509d55a19aa openshift-marketplace 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-marketplace,Name:certified-operators-zms8r,UID:73eb2180-ca1d-4860-9306-982a9b3930b9,APIVersion:v1,ResourceVersion:28299,FieldPath:spec.containers{registry-server},},Reason:Pulled,Message:Successfully pulled image \"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\" in 2.601s (2.601s including waiting). 
Image size: 907837715 bytes.,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2025-12-10 10:50:17.163397546 +0000 UTC m=+322.016790999,LastTimestamp:2025-12-10 10:50:17.163397546 +0000 UTC m=+322.016790999,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Dec 10 10:50:17 crc kubenswrapper[4780]: I1210 10:50:17.710665 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 10 10:50:17 crc kubenswrapper[4780]: I1210 10:50:17.711773 4780 status_manager.go:851] "Failed to get status for pod" podUID="a586027d-c0c6-4647-9318-23727f40a928" pod="openshift-marketplace/redhat-marketplace-4rw4n" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-4rw4n\": dial tcp 38.102.83.51:6443: connect: connection refused" Dec 10 10:50:17 crc kubenswrapper[4780]: I1210 10:50:17.712418 4780 status_manager.go:851] "Failed to get status for pod" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" pod="openshift-network-diagnostics/network-check-target-xd92c" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-network-diagnostics/pods/network-check-target-xd92c\": dial tcp 38.102.83.51:6443: connect: connection refused" Dec 10 10:50:17 crc kubenswrapper[4780]: I1210 10:50:17.712978 4780 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.51:6443: connect: connection refused" Dec 10 10:50:17 crc kubenswrapper[4780]: I1210 10:50:17.713283 4780 status_manager.go:851] "Failed to get status for pod" podUID="c7b993a1-1915-40e6-b88a-3606990443e1" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.51:6443: connect: connection refused" Dec 10 10:50:18 crc kubenswrapper[4780]: E1210 10:50:18.446131 4780 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status 
\"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:50:18Z\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:50:18Z\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:50:18Z\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:50:18Z\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[],\\\"sizeBytes\\\":1626095435},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[],\\\"sizeBytes\\\":1201960779},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490
370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256
:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564}],\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"}]}}\" for node \"crc\": Patch \"https://api-int.crc.testing:6443/api/v1/nodes/crc/status?timeout=10s\": dial tcp 38.102.83.51:6443: connect: connection refused" Dec 10 10:50:18 crc kubenswrapper[4780]: E1210 10:50:18.447144 4780 kubelet_node_status.go:585] "Error updating 
node status, will retry" err="error getting node \"crc\": Get \"https://api-int.crc.testing:6443/api/v1/nodes/crc?timeout=10s\": dial tcp 38.102.83.51:6443: connect: connection refused" Dec 10 10:50:18 crc kubenswrapper[4780]: E1210 10:50:18.447874 4780 kubelet_node_status.go:585] "Error updating node status, will retry" err="error getting node \"crc\": Get \"https://api-int.crc.testing:6443/api/v1/nodes/crc?timeout=10s\": dial tcp 38.102.83.51:6443: connect: connection refused" Dec 10 10:50:18 crc kubenswrapper[4780]: E1210 10:50:18.448514 4780 kubelet_node_status.go:585] "Error updating node status, will retry" err="error getting node \"crc\": Get \"https://api-int.crc.testing:6443/api/v1/nodes/crc?timeout=10s\": dial tcp 38.102.83.51:6443: connect: connection refused" Dec 10 10:50:18 crc kubenswrapper[4780]: E1210 10:50:18.449012 4780 kubelet_node_status.go:585] "Error updating node status, will retry" err="error getting node \"crc\": Get \"https://api-int.crc.testing:6443/api/v1/nodes/crc?timeout=10s\": dial tcp 38.102.83.51:6443: connect: connection refused" Dec 10 10:50:18 crc kubenswrapper[4780]: E1210 10:50:18.449045 4780 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Dec 10 10:50:18 crc kubenswrapper[4780]: I1210 10:50:18.624442 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" event={"ID":"f85e55b1a89d02b0cb034b1ea31ed45a","Type":"ContainerStarted","Data":"b91cd6130c2dc87f7b20a845c161c23b2590af4a45cbd77e21d39b2c598eef86"} Dec 10 10:50:18 crc kubenswrapper[4780]: I1210 10:50:18.625293 4780 status_manager.go:851] "Failed to get status for pod" podUID="c7b993a1-1915-40e6-b88a-3606990443e1" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.51:6443: connect: connection refused" Dec 10 10:50:18 crc kubenswrapper[4780]: I1210 10:50:18.625913 4780 status_manager.go:851] "Failed to get status for pod" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" pod="openshift-network-diagnostics/network-check-target-xd92c" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-network-diagnostics/pods/network-check-target-xd92c\": dial tcp 38.102.83.51:6443: connect: connection refused" Dec 10 10:50:18 crc kubenswrapper[4780]: I1210 10:50:18.626220 4780 status_manager.go:851] "Failed to get status for pod" podUID="a586027d-c0c6-4647-9318-23727f40a928" pod="openshift-marketplace/redhat-marketplace-4rw4n" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-4rw4n\": dial tcp 38.102.83.51:6443: connect: connection refused" Dec 10 10:50:18 crc kubenswrapper[4780]: I1210 10:50:18.626536 4780 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.51:6443: connect: connection refused" Dec 10 10:50:18 crc kubenswrapper[4780]: I1210 10:50:18.629259 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-zms8r" event={"ID":"73eb2180-ca1d-4860-9306-982a9b3930b9","Type":"ContainerStarted","Data":"370e5bf8aa13ca61758761decbac6952ee5921cefd8a8c1a839b84019738661e"} Dec 10 10:50:18 crc 
kubenswrapper[4780]: I1210 10:50:18.630043 4780 status_manager.go:851] "Failed to get status for pod" podUID="a586027d-c0c6-4647-9318-23727f40a928" pod="openshift-marketplace/redhat-marketplace-4rw4n" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-4rw4n\": dial tcp 38.102.83.51:6443: connect: connection refused" Dec 10 10:50:18 crc kubenswrapper[4780]: I1210 10:50:18.630329 4780 status_manager.go:851] "Failed to get status for pod" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" pod="openshift-network-diagnostics/network-check-target-xd92c" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-network-diagnostics/pods/network-check-target-xd92c\": dial tcp 38.102.83.51:6443: connect: connection refused" Dec 10 10:50:18 crc kubenswrapper[4780]: I1210 10:50:18.630673 4780 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.51:6443: connect: connection refused" Dec 10 10:50:18 crc kubenswrapper[4780]: I1210 10:50:18.631124 4780 status_manager.go:851] "Failed to get status for pod" podUID="c7b993a1-1915-40e6-b88a-3606990443e1" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.51:6443: connect: connection refused" Dec 10 10:50:18 crc kubenswrapper[4780]: I1210 10:50:18.631347 4780 status_manager.go:851] "Failed to get status for pod" podUID="73eb2180-ca1d-4860-9306-982a9b3930b9" pod="openshift-marketplace/certified-operators-zms8r" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-zms8r\": dial tcp 38.102.83.51:6443: connect: connection refused" Dec 10 10:50:18 crc kubenswrapper[4780]: I1210 10:50:18.635169 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log" Dec 10 10:50:18 crc kubenswrapper[4780]: I1210 10:50:18.959590 4780 status_manager.go:851] "Failed to get status for pod" podUID="a586027d-c0c6-4647-9318-23727f40a928" pod="openshift-marketplace/redhat-marketplace-4rw4n" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-4rw4n\": dial tcp 38.102.83.51:6443: connect: connection refused" Dec 10 10:50:18 crc kubenswrapper[4780]: I1210 10:50:18.960194 4780 status_manager.go:851] "Failed to get status for pod" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" pod="openshift-network-diagnostics/network-check-target-xd92c" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-network-diagnostics/pods/network-check-target-xd92c\": dial tcp 38.102.83.51:6443: connect: connection refused" Dec 10 10:50:18 crc kubenswrapper[4780]: I1210 10:50:18.960650 4780 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.51:6443: connect: connection refused" Dec 10 10:50:18 crc kubenswrapper[4780]: I1210 10:50:18.961228 4780 status_manager.go:851] "Failed to 
get status for pod" podUID="59f1ed36-eccd-4cd4-af95-f32539d40314" pod="openshift-marketplace/certified-operators-pdmqg" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-pdmqg\": dial tcp 38.102.83.51:6443: connect: connection refused" Dec 10 10:50:18 crc kubenswrapper[4780]: I1210 10:50:18.961529 4780 status_manager.go:851] "Failed to get status for pod" podUID="c7b993a1-1915-40e6-b88a-3606990443e1" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.51:6443: connect: connection refused" Dec 10 10:50:18 crc kubenswrapper[4780]: I1210 10:50:18.961813 4780 status_manager.go:851] "Failed to get status for pod" podUID="73eb2180-ca1d-4860-9306-982a9b3930b9" pod="openshift-marketplace/certified-operators-zms8r" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-zms8r\": dial tcp 38.102.83.51:6443: connect: connection refused" Dec 10 10:50:18 crc kubenswrapper[4780]: I1210 10:50:18.962185 4780 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/installer-9-crc" Dec 10 10:50:18 crc kubenswrapper[4780]: I1210 10:50:18.962705 4780 status_manager.go:851] "Failed to get status for pod" podUID="c7b993a1-1915-40e6-b88a-3606990443e1" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.51:6443: connect: connection refused" Dec 10 10:50:18 crc kubenswrapper[4780]: I1210 10:50:18.963159 4780 status_manager.go:851] "Failed to get status for pod" podUID="73eb2180-ca1d-4860-9306-982a9b3930b9" pod="openshift-marketplace/certified-operators-zms8r" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-zms8r\": dial tcp 38.102.83.51:6443: connect: connection refused" Dec 10 10:50:18 crc kubenswrapper[4780]: I1210 10:50:18.963472 4780 status_manager.go:851] "Failed to get status for pod" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" pod="openshift-network-diagnostics/network-check-target-xd92c" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-network-diagnostics/pods/network-check-target-xd92c\": dial tcp 38.102.83.51:6443: connect: connection refused" Dec 10 10:50:18 crc kubenswrapper[4780]: I1210 10:50:18.963765 4780 status_manager.go:851] "Failed to get status for pod" podUID="a586027d-c0c6-4647-9318-23727f40a928" pod="openshift-marketplace/redhat-marketplace-4rw4n" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-4rw4n\": dial tcp 38.102.83.51:6443: connect: connection refused" Dec 10 10:50:18 crc kubenswrapper[4780]: I1210 10:50:18.964070 4780 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.51:6443: connect: connection refused" Dec 10 10:50:18 crc kubenswrapper[4780]: I1210 10:50:18.964373 4780 status_manager.go:851] "Failed to get status for pod" podUID="59f1ed36-eccd-4cd4-af95-f32539d40314" pod="openshift-marketplace/certified-operators-pdmqg" err="Get 
\"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-pdmqg\": dial tcp 38.102.83.51:6443: connect: connection refused" Dec 10 10:50:19 crc kubenswrapper[4780]: I1210 10:50:19.035631 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/c7b993a1-1915-40e6-b88a-3606990443e1-kube-api-access\") pod \"c7b993a1-1915-40e6-b88a-3606990443e1\" (UID: \"c7b993a1-1915-40e6-b88a-3606990443e1\") " Dec 10 10:50:19 crc kubenswrapper[4780]: I1210 10:50:19.035761 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/c7b993a1-1915-40e6-b88a-3606990443e1-var-lock\") pod \"c7b993a1-1915-40e6-b88a-3606990443e1\" (UID: \"c7b993a1-1915-40e6-b88a-3606990443e1\") " Dec 10 10:50:19 crc kubenswrapper[4780]: I1210 10:50:19.035860 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/c7b993a1-1915-40e6-b88a-3606990443e1-kubelet-dir\") pod \"c7b993a1-1915-40e6-b88a-3606990443e1\" (UID: \"c7b993a1-1915-40e6-b88a-3606990443e1\") " Dec 10 10:50:19 crc kubenswrapper[4780]: I1210 10:50:19.035899 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/c7b993a1-1915-40e6-b88a-3606990443e1-var-lock" (OuterVolumeSpecName: "var-lock") pod "c7b993a1-1915-40e6-b88a-3606990443e1" (UID: "c7b993a1-1915-40e6-b88a-3606990443e1"). InnerVolumeSpecName "var-lock". PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 10 10:50:19 crc kubenswrapper[4780]: I1210 10:50:19.036023 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/c7b993a1-1915-40e6-b88a-3606990443e1-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "c7b993a1-1915-40e6-b88a-3606990443e1" (UID: "c7b993a1-1915-40e6-b88a-3606990443e1"). InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 10 10:50:19 crc kubenswrapper[4780]: I1210 10:50:19.036453 4780 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/c7b993a1-1915-40e6-b88a-3606990443e1-kubelet-dir\") on node \"crc\" DevicePath \"\"" Dec 10 10:50:19 crc kubenswrapper[4780]: I1210 10:50:19.036486 4780 reconciler_common.go:293] "Volume detached for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/c7b993a1-1915-40e6-b88a-3606990443e1-var-lock\") on node \"crc\" DevicePath \"\"" Dec 10 10:50:19 crc kubenswrapper[4780]: I1210 10:50:19.042145 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c7b993a1-1915-40e6-b88a-3606990443e1-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "c7b993a1-1915-40e6-b88a-3606990443e1" (UID: "c7b993a1-1915-40e6-b88a-3606990443e1"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 10:50:19 crc kubenswrapper[4780]: I1210 10:50:19.138072 4780 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/c7b993a1-1915-40e6-b88a-3606990443e1-kube-api-access\") on node \"crc\" DevicePath \"\"" Dec 10 10:50:19 crc kubenswrapper[4780]: I1210 10:50:19.646955 4780 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/installer-9-crc" Dec 10 10:50:19 crc kubenswrapper[4780]: I1210 10:50:19.646954 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-9-crc" event={"ID":"c7b993a1-1915-40e6-b88a-3606990443e1","Type":"ContainerDied","Data":"ff3ac85a29cdd608f5ed81bacfd648100576ce2d129c6b330a0c6ec767637ca8"} Dec 10 10:50:19 crc kubenswrapper[4780]: I1210 10:50:19.647036 4780 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ff3ac85a29cdd608f5ed81bacfd648100576ce2d129c6b330a0c6ec767637ca8" Dec 10 10:50:19 crc kubenswrapper[4780]: I1210 10:50:19.667260 4780 status_manager.go:851] "Failed to get status for pod" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" pod="openshift-network-diagnostics/network-check-target-xd92c" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-network-diagnostics/pods/network-check-target-xd92c\": dial tcp 38.102.83.51:6443: connect: connection refused" Dec 10 10:50:19 crc kubenswrapper[4780]: I1210 10:50:19.667555 4780 status_manager.go:851] "Failed to get status for pod" podUID="a586027d-c0c6-4647-9318-23727f40a928" pod="openshift-marketplace/redhat-marketplace-4rw4n" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-4rw4n\": dial tcp 38.102.83.51:6443: connect: connection refused" Dec 10 10:50:19 crc kubenswrapper[4780]: I1210 10:50:19.667777 4780 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.51:6443: connect: connection refused" Dec 10 10:50:19 crc kubenswrapper[4780]: I1210 10:50:19.668087 4780 status_manager.go:851] "Failed to get status for pod" podUID="59f1ed36-eccd-4cd4-af95-f32539d40314" pod="openshift-marketplace/certified-operators-pdmqg" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-pdmqg\": dial tcp 38.102.83.51:6443: connect: connection refused" Dec 10 10:50:19 crc kubenswrapper[4780]: I1210 10:50:19.668306 4780 status_manager.go:851] "Failed to get status for pod" podUID="c7b993a1-1915-40e6-b88a-3606990443e1" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.51:6443: connect: connection refused" Dec 10 10:50:19 crc kubenswrapper[4780]: I1210 10:50:19.668508 4780 status_manager.go:851] "Failed to get status for pod" podUID="73eb2180-ca1d-4860-9306-982a9b3930b9" pod="openshift-marketplace/certified-operators-zms8r" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-zms8r\": dial tcp 38.102.83.51:6443: connect: connection refused" Dec 10 10:50:20 crc kubenswrapper[4780]: I1210 10:50:20.700607 4780 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-zms8r" Dec 10 10:50:20 crc kubenswrapper[4780]: I1210 10:50:20.701407 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-zms8r" Dec 10 10:50:21 crc kubenswrapper[4780]: I1210 10:50:21.016056 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" 
pod="openshift-marketplace/redhat-marketplace-4rw4n" Dec 10 10:50:21 crc kubenswrapper[4780]: I1210 10:50:21.016126 4780 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-4rw4n" Dec 10 10:50:21 crc kubenswrapper[4780]: I1210 10:50:21.284723 4780 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-w2bd9" Dec 10 10:50:21 crc kubenswrapper[4780]: I1210 10:50:21.284881 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-w2bd9" Dec 10 10:50:21 crc kubenswrapper[4780]: I1210 10:50:21.868378 4780 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-w2bd9" Dec 10 10:50:21 crc kubenswrapper[4780]: I1210 10:50:21.869578 4780 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-4rw4n" Dec 10 10:50:21 crc kubenswrapper[4780]: I1210 10:50:21.869710 4780 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-zms8r" Dec 10 10:50:21 crc kubenswrapper[4780]: I1210 10:50:21.870437 4780 status_manager.go:851] "Failed to get status for pod" podUID="a586027d-c0c6-4647-9318-23727f40a928" pod="openshift-marketplace/redhat-marketplace-4rw4n" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-4rw4n\": dial tcp 38.102.83.51:6443: connect: connection refused" Dec 10 10:50:21 crc kubenswrapper[4780]: I1210 10:50:21.872130 4780 status_manager.go:851] "Failed to get status for pod" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" pod="openshift-network-diagnostics/network-check-target-xd92c" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-network-diagnostics/pods/network-check-target-xd92c\": dial tcp 38.102.83.51:6443: connect: connection refused" Dec 10 10:50:21 crc kubenswrapper[4780]: I1210 10:50:21.872987 4780 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.51:6443: connect: connection refused" Dec 10 10:50:21 crc kubenswrapper[4780]: I1210 10:50:21.873331 4780 status_manager.go:851] "Failed to get status for pod" podUID="59f1ed36-eccd-4cd4-af95-f32539d40314" pod="openshift-marketplace/certified-operators-pdmqg" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-pdmqg\": dial tcp 38.102.83.51:6443: connect: connection refused" Dec 10 10:50:21 crc kubenswrapper[4780]: I1210 10:50:21.873662 4780 status_manager.go:851] "Failed to get status for pod" podUID="eee88117-019c-44a5-8a7f-95a655e53a27" pod="openshift-marketplace/redhat-marketplace-w2bd9" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-w2bd9\": dial tcp 38.102.83.51:6443: connect: connection refused" Dec 10 10:50:21 crc kubenswrapper[4780]: I1210 10:50:21.874036 4780 status_manager.go:851] "Failed to get status for pod" podUID="c7b993a1-1915-40e6-b88a-3606990443e1" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 
38.102.83.51:6443: connect: connection refused" Dec 10 10:50:21 crc kubenswrapper[4780]: I1210 10:50:21.874356 4780 status_manager.go:851] "Failed to get status for pod" podUID="73eb2180-ca1d-4860-9306-982a9b3930b9" pod="openshift-marketplace/certified-operators-zms8r" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-zms8r\": dial tcp 38.102.83.51:6443: connect: connection refused" Dec 10 10:50:21 crc kubenswrapper[4780]: I1210 10:50:21.875110 4780 status_manager.go:851] "Failed to get status for pod" podUID="c7b993a1-1915-40e6-b88a-3606990443e1" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.51:6443: connect: connection refused" Dec 10 10:50:21 crc kubenswrapper[4780]: I1210 10:50:21.875488 4780 status_manager.go:851] "Failed to get status for pod" podUID="73eb2180-ca1d-4860-9306-982a9b3930b9" pod="openshift-marketplace/certified-operators-zms8r" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-zms8r\": dial tcp 38.102.83.51:6443: connect: connection refused" Dec 10 10:50:21 crc kubenswrapper[4780]: I1210 10:50:21.875835 4780 status_manager.go:851] "Failed to get status for pod" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" pod="openshift-network-diagnostics/network-check-target-xd92c" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-network-diagnostics/pods/network-check-target-xd92c\": dial tcp 38.102.83.51:6443: connect: connection refused" Dec 10 10:50:21 crc kubenswrapper[4780]: I1210 10:50:21.876308 4780 status_manager.go:851] "Failed to get status for pod" podUID="a586027d-c0c6-4647-9318-23727f40a928" pod="openshift-marketplace/redhat-marketplace-4rw4n" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-4rw4n\": dial tcp 38.102.83.51:6443: connect: connection refused" Dec 10 10:50:21 crc kubenswrapper[4780]: I1210 10:50:21.876730 4780 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.51:6443: connect: connection refused" Dec 10 10:50:21 crc kubenswrapper[4780]: I1210 10:50:21.877097 4780 status_manager.go:851] "Failed to get status for pod" podUID="59f1ed36-eccd-4cd4-af95-f32539d40314" pod="openshift-marketplace/certified-operators-pdmqg" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-pdmqg\": dial tcp 38.102.83.51:6443: connect: connection refused" Dec 10 10:50:21 crc kubenswrapper[4780]: I1210 10:50:21.878176 4780 status_manager.go:851] "Failed to get status for pod" podUID="eee88117-019c-44a5-8a7f-95a655e53a27" pod="openshift-marketplace/redhat-marketplace-w2bd9" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-w2bd9\": dial tcp 38.102.83.51:6443: connect: connection refused" Dec 10 10:50:21 crc kubenswrapper[4780]: I1210 10:50:21.918331 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-4rw4n" Dec 10 10:50:21 crc kubenswrapper[4780]: I1210 10:50:21.920389 4780 status_manager.go:851] "Failed to get status for 
pod" podUID="73eb2180-ca1d-4860-9306-982a9b3930b9" pod="openshift-marketplace/certified-operators-zms8r" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-zms8r\": dial tcp 38.102.83.51:6443: connect: connection refused" Dec 10 10:50:21 crc kubenswrapper[4780]: I1210 10:50:21.921026 4780 status_manager.go:851] "Failed to get status for pod" podUID="a586027d-c0c6-4647-9318-23727f40a928" pod="openshift-marketplace/redhat-marketplace-4rw4n" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-4rw4n\": dial tcp 38.102.83.51:6443: connect: connection refused" Dec 10 10:50:21 crc kubenswrapper[4780]: I1210 10:50:21.921247 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-w2bd9" Dec 10 10:50:21 crc kubenswrapper[4780]: I1210 10:50:21.921406 4780 status_manager.go:851] "Failed to get status for pod" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" pod="openshift-network-diagnostics/network-check-target-xd92c" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-network-diagnostics/pods/network-check-target-xd92c\": dial tcp 38.102.83.51:6443: connect: connection refused" Dec 10 10:50:21 crc kubenswrapper[4780]: I1210 10:50:21.921750 4780 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.51:6443: connect: connection refused" Dec 10 10:50:21 crc kubenswrapper[4780]: I1210 10:50:21.922025 4780 status_manager.go:851] "Failed to get status for pod" podUID="59f1ed36-eccd-4cd4-af95-f32539d40314" pod="openshift-marketplace/certified-operators-pdmqg" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-pdmqg\": dial tcp 38.102.83.51:6443: connect: connection refused" Dec 10 10:50:21 crc kubenswrapper[4780]: I1210 10:50:21.922389 4780 status_manager.go:851] "Failed to get status for pod" podUID="eee88117-019c-44a5-8a7f-95a655e53a27" pod="openshift-marketplace/redhat-marketplace-w2bd9" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-w2bd9\": dial tcp 38.102.83.51:6443: connect: connection refused" Dec 10 10:50:21 crc kubenswrapper[4780]: I1210 10:50:21.922750 4780 status_manager.go:851] "Failed to get status for pod" podUID="c7b993a1-1915-40e6-b88a-3606990443e1" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.51:6443: connect: connection refused" Dec 10 10:50:21 crc kubenswrapper[4780]: I1210 10:50:21.923099 4780 status_manager.go:851] "Failed to get status for pod" podUID="c7b993a1-1915-40e6-b88a-3606990443e1" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.51:6443: connect: connection refused" Dec 10 10:50:21 crc kubenswrapper[4780]: I1210 10:50:21.923482 4780 status_manager.go:851] "Failed to get status for pod" podUID="73eb2180-ca1d-4860-9306-982a9b3930b9" pod="openshift-marketplace/certified-operators-zms8r" err="Get 
\"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-zms8r\": dial tcp 38.102.83.51:6443: connect: connection refused" Dec 10 10:50:21 crc kubenswrapper[4780]: I1210 10:50:21.924057 4780 status_manager.go:851] "Failed to get status for pod" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" pod="openshift-network-diagnostics/network-check-target-xd92c" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-network-diagnostics/pods/network-check-target-xd92c\": dial tcp 38.102.83.51:6443: connect: connection refused" Dec 10 10:50:21 crc kubenswrapper[4780]: I1210 10:50:21.924681 4780 status_manager.go:851] "Failed to get status for pod" podUID="a586027d-c0c6-4647-9318-23727f40a928" pod="openshift-marketplace/redhat-marketplace-4rw4n" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-4rw4n\": dial tcp 38.102.83.51:6443: connect: connection refused" Dec 10 10:50:21 crc kubenswrapper[4780]: I1210 10:50:21.924995 4780 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.51:6443: connect: connection refused" Dec 10 10:50:21 crc kubenswrapper[4780]: I1210 10:50:21.925594 4780 status_manager.go:851] "Failed to get status for pod" podUID="59f1ed36-eccd-4cd4-af95-f32539d40314" pod="openshift-marketplace/certified-operators-pdmqg" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-pdmqg\": dial tcp 38.102.83.51:6443: connect: connection refused" Dec 10 10:50:21 crc kubenswrapper[4780]: I1210 10:50:21.925938 4780 status_manager.go:851] "Failed to get status for pod" podUID="eee88117-019c-44a5-8a7f-95a655e53a27" pod="openshift-marketplace/redhat-marketplace-w2bd9" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-w2bd9\": dial tcp 38.102.83.51:6443: connect: connection refused" Dec 10 10:50:22 crc kubenswrapper[4780]: I1210 10:50:22.673757 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log" Dec 10 10:50:22 crc kubenswrapper[4780]: I1210 10:50:22.675292 4780 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="7f0f1f88e79f81b145ad50cc2b0a6e66de12f33c6819aaa356c8e6f7808384f8" exitCode=0 Dec 10 10:50:22 crc kubenswrapper[4780]: I1210 10:50:22.717509 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-zms8r" Dec 10 10:50:22 crc kubenswrapper[4780]: I1210 10:50:22.718155 4780 status_manager.go:851] "Failed to get status for pod" podUID="73eb2180-ca1d-4860-9306-982a9b3930b9" pod="openshift-marketplace/certified-operators-zms8r" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-zms8r\": dial tcp 38.102.83.51:6443: connect: connection refused" Dec 10 10:50:22 crc kubenswrapper[4780]: I1210 10:50:22.718800 4780 status_manager.go:851] "Failed to get status for pod" podUID="a586027d-c0c6-4647-9318-23727f40a928" pod="openshift-marketplace/redhat-marketplace-4rw4n" err="Get 
\"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-4rw4n\": dial tcp 38.102.83.51:6443: connect: connection refused" Dec 10 10:50:22 crc kubenswrapper[4780]: I1210 10:50:22.719231 4780 status_manager.go:851] "Failed to get status for pod" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" pod="openshift-network-diagnostics/network-check-target-xd92c" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-network-diagnostics/pods/network-check-target-xd92c\": dial tcp 38.102.83.51:6443: connect: connection refused" Dec 10 10:50:22 crc kubenswrapper[4780]: I1210 10:50:22.719612 4780 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.51:6443: connect: connection refused" Dec 10 10:50:22 crc kubenswrapper[4780]: I1210 10:50:22.720039 4780 status_manager.go:851] "Failed to get status for pod" podUID="59f1ed36-eccd-4cd4-af95-f32539d40314" pod="openshift-marketplace/certified-operators-pdmqg" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-pdmqg\": dial tcp 38.102.83.51:6443: connect: connection refused" Dec 10 10:50:22 crc kubenswrapper[4780]: I1210 10:50:22.720444 4780 status_manager.go:851] "Failed to get status for pod" podUID="eee88117-019c-44a5-8a7f-95a655e53a27" pod="openshift-marketplace/redhat-marketplace-w2bd9" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-w2bd9\": dial tcp 38.102.83.51:6443: connect: connection refused" Dec 10 10:50:22 crc kubenswrapper[4780]: I1210 10:50:22.720699 4780 status_manager.go:851] "Failed to get status for pod" podUID="c7b993a1-1915-40e6-b88a-3606990443e1" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.51:6443: connect: connection refused" Dec 10 10:50:22 crc kubenswrapper[4780]: E1210 10:50:22.760060 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"\"" pod="openshift-marketplace/certified-operators-pdmqg" podUID="59f1ed36-eccd-4cd4-af95-f32539d40314" Dec 10 10:50:22 crc kubenswrapper[4780]: I1210 10:50:22.919724 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log" Dec 10 10:50:22 crc kubenswrapper[4780]: I1210 10:50:22.922020 4780 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 10 10:50:22 crc kubenswrapper[4780]: I1210 10:50:22.923427 4780 status_manager.go:851] "Failed to get status for pod" podUID="a586027d-c0c6-4647-9318-23727f40a928" pod="openshift-marketplace/redhat-marketplace-4rw4n" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-4rw4n\": dial tcp 38.102.83.51:6443: connect: connection refused" Dec 10 10:50:22 crc kubenswrapper[4780]: I1210 10:50:22.923767 4780 status_manager.go:851] "Failed to get status for pod" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" pod="openshift-network-diagnostics/network-check-target-xd92c" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-network-diagnostics/pods/network-check-target-xd92c\": dial tcp 38.102.83.51:6443: connect: connection refused" Dec 10 10:50:22 crc kubenswrapper[4780]: I1210 10:50:22.924166 4780 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.51:6443: connect: connection refused" Dec 10 10:50:22 crc kubenswrapper[4780]: I1210 10:50:22.924477 4780 status_manager.go:851] "Failed to get status for pod" podUID="59f1ed36-eccd-4cd4-af95-f32539d40314" pod="openshift-marketplace/certified-operators-pdmqg" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-pdmqg\": dial tcp 38.102.83.51:6443: connect: connection refused" Dec 10 10:50:22 crc kubenswrapper[4780]: I1210 10:50:22.924760 4780 status_manager.go:851] "Failed to get status for pod" podUID="eee88117-019c-44a5-8a7f-95a655e53a27" pod="openshift-marketplace/redhat-marketplace-w2bd9" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-w2bd9\": dial tcp 38.102.83.51:6443: connect: connection refused" Dec 10 10:50:22 crc kubenswrapper[4780]: I1210 10:50:22.925092 4780 status_manager.go:851] "Failed to get status for pod" podUID="c7b993a1-1915-40e6-b88a-3606990443e1" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.51:6443: connect: connection refused" Dec 10 10:50:22 crc kubenswrapper[4780]: I1210 10:50:22.925459 4780 status_manager.go:851] "Failed to get status for pod" podUID="73eb2180-ca1d-4860-9306-982a9b3930b9" pod="openshift-marketplace/certified-operators-zms8r" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-zms8r\": dial tcp 38.102.83.51:6443: connect: connection refused" Dec 10 10:50:22 crc kubenswrapper[4780]: I1210 10:50:22.925795 4780 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.51:6443: connect: connection refused" Dec 10 10:50:23 crc kubenswrapper[4780]: I1210 10:50:23.098448 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"f4b27818a5e8e43d0dc095d08835c792\" (UID: 
\"f4b27818a5e8e43d0dc095d08835c792\") " Dec 10 10:50:23 crc kubenswrapper[4780]: I1210 10:50:23.098531 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"f4b27818a5e8e43d0dc095d08835c792\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " Dec 10 10:50:23 crc kubenswrapper[4780]: I1210 10:50:23.098646 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"f4b27818a5e8e43d0dc095d08835c792\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " Dec 10 10:50:23 crc kubenswrapper[4780]: I1210 10:50:23.098825 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir" (OuterVolumeSpecName: "audit-dir") pod "f4b27818a5e8e43d0dc095d08835c792" (UID: "f4b27818a5e8e43d0dc095d08835c792"). InnerVolumeSpecName "audit-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 10 10:50:23 crc kubenswrapper[4780]: I1210 10:50:23.098826 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir" (OuterVolumeSpecName: "cert-dir") pod "f4b27818a5e8e43d0dc095d08835c792" (UID: "f4b27818a5e8e43d0dc095d08835c792"). InnerVolumeSpecName "cert-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 10 10:50:23 crc kubenswrapper[4780]: I1210 10:50:23.098854 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir" (OuterVolumeSpecName: "resource-dir") pod "f4b27818a5e8e43d0dc095d08835c792" (UID: "f4b27818a5e8e43d0dc095d08835c792"). InnerVolumeSpecName "resource-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 10 10:50:23 crc kubenswrapper[4780]: I1210 10:50:23.611758 4780 reconciler_common.go:293] "Volume detached for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") on node \"crc\" DevicePath \"\"" Dec 10 10:50:23 crc kubenswrapper[4780]: I1210 10:50:23.611846 4780 reconciler_common.go:293] "Volume detached for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") on node \"crc\" DevicePath \"\"" Dec 10 10:50:23 crc kubenswrapper[4780]: I1210 10:50:23.611863 4780 reconciler_common.go:293] "Volume detached for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") on node \"crc\" DevicePath \"\"" Dec 10 10:50:23 crc kubenswrapper[4780]: I1210 10:50:23.689373 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log" Dec 10 10:50:23 crc kubenswrapper[4780]: I1210 10:50:23.692130 4780 scope.go:117] "RemoveContainer" containerID="960f2d7c617f5d0b3281d431308985bb7419af77581a404554f849d22ffa1687" Dec 10 10:50:23 crc kubenswrapper[4780]: I1210 10:50:23.692356 4780 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 10 10:50:23 crc kubenswrapper[4780]: I1210 10:50:23.711658 4780 status_manager.go:851] "Failed to get status for pod" podUID="c7b993a1-1915-40e6-b88a-3606990443e1" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.51:6443: connect: connection refused" Dec 10 10:50:23 crc kubenswrapper[4780]: I1210 10:50:23.712196 4780 status_manager.go:851] "Failed to get status for pod" podUID="73eb2180-ca1d-4860-9306-982a9b3930b9" pod="openshift-marketplace/certified-operators-zms8r" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-zms8r\": dial tcp 38.102.83.51:6443: connect: connection refused" Dec 10 10:50:23 crc kubenswrapper[4780]: I1210 10:50:23.712520 4780 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.51:6443: connect: connection refused" Dec 10 10:50:23 crc kubenswrapper[4780]: I1210 10:50:23.712822 4780 status_manager.go:851] "Failed to get status for pod" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" pod="openshift-network-diagnostics/network-check-target-xd92c" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-network-diagnostics/pods/network-check-target-xd92c\": dial tcp 38.102.83.51:6443: connect: connection refused" Dec 10 10:50:23 crc kubenswrapper[4780]: I1210 10:50:23.713169 4780 status_manager.go:851] "Failed to get status for pod" podUID="a586027d-c0c6-4647-9318-23727f40a928" pod="openshift-marketplace/redhat-marketplace-4rw4n" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-4rw4n\": dial tcp 38.102.83.51:6443: connect: connection refused" Dec 10 10:50:23 crc kubenswrapper[4780]: I1210 10:50:23.713454 4780 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.51:6443: connect: connection refused" Dec 10 10:50:23 crc kubenswrapper[4780]: I1210 10:50:23.713762 4780 status_manager.go:851] "Failed to get status for pod" podUID="59f1ed36-eccd-4cd4-af95-f32539d40314" pod="openshift-marketplace/certified-operators-pdmqg" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-pdmqg\": dial tcp 38.102.83.51:6443: connect: connection refused" Dec 10 10:50:23 crc kubenswrapper[4780]: I1210 10:50:23.714193 4780 status_manager.go:851] "Failed to get status for pod" podUID="eee88117-019c-44a5-8a7f-95a655e53a27" pod="openshift-marketplace/redhat-marketplace-w2bd9" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-w2bd9\": dial tcp 38.102.83.51:6443: connect: connection refused" Dec 10 10:50:23 crc kubenswrapper[4780]: I1210 10:50:23.973851 4780 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f4b27818a5e8e43d0dc095d08835c792" path="/var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/volumes" Dec 10 10:50:25 crc kubenswrapper[4780]: I1210 
10:50:25.177665 4780 scope.go:117] "RemoveContainer" containerID="531ca3c69106ee33211f11eba4a6fcbcf6cf5b8b6ee16193b5a118999537d2e0" Dec 10 10:50:25 crc kubenswrapper[4780]: I1210 10:50:25.441077 4780 scope.go:117] "RemoveContainer" containerID="d6c4b099607f36efe13488a53c66715ba756493eaa11612cef18b8174b7b48ef" Dec 10 10:50:25 crc kubenswrapper[4780]: I1210 10:50:25.707669 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-z8b9w" event={"ID":"3234cf0e-6206-4a41-8474-f1893163954f","Type":"ContainerStarted","Data":"26b24261800cb493594541c099e3f0c0ca1751477a786957168881d45803aa1f"} Dec 10 10:50:25 crc kubenswrapper[4780]: I1210 10:50:25.708856 4780 status_manager.go:851] "Failed to get status for pod" podUID="3234cf0e-6206-4a41-8474-f1893163954f" pod="openshift-marketplace/community-operators-z8b9w" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-z8b9w\": dial tcp 38.102.83.51:6443: connect: connection refused" Dec 10 10:50:25 crc kubenswrapper[4780]: I1210 10:50:25.709285 4780 status_manager.go:851] "Failed to get status for pod" podUID="a586027d-c0c6-4647-9318-23727f40a928" pod="openshift-marketplace/redhat-marketplace-4rw4n" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-4rw4n\": dial tcp 38.102.83.51:6443: connect: connection refused" Dec 10 10:50:25 crc kubenswrapper[4780]: I1210 10:50:25.709545 4780 status_manager.go:851] "Failed to get status for pod" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" pod="openshift-network-diagnostics/network-check-target-xd92c" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-network-diagnostics/pods/network-check-target-xd92c\": dial tcp 38.102.83.51:6443: connect: connection refused" Dec 10 10:50:25 crc kubenswrapper[4780]: I1210 10:50:25.709786 4780 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.51:6443: connect: connection refused" Dec 10 10:50:25 crc kubenswrapper[4780]: I1210 10:50:25.709792 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log" Dec 10 10:50:25 crc kubenswrapper[4780]: I1210 10:50:25.710440 4780 status_manager.go:851] "Failed to get status for pod" podUID="59f1ed36-eccd-4cd4-af95-f32539d40314" pod="openshift-marketplace/certified-operators-pdmqg" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-pdmqg\": dial tcp 38.102.83.51:6443: connect: connection refused" Dec 10 10:50:25 crc kubenswrapper[4780]: I1210 10:50:25.711568 4780 status_manager.go:851] "Failed to get status for pod" podUID="eee88117-019c-44a5-8a7f-95a655e53a27" pod="openshift-marketplace/redhat-marketplace-w2bd9" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-w2bd9\": dial tcp 38.102.83.51:6443: connect: connection refused" Dec 10 10:50:25 crc kubenswrapper[4780]: I1210 10:50:25.712061 4780 status_manager.go:851] "Failed to get status for pod" podUID="c7b993a1-1915-40e6-b88a-3606990443e1" pod="openshift-kube-apiserver/installer-9-crc" err="Get 
\"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.51:6443: connect: connection refused" Dec 10 10:50:25 crc kubenswrapper[4780]: I1210 10:50:25.712592 4780 status_manager.go:851] "Failed to get status for pod" podUID="73eb2180-ca1d-4860-9306-982a9b3930b9" pod="openshift-marketplace/certified-operators-zms8r" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-zms8r\": dial tcp 38.102.83.51:6443: connect: connection refused" Dec 10 10:50:25 crc kubenswrapper[4780]: I1210 10:50:25.961405 4780 status_manager.go:851] "Failed to get status for pod" podUID="3234cf0e-6206-4a41-8474-f1893163954f" pod="openshift-marketplace/community-operators-z8b9w" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-z8b9w\": dial tcp 38.102.83.51:6443: connect: connection refused" Dec 10 10:50:25 crc kubenswrapper[4780]: I1210 10:50:25.961903 4780 status_manager.go:851] "Failed to get status for pod" podUID="a586027d-c0c6-4647-9318-23727f40a928" pod="openshift-marketplace/redhat-marketplace-4rw4n" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-4rw4n\": dial tcp 38.102.83.51:6443: connect: connection refused" Dec 10 10:50:25 crc kubenswrapper[4780]: I1210 10:50:25.963067 4780 status_manager.go:851] "Failed to get status for pod" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" pod="openshift-network-diagnostics/network-check-target-xd92c" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-network-diagnostics/pods/network-check-target-xd92c\": dial tcp 38.102.83.51:6443: connect: connection refused" Dec 10 10:50:25 crc kubenswrapper[4780]: I1210 10:50:25.963459 4780 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.51:6443: connect: connection refused" Dec 10 10:50:25 crc kubenswrapper[4780]: I1210 10:50:25.963741 4780 status_manager.go:851] "Failed to get status for pod" podUID="59f1ed36-eccd-4cd4-af95-f32539d40314" pod="openshift-marketplace/certified-operators-pdmqg" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-pdmqg\": dial tcp 38.102.83.51:6443: connect: connection refused" Dec 10 10:50:25 crc kubenswrapper[4780]: I1210 10:50:25.964125 4780 status_manager.go:851] "Failed to get status for pod" podUID="eee88117-019c-44a5-8a7f-95a655e53a27" pod="openshift-marketplace/redhat-marketplace-w2bd9" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-w2bd9\": dial tcp 38.102.83.51:6443: connect: connection refused" Dec 10 10:50:25 crc kubenswrapper[4780]: I1210 10:50:25.964732 4780 status_manager.go:851] "Failed to get status for pod" podUID="c7b993a1-1915-40e6-b88a-3606990443e1" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.51:6443: connect: connection refused" Dec 10 10:50:25 crc kubenswrapper[4780]: I1210 10:50:25.965197 4780 status_manager.go:851] "Failed to get status for pod" podUID="73eb2180-ca1d-4860-9306-982a9b3930b9" 
pod="openshift-marketplace/certified-operators-zms8r" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-zms8r\": dial tcp 38.102.83.51:6443: connect: connection refused" Dec 10 10:50:26 crc kubenswrapper[4780]: E1210 10:50:26.532932 4780 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.51:6443: connect: connection refused" Dec 10 10:50:26 crc kubenswrapper[4780]: E1210 10:50:26.534216 4780 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.51:6443: connect: connection refused" Dec 10 10:50:26 crc kubenswrapper[4780]: E1210 10:50:26.534996 4780 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.51:6443: connect: connection refused" Dec 10 10:50:26 crc kubenswrapper[4780]: E1210 10:50:26.535455 4780 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.51:6443: connect: connection refused" Dec 10 10:50:26 crc kubenswrapper[4780]: E1210 10:50:26.535893 4780 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.51:6443: connect: connection refused" Dec 10 10:50:26 crc kubenswrapper[4780]: I1210 10:50:26.536092 4780 controller.go:115] "failed to update lease using latest lease, fallback to ensure lease" err="failed 5 attempts to update lease" Dec 10 10:50:26 crc kubenswrapper[4780]: E1210 10:50:26.536606 4780 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.51:6443: connect: connection refused" interval="200ms" Dec 10 10:50:26 crc kubenswrapper[4780]: E1210 10:50:26.737854 4780 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.51:6443: connect: connection refused" interval="400ms" Dec 10 10:50:26 crc kubenswrapper[4780]: I1210 10:50:26.784825 4780 scope.go:117] "RemoveContainer" containerID="a352e1e0c2a6fcb27ed2d53b1952586d56239ac801471f41d319d4b9390f7334" Dec 10 10:50:26 crc kubenswrapper[4780]: I1210 10:50:26.806730 4780 scope.go:117] "RemoveContainer" containerID="7f0f1f88e79f81b145ad50cc2b0a6e66de12f33c6819aaa356c8e6f7808384f8" Dec 10 10:50:26 crc kubenswrapper[4780]: I1210 10:50:26.825077 4780 scope.go:117] "RemoveContainer" containerID="6a7d241fa7fd864088c8bfa29a4457b38b3e78a6846872d6f52aeebb0de99c58" Dec 10 10:50:27 crc kubenswrapper[4780]: E1210 10:50:27.138393 4780 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.51:6443: connect: connection refused" interval="800ms" Dec 10 10:50:27 crc kubenswrapper[4780]: E1210 10:50:27.651806 4780 event.go:368] "Unable to write event (may retry after 
sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/events\": dial tcp 38.102.83.51:6443: connect: connection refused" event="&Event{ObjectMeta:{certified-operators-zms8r.187fd509d55a19aa openshift-marketplace 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-marketplace,Name:certified-operators-zms8r,UID:73eb2180-ca1d-4860-9306-982a9b3930b9,APIVersion:v1,ResourceVersion:28299,FieldPath:spec.containers{registry-server},},Reason:Pulled,Message:Successfully pulled image \"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\" in 2.601s (2.601s including waiting). Image size: 907837715 bytes.,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2025-12-10 10:50:17.163397546 +0000 UTC m=+322.016790999,LastTimestamp:2025-12-10 10:50:17.163397546 +0000 UTC m=+322.016790999,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Dec 10 10:50:27 crc kubenswrapper[4780]: E1210 10:50:27.940472 4780 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.51:6443: connect: connection refused" interval="1.6s" Dec 10 10:50:28 crc kubenswrapper[4780]: E1210 10:50:28.552528 4780 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:50:28Z\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:50:28Z\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:50:28Z\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:50:28Z\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[],\\\"sizeBytes\\\":1626095435},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[],\\\"sizeBytes\\\":1201960779},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"nam
es\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-d
ev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256
:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564}],\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"}]}}\" for node \"crc\": Patch \"https://api-int.crc.testing:6443/api/v1/nodes/crc/status?timeout=10s\": dial tcp 38.102.83.51:6443: connect: connection refused" Dec 10 10:50:28 crc kubenswrapper[4780]: E1210 10:50:28.553205 4780 kubelet_node_status.go:585] "Error updating node status, will retry" err="error getting node \"crc\": Get \"https://api-int.crc.testing:6443/api/v1/nodes/crc?timeout=10s\": dial tcp 38.102.83.51:6443: connect: connection refused" Dec 10 10:50:28 crc kubenswrapper[4780]: E1210 10:50:28.553537 4780 kubelet_node_status.go:585] "Error updating node status, will retry" err="error getting node \"crc\": Get \"https://api-int.crc.testing:6443/api/v1/nodes/crc?timeout=10s\": dial tcp 38.102.83.51:6443: connect: connection refused" Dec 10 10:50:28 crc kubenswrapper[4780]: E1210 10:50:28.553782 4780 kubelet_node_status.go:585] "Error updating node status, will retry" err="error getting node \"crc\": Get \"https://api-int.crc.testing:6443/api/v1/nodes/crc?timeout=10s\": dial tcp 38.102.83.51:6443: connect: connection refused" Dec 10 10:50:28 crc kubenswrapper[4780]: E1210 10:50:28.554082 4780 kubelet_node_status.go:585] "Error updating node status, will retry" err="error getting node \"crc\": Get \"https://api-int.crc.testing:6443/api/v1/nodes/crc?timeout=10s\": dial tcp 38.102.83.51:6443: connect: connection refused" Dec 10 10:50:28 crc kubenswrapper[4780]: E1210 10:50:28.554107 4780 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Dec 10 10:50:28 crc kubenswrapper[4780]: I1210 10:50:28.736773 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-7sdgl" event={"ID":"983b01b2-448a-462b-a87c-8d66c7824940","Type":"ContainerStarted","Data":"80a943af72a32c49e07a666ca4042a452eaf9cb54bc1c750edde8da1a1e61d61"} Dec 10 10:50:28 crc kubenswrapper[4780]: I1210 10:50:28.958417 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 10 10:50:28 crc kubenswrapper[4780]: I1210 10:50:28.959855 4780 status_manager.go:851] "Failed to get status for pod" podUID="c7b993a1-1915-40e6-b88a-3606990443e1" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.51:6443: connect: connection refused" Dec 10 10:50:28 crc kubenswrapper[4780]: I1210 10:50:28.960453 4780 status_manager.go:851] "Failed to get status for pod" podUID="73eb2180-ca1d-4860-9306-982a9b3930b9" pod="openshift-marketplace/certified-operators-zms8r" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-zms8r\": dial tcp 38.102.83.51:6443: connect: connection refused" Dec 10 10:50:28 crc kubenswrapper[4780]: I1210 10:50:28.960879 4780 status_manager.go:851] "Failed to get status for pod" podUID="3234cf0e-6206-4a41-8474-f1893163954f" pod="openshift-marketplace/community-operators-z8b9w" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-z8b9w\": dial tcp 38.102.83.51:6443: connect: connection refused" Dec 10 10:50:28 crc kubenswrapper[4780]: I1210 10:50:28.961451 4780 status_manager.go:851] "Failed to get status for pod" podUID="a586027d-c0c6-4647-9318-23727f40a928" pod="openshift-marketplace/redhat-marketplace-4rw4n" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-4rw4n\": dial tcp 38.102.83.51:6443: connect: connection refused" Dec 10 10:50:28 crc kubenswrapper[4780]: I1210 10:50:28.962262 4780 status_manager.go:851] "Failed to get status for pod" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" pod="openshift-network-diagnostics/network-check-target-xd92c" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-network-diagnostics/pods/network-check-target-xd92c\": dial tcp 38.102.83.51:6443: connect: connection refused" Dec 10 10:50:28 crc kubenswrapper[4780]: I1210 10:50:28.963241 4780 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.51:6443: connect: connection refused" Dec 10 10:50:28 crc kubenswrapper[4780]: I1210 10:50:28.963713 4780 status_manager.go:851] "Failed to get status for pod" podUID="59f1ed36-eccd-4cd4-af95-f32539d40314" pod="openshift-marketplace/certified-operators-pdmqg" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-pdmqg\": dial tcp 38.102.83.51:6443: connect: connection refused" Dec 10 10:50:28 crc kubenswrapper[4780]: I1210 10:50:28.964329 4780 status_manager.go:851] "Failed to get status for pod" podUID="eee88117-019c-44a5-8a7f-95a655e53a27" pod="openshift-marketplace/redhat-marketplace-w2bd9" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-w2bd9\": dial tcp 38.102.83.51:6443: connect: connection refused" Dec 10 10:50:28 crc kubenswrapper[4780]: I1210 10:50:28.977745 4780 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="da4d38a3-53c0-417d-a86f-3496714bd352" Dec 10 10:50:28 crc kubenswrapper[4780]: I1210 10:50:28.977815 4780 
mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="da4d38a3-53c0-417d-a86f-3496714bd352" Dec 10 10:50:28 crc kubenswrapper[4780]: E1210 10:50:28.979439 4780 mirror_client.go:138] "Failed deleting a mirror pod" err="Delete \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.51:6443: connect: connection refused" pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 10 10:50:28 crc kubenswrapper[4780]: I1210 10:50:28.980090 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 10 10:50:29 crc kubenswrapper[4780]: I1210 10:50:29.478821 4780 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-z8b9w" Dec 10 10:50:29 crc kubenswrapper[4780]: I1210 10:50:29.479313 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-z8b9w" Dec 10 10:50:29 crc kubenswrapper[4780]: E1210 10:50:29.542253 4780 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.51:6443: connect: connection refused" interval="3.2s" Dec 10 10:50:29 crc kubenswrapper[4780]: I1210 10:50:29.548628 4780 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-z8b9w" Dec 10 10:50:29 crc kubenswrapper[4780]: I1210 10:50:29.550315 4780 status_manager.go:851] "Failed to get status for pod" podUID="3234cf0e-6206-4a41-8474-f1893163954f" pod="openshift-marketplace/community-operators-z8b9w" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-z8b9w\": dial tcp 38.102.83.51:6443: connect: connection refused" Dec 10 10:50:29 crc kubenswrapper[4780]: I1210 10:50:29.550790 4780 status_manager.go:851] "Failed to get status for pod" podUID="a586027d-c0c6-4647-9318-23727f40a928" pod="openshift-marketplace/redhat-marketplace-4rw4n" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-4rw4n\": dial tcp 38.102.83.51:6443: connect: connection refused" Dec 10 10:50:29 crc kubenswrapper[4780]: I1210 10:50:29.551236 4780 status_manager.go:851] "Failed to get status for pod" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" pod="openshift-network-diagnostics/network-check-target-xd92c" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-network-diagnostics/pods/network-check-target-xd92c\": dial tcp 38.102.83.51:6443: connect: connection refused" Dec 10 10:50:29 crc kubenswrapper[4780]: I1210 10:50:29.551555 4780 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.51:6443: connect: connection refused" Dec 10 10:50:29 crc kubenswrapper[4780]: I1210 10:50:29.551833 4780 status_manager.go:851] "Failed to get status for pod" podUID="59f1ed36-eccd-4cd4-af95-f32539d40314" pod="openshift-marketplace/certified-operators-pdmqg" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-pdmqg\": dial tcp 
38.102.83.51:6443: connect: connection refused" Dec 10 10:50:29 crc kubenswrapper[4780]: I1210 10:50:29.552235 4780 status_manager.go:851] "Failed to get status for pod" podUID="eee88117-019c-44a5-8a7f-95a655e53a27" pod="openshift-marketplace/redhat-marketplace-w2bd9" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-w2bd9\": dial tcp 38.102.83.51:6443: connect: connection refused" Dec 10 10:50:29 crc kubenswrapper[4780]: I1210 10:50:29.552511 4780 status_manager.go:851] "Failed to get status for pod" podUID="c7b993a1-1915-40e6-b88a-3606990443e1" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.51:6443: connect: connection refused" Dec 10 10:50:29 crc kubenswrapper[4780]: I1210 10:50:29.552785 4780 status_manager.go:851] "Failed to get status for pod" podUID="73eb2180-ca1d-4860-9306-982a9b3930b9" pod="openshift-marketplace/certified-operators-zms8r" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-zms8r\": dial tcp 38.102.83.51:6443: connect: connection refused" Dec 10 10:50:31 crc kubenswrapper[4780]: I1210 10:50:31.757129 4780 status_manager.go:851] "Failed to get status for pod" podUID="73eb2180-ca1d-4860-9306-982a9b3930b9" pod="openshift-marketplace/certified-operators-zms8r" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-zms8r\": dial tcp 38.102.83.51:6443: connect: connection refused" Dec 10 10:50:31 crc kubenswrapper[4780]: I1210 10:50:31.758889 4780 status_manager.go:851] "Failed to get status for pod" podUID="983b01b2-448a-462b-a87c-8d66c7824940" pod="openshift-marketplace/community-operators-7sdgl" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-7sdgl\": dial tcp 38.102.83.51:6443: connect: connection refused" Dec 10 10:50:31 crc kubenswrapper[4780]: I1210 10:50:31.759613 4780 status_manager.go:851] "Failed to get status for pod" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" pod="openshift-network-diagnostics/network-check-target-xd92c" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-network-diagnostics/pods/network-check-target-xd92c\": dial tcp 38.102.83.51:6443: connect: connection refused" Dec 10 10:50:31 crc kubenswrapper[4780]: I1210 10:50:31.760402 4780 status_manager.go:851] "Failed to get status for pod" podUID="3234cf0e-6206-4a41-8474-f1893163954f" pod="openshift-marketplace/community-operators-z8b9w" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-z8b9w\": dial tcp 38.102.83.51:6443: connect: connection refused" Dec 10 10:50:31 crc kubenswrapper[4780]: I1210 10:50:31.761014 4780 status_manager.go:851] "Failed to get status for pod" podUID="a586027d-c0c6-4647-9318-23727f40a928" pod="openshift-marketplace/redhat-marketplace-4rw4n" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-4rw4n\": dial tcp 38.102.83.51:6443: connect: connection refused" Dec 10 10:50:31 crc kubenswrapper[4780]: I1210 10:50:31.762095 4780 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get 
\"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.51:6443: connect: connection refused" Dec 10 10:50:31 crc kubenswrapper[4780]: I1210 10:50:31.762653 4780 status_manager.go:851] "Failed to get status for pod" podUID="59f1ed36-eccd-4cd4-af95-f32539d40314" pod="openshift-marketplace/certified-operators-pdmqg" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-pdmqg\": dial tcp 38.102.83.51:6443: connect: connection refused" Dec 10 10:50:31 crc kubenswrapper[4780]: I1210 10:50:31.763009 4780 status_manager.go:851] "Failed to get status for pod" podUID="eee88117-019c-44a5-8a7f-95a655e53a27" pod="openshift-marketplace/redhat-marketplace-w2bd9" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-w2bd9\": dial tcp 38.102.83.51:6443: connect: connection refused" Dec 10 10:50:31 crc kubenswrapper[4780]: I1210 10:50:31.763409 4780 status_manager.go:851] "Failed to get status for pod" podUID="c7b993a1-1915-40e6-b88a-3606990443e1" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.51:6443: connect: connection refused" Dec 10 10:50:32 crc kubenswrapper[4780]: E1210 10:50:32.743687 4780 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.51:6443: connect: connection refused" interval="6.4s" Dec 10 10:50:32 crc kubenswrapper[4780]: I1210 10:50:32.769368 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_f614b9022728cf315e60c057852e563e/kube-controller-manager/0.log" Dec 10 10:50:32 crc kubenswrapper[4780]: I1210 10:50:32.769456 4780 generic.go:334] "Generic (PLEG): container finished" podID="f614b9022728cf315e60c057852e563e" containerID="8df4b9f29751cd2ebfbdf2b33f30fb6f519721f13d03173b45c5905e64524c88" exitCode=1 Dec 10 10:50:32 crc kubenswrapper[4780]: I1210 10:50:32.769563 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerDied","Data":"8df4b9f29751cd2ebfbdf2b33f30fb6f519721f13d03173b45c5905e64524c88"} Dec 10 10:50:32 crc kubenswrapper[4780]: I1210 10:50:32.770469 4780 scope.go:117] "RemoveContainer" containerID="8df4b9f29751cd2ebfbdf2b33f30fb6f519721f13d03173b45c5905e64524c88" Dec 10 10:50:32 crc kubenswrapper[4780]: I1210 10:50:32.771355 4780 status_manager.go:851] "Failed to get status for pod" podUID="eee88117-019c-44a5-8a7f-95a655e53a27" pod="openshift-marketplace/redhat-marketplace-w2bd9" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-w2bd9\": dial tcp 38.102.83.51:6443: connect: connection refused" Dec 10 10:50:32 crc kubenswrapper[4780]: I1210 10:50:32.772002 4780 status_manager.go:851] "Failed to get status for pod" podUID="f614b9022728cf315e60c057852e563e" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 38.102.83.51:6443: connect: connection refused" Dec 10 10:50:32 crc kubenswrapper[4780]: 
I1210 10:50:32.772462 4780 status_manager.go:851] "Failed to get status for pod" podUID="c7b993a1-1915-40e6-b88a-3606990443e1" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.51:6443: connect: connection refused" Dec 10 10:50:32 crc kubenswrapper[4780]: I1210 10:50:32.772674 4780 status_manager.go:851] "Failed to get status for pod" podUID="73eb2180-ca1d-4860-9306-982a9b3930b9" pod="openshift-marketplace/certified-operators-zms8r" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-zms8r\": dial tcp 38.102.83.51:6443: connect: connection refused" Dec 10 10:50:32 crc kubenswrapper[4780]: I1210 10:50:32.772951 4780 status_manager.go:851] "Failed to get status for pod" podUID="983b01b2-448a-462b-a87c-8d66c7824940" pod="openshift-marketplace/community-operators-7sdgl" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-7sdgl\": dial tcp 38.102.83.51:6443: connect: connection refused" Dec 10 10:50:32 crc kubenswrapper[4780]: I1210 10:50:32.773184 4780 status_manager.go:851] "Failed to get status for pod" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" pod="openshift-network-diagnostics/network-check-target-xd92c" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-network-diagnostics/pods/network-check-target-xd92c\": dial tcp 38.102.83.51:6443: connect: connection refused" Dec 10 10:50:32 crc kubenswrapper[4780]: I1210 10:50:32.773488 4780 status_manager.go:851] "Failed to get status for pod" podUID="3234cf0e-6206-4a41-8474-f1893163954f" pod="openshift-marketplace/community-operators-z8b9w" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-z8b9w\": dial tcp 38.102.83.51:6443: connect: connection refused" Dec 10 10:50:32 crc kubenswrapper[4780]: I1210 10:50:32.773796 4780 status_manager.go:851] "Failed to get status for pod" podUID="a586027d-c0c6-4647-9318-23727f40a928" pod="openshift-marketplace/redhat-marketplace-4rw4n" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-4rw4n\": dial tcp 38.102.83.51:6443: connect: connection refused" Dec 10 10:50:32 crc kubenswrapper[4780]: I1210 10:50:32.774026 4780 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.51:6443: connect: connection refused" Dec 10 10:50:32 crc kubenswrapper[4780]: I1210 10:50:32.774289 4780 status_manager.go:851] "Failed to get status for pod" podUID="59f1ed36-eccd-4cd4-af95-f32539d40314" pod="openshift-marketplace/certified-operators-pdmqg" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-pdmqg\": dial tcp 38.102.83.51:6443: connect: connection refused" Dec 10 10:50:34 crc kubenswrapper[4780]: I1210 10:50:34.668471 4780 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Dec 10 10:50:34 crc kubenswrapper[4780]: W1210 10:50:34.998049 4780 manager.go:1169] Failed to process watch event {EventType:0 
Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod71bb4a3aecc4ba5b26c4b7318770ce13.slice/crio-04a4bda4f91127cc675a9874708cf3f04cde609b5986794c3cd5ea82d98ba10e WatchSource:0}: Error finding container 04a4bda4f91127cc675a9874708cf3f04cde609b5986794c3cd5ea82d98ba10e: Status 404 returned error can't find the container with id 04a4bda4f91127cc675a9874708cf3f04cde609b5986794c3cd5ea82d98ba10e Dec 10 10:50:35 crc kubenswrapper[4780]: I1210 10:50:35.160558 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Dec 10 10:50:35 crc kubenswrapper[4780]: I1210 10:50:35.792125 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-dmtlf" event={"ID":"e941c8a4-98a7-48d2-9ec6-4e2dec741b54","Type":"ContainerStarted","Data":"0942019c25a27331a47beb7a056cf3f5cd236ad087179cacba6054f6537f7ae9"} Dec 10 10:50:35 crc kubenswrapper[4780]: I1210 10:50:35.794142 4780 status_manager.go:851] "Failed to get status for pod" podUID="c7b993a1-1915-40e6-b88a-3606990443e1" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.51:6443: connect: connection refused" Dec 10 10:50:35 crc kubenswrapper[4780]: I1210 10:50:35.794446 4780 status_manager.go:851] "Failed to get status for pod" podUID="73eb2180-ca1d-4860-9306-982a9b3930b9" pod="openshift-marketplace/certified-operators-zms8r" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-zms8r\": dial tcp 38.102.83.51:6443: connect: connection refused" Dec 10 10:50:35 crc kubenswrapper[4780]: I1210 10:50:35.794826 4780 status_manager.go:851] "Failed to get status for pod" podUID="983b01b2-448a-462b-a87c-8d66c7824940" pod="openshift-marketplace/community-operators-7sdgl" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-7sdgl\": dial tcp 38.102.83.51:6443: connect: connection refused" Dec 10 10:50:35 crc kubenswrapper[4780]: I1210 10:50:35.795166 4780 status_manager.go:851] "Failed to get status for pod" podUID="3234cf0e-6206-4a41-8474-f1893163954f" pod="openshift-marketplace/community-operators-z8b9w" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-z8b9w\": dial tcp 38.102.83.51:6443: connect: connection refused" Dec 10 10:50:35 crc kubenswrapper[4780]: I1210 10:50:35.795415 4780 status_manager.go:851] "Failed to get status for pod" podUID="a586027d-c0c6-4647-9318-23727f40a928" pod="openshift-marketplace/redhat-marketplace-4rw4n" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-4rw4n\": dial tcp 38.102.83.51:6443: connect: connection refused" Dec 10 10:50:35 crc kubenswrapper[4780]: I1210 10:50:35.795616 4780 status_manager.go:851] "Failed to get status for pod" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" pod="openshift-network-diagnostics/network-check-target-xd92c" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-network-diagnostics/pods/network-check-target-xd92c\": dial tcp 38.102.83.51:6443: connect: connection refused" Dec 10 10:50:35 crc kubenswrapper[4780]: I1210 10:50:35.796239 4780 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get 
\"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.51:6443: connect: connection refused" Dec 10 10:50:35 crc kubenswrapper[4780]: I1210 10:50:35.796702 4780 status_manager.go:851] "Failed to get status for pod" podUID="59f1ed36-eccd-4cd4-af95-f32539d40314" pod="openshift-marketplace/certified-operators-pdmqg" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-pdmqg\": dial tcp 38.102.83.51:6443: connect: connection refused" Dec 10 10:50:35 crc kubenswrapper[4780]: I1210 10:50:35.800072 4780 status_manager.go:851] "Failed to get status for pod" podUID="e941c8a4-98a7-48d2-9ec6-4e2dec741b54" pod="openshift-marketplace/redhat-operators-dmtlf" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-dmtlf\": dial tcp 38.102.83.51:6443: connect: connection refused" Dec 10 10:50:35 crc kubenswrapper[4780]: I1210 10:50:35.800440 4780 status_manager.go:851] "Failed to get status for pod" podUID="eee88117-019c-44a5-8a7f-95a655e53a27" pod="openshift-marketplace/redhat-marketplace-w2bd9" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-w2bd9\": dial tcp 38.102.83.51:6443: connect: connection refused" Dec 10 10:50:35 crc kubenswrapper[4780]: I1210 10:50:35.800985 4780 status_manager.go:851] "Failed to get status for pod" podUID="f614b9022728cf315e60c057852e563e" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 38.102.83.51:6443: connect: connection refused" Dec 10 10:50:35 crc kubenswrapper[4780]: I1210 10:50:35.801814 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_f614b9022728cf315e60c057852e563e/kube-controller-manager/0.log" Dec 10 10:50:35 crc kubenswrapper[4780]: I1210 10:50:35.802036 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"65e0e44c27054d1ef547fd13df93a762cbce0a08a0bf13b95a9bdfb6c65c2d23"} Dec 10 10:50:35 crc kubenswrapper[4780]: I1210 10:50:35.802911 4780 status_manager.go:851] "Failed to get status for pod" podUID="3234cf0e-6206-4a41-8474-f1893163954f" pod="openshift-marketplace/community-operators-z8b9w" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-z8b9w\": dial tcp 38.102.83.51:6443: connect: connection refused" Dec 10 10:50:35 crc kubenswrapper[4780]: I1210 10:50:35.803332 4780 status_manager.go:851] "Failed to get status for pod" podUID="a586027d-c0c6-4647-9318-23727f40a928" pod="openshift-marketplace/redhat-marketplace-4rw4n" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-4rw4n\": dial tcp 38.102.83.51:6443: connect: connection refused" Dec 10 10:50:35 crc kubenswrapper[4780]: I1210 10:50:35.804143 4780 status_manager.go:851] "Failed to get status for pod" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" pod="openshift-network-diagnostics/network-check-target-xd92c" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-network-diagnostics/pods/network-check-target-xd92c\": dial tcp 38.102.83.51:6443: 
connect: connection refused" Dec 10 10:50:35 crc kubenswrapper[4780]: I1210 10:50:35.810318 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-jmx2c" event={"ID":"9a59ce5a-0c36-4120-be63-8f2051a58e78","Type":"ContainerStarted","Data":"c9523d172441d49bf4445ad06c5bd41d911bb83542ec6057cd86d9881a8dca4f"} Dec 10 10:50:35 crc kubenswrapper[4780]: I1210 10:50:35.814000 4780 generic.go:334] "Generic (PLEG): container finished" podID="71bb4a3aecc4ba5b26c4b7318770ce13" containerID="1928f4de9d2ebe789c1f7ca52e3d705383da46b44fd60b3d0e7d677839d4e53b" exitCode=0 Dec 10 10:50:35 crc kubenswrapper[4780]: I1210 10:50:35.814065 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerDied","Data":"1928f4de9d2ebe789c1f7ca52e3d705383da46b44fd60b3d0e7d677839d4e53b"} Dec 10 10:50:35 crc kubenswrapper[4780]: I1210 10:50:35.814098 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"04a4bda4f91127cc675a9874708cf3f04cde609b5986794c3cd5ea82d98ba10e"} Dec 10 10:50:35 crc kubenswrapper[4780]: I1210 10:50:35.814193 4780 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.51:6443: connect: connection refused" Dec 10 10:50:35 crc kubenswrapper[4780]: I1210 10:50:35.814434 4780 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="da4d38a3-53c0-417d-a86f-3496714bd352" Dec 10 10:50:35 crc kubenswrapper[4780]: I1210 10:50:35.814483 4780 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="da4d38a3-53c0-417d-a86f-3496714bd352" Dec 10 10:50:35 crc kubenswrapper[4780]: E1210 10:50:35.814834 4780 mirror_client.go:138] "Failed deleting a mirror pod" err="Delete \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.51:6443: connect: connection refused" pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 10 10:50:35 crc kubenswrapper[4780]: I1210 10:50:35.814966 4780 status_manager.go:851] "Failed to get status for pod" podUID="59f1ed36-eccd-4cd4-af95-f32539d40314" pod="openshift-marketplace/certified-operators-pdmqg" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-pdmqg\": dial tcp 38.102.83.51:6443: connect: connection refused" Dec 10 10:50:35 crc kubenswrapper[4780]: I1210 10:50:35.815567 4780 status_manager.go:851] "Failed to get status for pod" podUID="e941c8a4-98a7-48d2-9ec6-4e2dec741b54" pod="openshift-marketplace/redhat-operators-dmtlf" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-dmtlf\": dial tcp 38.102.83.51:6443: connect: connection refused" Dec 10 10:50:35 crc kubenswrapper[4780]: I1210 10:50:35.815850 4780 status_manager.go:851] "Failed to get status for pod" podUID="eee88117-019c-44a5-8a7f-95a655e53a27" pod="openshift-marketplace/redhat-marketplace-w2bd9" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-w2bd9\": dial tcp 
38.102.83.51:6443: connect: connection refused" Dec 10 10:50:35 crc kubenswrapper[4780]: I1210 10:50:35.816123 4780 status_manager.go:851] "Failed to get status for pod" podUID="f614b9022728cf315e60c057852e563e" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 38.102.83.51:6443: connect: connection refused" Dec 10 10:50:35 crc kubenswrapper[4780]: I1210 10:50:35.816392 4780 status_manager.go:851] "Failed to get status for pod" podUID="c7b993a1-1915-40e6-b88a-3606990443e1" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.51:6443: connect: connection refused" Dec 10 10:50:35 crc kubenswrapper[4780]: I1210 10:50:35.816627 4780 status_manager.go:851] "Failed to get status for pod" podUID="73eb2180-ca1d-4860-9306-982a9b3930b9" pod="openshift-marketplace/certified-operators-zms8r" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-zms8r\": dial tcp 38.102.83.51:6443: connect: connection refused" Dec 10 10:50:35 crc kubenswrapper[4780]: I1210 10:50:35.816895 4780 status_manager.go:851] "Failed to get status for pod" podUID="983b01b2-448a-462b-a87c-8d66c7824940" pod="openshift-marketplace/community-operators-7sdgl" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-7sdgl\": dial tcp 38.102.83.51:6443: connect: connection refused" Dec 10 10:50:35 crc kubenswrapper[4780]: I1210 10:50:35.817306 4780 status_manager.go:851] "Failed to get status for pod" podUID="c7b993a1-1915-40e6-b88a-3606990443e1" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.51:6443: connect: connection refused" Dec 10 10:50:35 crc kubenswrapper[4780]: I1210 10:50:35.817479 4780 status_manager.go:851] "Failed to get status for pod" podUID="9a59ce5a-0c36-4120-be63-8f2051a58e78" pod="openshift-marketplace/redhat-operators-jmx2c" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-jmx2c\": dial tcp 38.102.83.51:6443: connect: connection refused" Dec 10 10:50:35 crc kubenswrapper[4780]: I1210 10:50:35.817845 4780 status_manager.go:851] "Failed to get status for pod" podUID="73eb2180-ca1d-4860-9306-982a9b3930b9" pod="openshift-marketplace/certified-operators-zms8r" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-zms8r\": dial tcp 38.102.83.51:6443: connect: connection refused" Dec 10 10:50:35 crc kubenswrapper[4780]: I1210 10:50:35.818194 4780 status_manager.go:851] "Failed to get status for pod" podUID="983b01b2-448a-462b-a87c-8d66c7824940" pod="openshift-marketplace/community-operators-7sdgl" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-7sdgl\": dial tcp 38.102.83.51:6443: connect: connection refused" Dec 10 10:50:35 crc kubenswrapper[4780]: I1210 10:50:35.818499 4780 status_manager.go:851] "Failed to get status for pod" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" pod="openshift-network-diagnostics/network-check-target-xd92c" err="Get 
\"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-network-diagnostics/pods/network-check-target-xd92c\": dial tcp 38.102.83.51:6443: connect: connection refused" Dec 10 10:50:35 crc kubenswrapper[4780]: I1210 10:50:35.818863 4780 status_manager.go:851] "Failed to get status for pod" podUID="3234cf0e-6206-4a41-8474-f1893163954f" pod="openshift-marketplace/community-operators-z8b9w" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-z8b9w\": dial tcp 38.102.83.51:6443: connect: connection refused" Dec 10 10:50:35 crc kubenswrapper[4780]: I1210 10:50:35.819191 4780 status_manager.go:851] "Failed to get status for pod" podUID="a586027d-c0c6-4647-9318-23727f40a928" pod="openshift-marketplace/redhat-marketplace-4rw4n" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-4rw4n\": dial tcp 38.102.83.51:6443: connect: connection refused" Dec 10 10:50:35 crc kubenswrapper[4780]: I1210 10:50:35.819472 4780 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.51:6443: connect: connection refused" Dec 10 10:50:35 crc kubenswrapper[4780]: I1210 10:50:35.819786 4780 status_manager.go:851] "Failed to get status for pod" podUID="59f1ed36-eccd-4cd4-af95-f32539d40314" pod="openshift-marketplace/certified-operators-pdmqg" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-pdmqg\": dial tcp 38.102.83.51:6443: connect: connection refused" Dec 10 10:50:35 crc kubenswrapper[4780]: I1210 10:50:35.820110 4780 status_manager.go:851] "Failed to get status for pod" podUID="e941c8a4-98a7-48d2-9ec6-4e2dec741b54" pod="openshift-marketplace/redhat-operators-dmtlf" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-dmtlf\": dial tcp 38.102.83.51:6443: connect: connection refused" Dec 10 10:50:35 crc kubenswrapper[4780]: I1210 10:50:35.820420 4780 status_manager.go:851] "Failed to get status for pod" podUID="eee88117-019c-44a5-8a7f-95a655e53a27" pod="openshift-marketplace/redhat-marketplace-w2bd9" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-w2bd9\": dial tcp 38.102.83.51:6443: connect: connection refused" Dec 10 10:50:35 crc kubenswrapper[4780]: I1210 10:50:35.820740 4780 status_manager.go:851] "Failed to get status for pod" podUID="f614b9022728cf315e60c057852e563e" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 38.102.83.51:6443: connect: connection refused" Dec 10 10:50:35 crc kubenswrapper[4780]: I1210 10:50:35.964709 4780 status_manager.go:851] "Failed to get status for pod" podUID="f614b9022728cf315e60c057852e563e" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 38.102.83.51:6443: connect: connection refused" Dec 10 10:50:35 crc kubenswrapper[4780]: I1210 10:50:35.965236 4780 status_manager.go:851] "Failed to get status for pod" 
podUID="c7b993a1-1915-40e6-b88a-3606990443e1" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.51:6443: connect: connection refused" Dec 10 10:50:35 crc kubenswrapper[4780]: I1210 10:50:35.965668 4780 status_manager.go:851] "Failed to get status for pod" podUID="9a59ce5a-0c36-4120-be63-8f2051a58e78" pod="openshift-marketplace/redhat-operators-jmx2c" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-jmx2c\": dial tcp 38.102.83.51:6443: connect: connection refused" Dec 10 10:50:35 crc kubenswrapper[4780]: I1210 10:50:35.966104 4780 status_manager.go:851] "Failed to get status for pod" podUID="73eb2180-ca1d-4860-9306-982a9b3930b9" pod="openshift-marketplace/certified-operators-zms8r" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-zms8r\": dial tcp 38.102.83.51:6443: connect: connection refused" Dec 10 10:50:35 crc kubenswrapper[4780]: I1210 10:50:35.966394 4780 status_manager.go:851] "Failed to get status for pod" podUID="983b01b2-448a-462b-a87c-8d66c7824940" pod="openshift-marketplace/community-operators-7sdgl" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-7sdgl\": dial tcp 38.102.83.51:6443: connect: connection refused" Dec 10 10:50:35 crc kubenswrapper[4780]: I1210 10:50:35.966745 4780 status_manager.go:851] "Failed to get status for pod" podUID="71bb4a3aecc4ba5b26c4b7318770ce13" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.51:6443: connect: connection refused" Dec 10 10:50:35 crc kubenswrapper[4780]: I1210 10:50:35.967041 4780 status_manager.go:851] "Failed to get status for pod" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" pod="openshift-network-diagnostics/network-check-target-xd92c" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-network-diagnostics/pods/network-check-target-xd92c\": dial tcp 38.102.83.51:6443: connect: connection refused" Dec 10 10:50:35 crc kubenswrapper[4780]: I1210 10:50:35.967306 4780 status_manager.go:851] "Failed to get status for pod" podUID="3234cf0e-6206-4a41-8474-f1893163954f" pod="openshift-marketplace/community-operators-z8b9w" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-z8b9w\": dial tcp 38.102.83.51:6443: connect: connection refused" Dec 10 10:50:35 crc kubenswrapper[4780]: I1210 10:50:35.967523 4780 status_manager.go:851] "Failed to get status for pod" podUID="a586027d-c0c6-4647-9318-23727f40a928" pod="openshift-marketplace/redhat-marketplace-4rw4n" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-4rw4n\": dial tcp 38.102.83.51:6443: connect: connection refused" Dec 10 10:50:35 crc kubenswrapper[4780]: I1210 10:50:35.968186 4780 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.51:6443: connect: connection refused" Dec 10 10:50:35 crc kubenswrapper[4780]: I1210 10:50:35.968472 4780 status_manager.go:851] "Failed 
to get status for pod" podUID="59f1ed36-eccd-4cd4-af95-f32539d40314" pod="openshift-marketplace/certified-operators-pdmqg" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-pdmqg\": dial tcp 38.102.83.51:6443: connect: connection refused" Dec 10 10:50:35 crc kubenswrapper[4780]: I1210 10:50:35.968692 4780 status_manager.go:851] "Failed to get status for pod" podUID="e941c8a4-98a7-48d2-9ec6-4e2dec741b54" pod="openshift-marketplace/redhat-operators-dmtlf" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-dmtlf\": dial tcp 38.102.83.51:6443: connect: connection refused" Dec 10 10:50:35 crc kubenswrapper[4780]: I1210 10:50:35.968896 4780 status_manager.go:851] "Failed to get status for pod" podUID="eee88117-019c-44a5-8a7f-95a655e53a27" pod="openshift-marketplace/redhat-marketplace-w2bd9" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-w2bd9\": dial tcp 38.102.83.51:6443: connect: connection refused" Dec 10 10:50:35 crc kubenswrapper[4780]: I1210 10:50:35.969263 4780 status_manager.go:851] "Failed to get status for pod" podUID="983b01b2-448a-462b-a87c-8d66c7824940" pod="openshift-marketplace/community-operators-7sdgl" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-7sdgl\": dial tcp 38.102.83.51:6443: connect: connection refused" Dec 10 10:50:35 crc kubenswrapper[4780]: I1210 10:50:35.969508 4780 status_manager.go:851] "Failed to get status for pod" podUID="71bb4a3aecc4ba5b26c4b7318770ce13" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.51:6443: connect: connection refused" Dec 10 10:50:35 crc kubenswrapper[4780]: I1210 10:50:35.969717 4780 status_manager.go:851] "Failed to get status for pod" podUID="3234cf0e-6206-4a41-8474-f1893163954f" pod="openshift-marketplace/community-operators-z8b9w" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-z8b9w\": dial tcp 38.102.83.51:6443: connect: connection refused" Dec 10 10:50:35 crc kubenswrapper[4780]: I1210 10:50:35.970003 4780 status_manager.go:851] "Failed to get status for pod" podUID="a586027d-c0c6-4647-9318-23727f40a928" pod="openshift-marketplace/redhat-marketplace-4rw4n" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-4rw4n\": dial tcp 38.102.83.51:6443: connect: connection refused" Dec 10 10:50:35 crc kubenswrapper[4780]: I1210 10:50:35.970261 4780 status_manager.go:851] "Failed to get status for pod" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" pod="openshift-network-diagnostics/network-check-target-xd92c" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-network-diagnostics/pods/network-check-target-xd92c\": dial tcp 38.102.83.51:6443: connect: connection refused" Dec 10 10:50:35 crc kubenswrapper[4780]: I1210 10:50:35.970668 4780 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.51:6443: connect: connection refused" Dec 10 10:50:35 crc kubenswrapper[4780]: I1210 10:50:35.971468 
4780 status_manager.go:851] "Failed to get status for pod" podUID="59f1ed36-eccd-4cd4-af95-f32539d40314" pod="openshift-marketplace/certified-operators-pdmqg" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-pdmqg\": dial tcp 38.102.83.51:6443: connect: connection refused" Dec 10 10:50:35 crc kubenswrapper[4780]: I1210 10:50:35.971823 4780 status_manager.go:851] "Failed to get status for pod" podUID="e941c8a4-98a7-48d2-9ec6-4e2dec741b54" pod="openshift-marketplace/redhat-operators-dmtlf" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-dmtlf\": dial tcp 38.102.83.51:6443: connect: connection refused" Dec 10 10:50:35 crc kubenswrapper[4780]: I1210 10:50:35.972342 4780 status_manager.go:851] "Failed to get status for pod" podUID="eee88117-019c-44a5-8a7f-95a655e53a27" pod="openshift-marketplace/redhat-marketplace-w2bd9" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-w2bd9\": dial tcp 38.102.83.51:6443: connect: connection refused" Dec 10 10:50:35 crc kubenswrapper[4780]: I1210 10:50:35.973439 4780 status_manager.go:851] "Failed to get status for pod" podUID="f614b9022728cf315e60c057852e563e" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 38.102.83.51:6443: connect: connection refused" Dec 10 10:50:35 crc kubenswrapper[4780]: I1210 10:50:35.973772 4780 status_manager.go:851] "Failed to get status for pod" podUID="c7b993a1-1915-40e6-b88a-3606990443e1" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.51:6443: connect: connection refused" Dec 10 10:50:35 crc kubenswrapper[4780]: I1210 10:50:35.974056 4780 status_manager.go:851] "Failed to get status for pod" podUID="9a59ce5a-0c36-4120-be63-8f2051a58e78" pod="openshift-marketplace/redhat-operators-jmx2c" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-jmx2c\": dial tcp 38.102.83.51:6443: connect: connection refused" Dec 10 10:50:35 crc kubenswrapper[4780]: I1210 10:50:35.974547 4780 status_manager.go:851] "Failed to get status for pod" podUID="73eb2180-ca1d-4860-9306-982a9b3930b9" pod="openshift-marketplace/certified-operators-zms8r" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-zms8r\": dial tcp 38.102.83.51:6443: connect: connection refused" Dec 10 10:50:36 crc kubenswrapper[4780]: I1210 10:50:36.822571 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"db22be576bfee0bc898ea423ddd426b856474097238e0c5df4f6efe06c69d9e7"} Dec 10 10:50:38 crc kubenswrapper[4780]: I1210 10:50:38.845739 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"db61ebffe69ff95af43b888eefdbfe5ffd5b1be45bd0fc92fb3c5ce6147bdbfa"} Dec 10 10:50:38 crc kubenswrapper[4780]: I1210 10:50:38.846643 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" 
event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"c945220a9a74bf600fcaf71f0f5ac68e44136713fc9aaa89857f56e538d975aa"} Dec 10 10:50:38 crc kubenswrapper[4780]: I1210 10:50:38.846704 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"88ff5d736968c1b62fdf7849f96017526e502ac23a0df64c7357b246b26a9597"} Dec 10 10:50:38 crc kubenswrapper[4780]: I1210 10:50:38.850748 4780 generic.go:334] "Generic (PLEG): container finished" podID="59f1ed36-eccd-4cd4-af95-f32539d40314" containerID="181ef86f1d7a7ee5f8cbd8c62ee4779fa234fe9410486b1d0b94337383074784" exitCode=0 Dec 10 10:50:38 crc kubenswrapper[4780]: I1210 10:50:38.850824 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-pdmqg" event={"ID":"59f1ed36-eccd-4cd4-af95-f32539d40314","Type":"ContainerDied","Data":"181ef86f1d7a7ee5f8cbd8c62ee4779fa234fe9410486b1d0b94337383074784"} Dec 10 10:50:39 crc kubenswrapper[4780]: I1210 10:50:39.538858 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-z8b9w" Dec 10 10:50:39 crc kubenswrapper[4780]: I1210 10:50:39.864062 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-pdmqg" event={"ID":"59f1ed36-eccd-4cd4-af95-f32539d40314","Type":"ContainerStarted","Data":"40bd3673ff7560022ea7f5b86aae2064c5fdb9df3c95e9b91200aee7dcf9f287"} Dec 10 10:50:39 crc kubenswrapper[4780]: I1210 10:50:39.872819 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"86153170bcc1a8d24bff6c7dbc61db9561562be2b368507245dadb6d78cbcca5"} Dec 10 10:50:39 crc kubenswrapper[4780]: I1210 10:50:39.873164 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 10 10:50:39 crc kubenswrapper[4780]: I1210 10:50:39.873519 4780 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="da4d38a3-53c0-417d-a86f-3496714bd352" Dec 10 10:50:39 crc kubenswrapper[4780]: I1210 10:50:39.873684 4780 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="da4d38a3-53c0-417d-a86f-3496714bd352" Dec 10 10:50:40 crc kubenswrapper[4780]: I1210 10:50:40.731142 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-7sdgl" Dec 10 10:50:40 crc kubenswrapper[4780]: I1210 10:50:40.731255 4780 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-7sdgl" Dec 10 10:50:40 crc kubenswrapper[4780]: I1210 10:50:40.744195 4780 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Dec 10 10:50:40 crc kubenswrapper[4780]: I1210 10:50:40.751625 4780 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Dec 10 10:50:40 crc kubenswrapper[4780]: I1210 10:50:40.779250 4780 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-7sdgl" Dec 10 10:50:40 crc kubenswrapper[4780]: I1210 10:50:40.879535 4780 kubelet.go:2542] "SyncLoop (probe)" 
probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Dec 10 10:50:40 crc kubenswrapper[4780]: I1210 10:50:40.920543 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-7sdgl" Dec 10 10:50:41 crc kubenswrapper[4780]: I1210 10:50:41.102453 4780 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-jmx2c" Dec 10 10:50:41 crc kubenswrapper[4780]: I1210 10:50:41.117741 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-jmx2c" Dec 10 10:50:41 crc kubenswrapper[4780]: I1210 10:50:41.664177 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-dmtlf" Dec 10 10:50:41 crc kubenswrapper[4780]: I1210 10:50:41.664279 4780 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-dmtlf" Dec 10 10:50:41 crc kubenswrapper[4780]: I1210 10:50:41.737347 4780 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-dmtlf" Dec 10 10:50:41 crc kubenswrapper[4780]: I1210 10:50:41.930704 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-dmtlf" Dec 10 10:50:42 crc kubenswrapper[4780]: I1210 10:50:42.148197 4780 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-jmx2c" podUID="9a59ce5a-0c36-4120-be63-8f2051a58e78" containerName="registry-server" probeResult="failure" output=< Dec 10 10:50:42 crc kubenswrapper[4780]: timeout: failed to connect service ":50051" within 1s Dec 10 10:50:42 crc kubenswrapper[4780]: > Dec 10 10:50:44 crc kubenswrapper[4780]: I1210 10:50:44.098629 4780 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 10 10:50:44 crc kubenswrapper[4780]: I1210 10:50:44.099419 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 10 10:50:44 crc kubenswrapper[4780]: I1210 10:50:44.100403 4780 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 10 10:50:44 crc kubenswrapper[4780]: I1210 10:50:44.895963 4780 kubelet.go:1914] "Deleted mirror pod because it is outdated" pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 10 10:50:45 crc kubenswrapper[4780]: I1210 10:50:45.091208 4780 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="da4d38a3-53c0-417d-a86f-3496714bd352" Dec 10 10:50:45 crc kubenswrapper[4780]: I1210 10:50:45.091588 4780 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="da4d38a3-53c0-417d-a86f-3496714bd352" Dec 10 10:50:45 crc kubenswrapper[4780]: I1210 10:50:45.100841 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 10 10:50:45 crc kubenswrapper[4780]: I1210 10:50:45.165882 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Dec 10 10:50:45 crc kubenswrapper[4780]: I1210 10:50:45.307522 4780 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" 
pod="openshift-kube-apiserver/kube-apiserver-crc" oldPodUID="71bb4a3aecc4ba5b26c4b7318770ce13" podUID="9ad277fa-6544-4ee5-978d-df5a5af089a9" Dec 10 10:50:46 crc kubenswrapper[4780]: I1210 10:50:46.098265 4780 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="da4d38a3-53c0-417d-a86f-3496714bd352" Dec 10 10:50:46 crc kubenswrapper[4780]: I1210 10:50:46.098306 4780 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="da4d38a3-53c0-417d-a86f-3496714bd352" Dec 10 10:50:48 crc kubenswrapper[4780]: I1210 10:50:48.994690 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 10 10:50:48 crc kubenswrapper[4780]: I1210 10:50:48.996122 4780 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="da4d38a3-53c0-417d-a86f-3496714bd352" Dec 10 10:50:48 crc kubenswrapper[4780]: I1210 10:50:48.996177 4780 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="da4d38a3-53c0-417d-a86f-3496714bd352" Dec 10 10:50:49 crc kubenswrapper[4780]: I1210 10:50:49.727993 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-pdmqg" Dec 10 10:50:49 crc kubenswrapper[4780]: I1210 10:50:49.728326 4780 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-pdmqg" Dec 10 10:50:49 crc kubenswrapper[4780]: I1210 10:50:49.770786 4780 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-pdmqg" Dec 10 10:50:50 crc kubenswrapper[4780]: I1210 10:50:50.188977 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-pdmqg" Dec 10 10:50:51 crc kubenswrapper[4780]: I1210 10:50:51.150673 4780 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-jmx2c" Dec 10 10:50:51 crc kubenswrapper[4780]: I1210 10:50:51.196187 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-jmx2c" Dec 10 10:50:55 crc kubenswrapper[4780]: I1210 10:50:55.106333 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"openshift-service-ca.crt" Dec 10 10:50:55 crc kubenswrapper[4780]: I1210 10:50:55.306024 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c" Dec 10 10:50:56 crc kubenswrapper[4780]: I1210 10:50:56.044202 4780 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openshift-kube-apiserver/kube-apiserver-crc" oldPodUID="71bb4a3aecc4ba5b26c4b7318770ce13" podUID="9ad277fa-6544-4ee5-978d-df5a5af089a9" Dec 10 10:50:56 crc kubenswrapper[4780]: I1210 10:50:56.554953 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"audit-1" Dec 10 10:50:56 crc kubenswrapper[4780]: I1210 10:50:56.724216 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-stats-default" Dec 10 10:50:56 crc kubenswrapper[4780]: I1210 10:50:56.746092 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"openshift-service-ca.crt" Dec 10 10:50:56 crc 
kubenswrapper[4780]: I1210 10:50:56.778289 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"openshift-service-ca.crt" Dec 10 10:50:56 crc kubenswrapper[4780]: I1210 10:50:56.881617 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-sa-dockercfg-d427c" Dec 10 10:50:56 crc kubenswrapper[4780]: I1210 10:50:56.883301 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"console-operator-config" Dec 10 10:50:57 crc kubenswrapper[4780]: I1210 10:50:57.046030 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" Dec 10 10:50:57 crc kubenswrapper[4780]: I1210 10:50:57.057987 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"community-operators-dockercfg-dmngl" Dec 10 10:50:57 crc kubenswrapper[4780]: I1210 10:50:57.058465 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-root-ca.crt" Dec 10 10:50:57 crc kubenswrapper[4780]: I1210 10:50:57.145564 4780 reflector.go:368] Caches populated for *v1.Service from k8s.io/client-go/informers/factory.go:160 Dec 10 10:50:57 crc kubenswrapper[4780]: I1210 10:50:57.174527 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-config" Dec 10 10:50:57 crc kubenswrapper[4780]: I1210 10:50:57.229787 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"kube-root-ca.crt" Dec 10 10:50:57 crc kubenswrapper[4780]: I1210 10:50:57.288169 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"openshift-service-ca.crt" Dec 10 10:50:57 crc kubenswrapper[4780]: I1210 10:50:57.444093 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ac-dockercfg-9lkdf" Dec 10 10:50:57 crc kubenswrapper[4780]: I1210 10:50:57.492501 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"node-ca-dockercfg-4777p" Dec 10 10:50:57 crc kubenswrapper[4780]: I1210 10:50:57.494451 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"openshift-service-ca.crt" Dec 10 10:50:57 crc kubenswrapper[4780]: I1210 10:50:57.527816 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-console"/"networking-console-plugin" Dec 10 10:50:57 crc kubenswrapper[4780]: I1210 10:50:57.569960 4780 reflector.go:368] Caches populated for *v1.Secret from object-"hostpath-provisioner"/"csi-hostpath-provisioner-sa-dockercfg-qd74k" Dec 10 10:50:57 crc kubenswrapper[4780]: I1210 10:50:57.697175 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"openshift-service-ca.crt" Dec 10 10:50:57 crc kubenswrapper[4780]: I1210 10:50:57.783629 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"service-ca-dockercfg-pn86c" Dec 10 10:50:57 crc kubenswrapper[4780]: I1210 10:50:57.788869 4780 reflector.go:368] Caches populated for *v1.Node from k8s.io/client-go/informers/factory.go:160 Dec 10 10:50:57 crc kubenswrapper[4780]: I1210 10:50:57.845657 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"etcd-serving-ca" Dec 10 10:50:57 crc 
kubenswrapper[4780]: I1210 10:50:57.903015 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"encryption-config-1" Dec 10 10:50:57 crc kubenswrapper[4780]: I1210 10:50:57.919751 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-config" Dec 10 10:50:57 crc kubenswrapper[4780]: I1210 10:50:57.946798 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"machine-api-operator-images" Dec 10 10:50:58 crc kubenswrapper[4780]: I1210 10:50:58.022505 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"kube-root-ca.crt" Dec 10 10:50:58 crc kubenswrapper[4780]: I1210 10:50:58.181419 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"kube-root-ca.crt" Dec 10 10:50:58 crc kubenswrapper[4780]: I1210 10:50:58.346494 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"serving-cert" Dec 10 10:50:58 crc kubenswrapper[4780]: I1210 10:50:58.374762 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-oauth-config" Dec 10 10:50:58 crc kubenswrapper[4780]: I1210 10:50:58.480574 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-idp-0-file-data" Dec 10 10:50:58 crc kubenswrapper[4780]: I1210 10:50:58.721930 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-admission-controller-secret" Dec 10 10:50:58 crc kubenswrapper[4780]: I1210 10:50:58.735145 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"kube-root-ca.crt" Dec 10 10:50:58 crc kubenswrapper[4780]: I1210 10:50:58.797101 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"kube-root-ca.crt" Dec 10 10:50:58 crc kubenswrapper[4780]: I1210 10:50:58.811777 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-dockercfg-qx5rd" Dec 10 10:50:58 crc kubenswrapper[4780]: I1210 10:50:58.980632 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-node-metrics-cert" Dec 10 10:50:59 crc kubenswrapper[4780]: I1210 10:50:59.044173 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"cluster-image-registry-operator-dockercfg-m4qtx" Dec 10 10:50:59 crc kubenswrapper[4780]: I1210 10:50:59.214064 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-service-ca.crt" Dec 10 10:50:59 crc kubenswrapper[4780]: I1210 10:50:59.250070 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"config" Dec 10 10:50:59 crc kubenswrapper[4780]: I1210 10:50:59.309206 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"kube-root-ca.crt" Dec 10 10:50:59 crc kubenswrapper[4780]: I1210 10:50:59.600067 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"service-ca-bundle" Dec 10 10:50:59 crc kubenswrapper[4780]: I1210 10:50:59.602854 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"kube-root-ca.crt" Dec 10 10:50:59 crc kubenswrapper[4780]: I1210 10:50:59.603744 4780 
reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"node-bootstrapper-token" Dec 10 10:50:59 crc kubenswrapper[4780]: I1210 10:50:59.605594 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-tls" Dec 10 10:50:59 crc kubenswrapper[4780]: I1210 10:50:59.765665 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"openshift-service-ca.crt" Dec 10 10:50:59 crc kubenswrapper[4780]: I1210 10:50:59.834395 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"openshift-service-ca.crt" Dec 10 10:50:59 crc kubenswrapper[4780]: I1210 10:50:59.837700 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-certs-default" Dec 10 10:50:59 crc kubenswrapper[4780]: I1210 10:50:59.949019 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"marketplace-trusted-ca" Dec 10 10:50:59 crc kubenswrapper[4780]: I1210 10:50:59.990092 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"image-registry-certificates" Dec 10 10:50:59 crc kubenswrapper[4780]: I1210 10:50:59.991967 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"oauth-openshift-dockercfg-znhcc" Dec 10 10:51:00 crc kubenswrapper[4780]: I1210 10:51:00.052410 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"certified-operators-dockercfg-4rs5g" Dec 10 10:51:00 crc kubenswrapper[4780]: I1210 10:51:00.081421 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-operator"/"metrics-tls" Dec 10 10:51:00 crc kubenswrapper[4780]: I1210 10:51:00.097996 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-dockercfg-zdk86" Dec 10 10:51:00 crc kubenswrapper[4780]: I1210 10:51:00.134190 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"kube-root-ca.crt" Dec 10 10:51:00 crc kubenswrapper[4780]: I1210 10:51:00.275170 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt" Dec 10 10:51:00 crc kubenswrapper[4780]: I1210 10:51:00.427839 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"signing-cabundle" Dec 10 10:51:00 crc kubenswrapper[4780]: I1210 10:51:00.475368 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"kube-scheduler-operator-serving-cert" Dec 10 10:51:00 crc kubenswrapper[4780]: I1210 10:51:00.652444 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"trusted-ca-bundle" Dec 10 10:51:00 crc kubenswrapper[4780]: I1210 10:51:00.714892 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca" Dec 10 10:51:00 crc kubenswrapper[4780]: I1210 10:51:00.747899 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"kube-root-ca.crt" Dec 10 10:51:00 crc kubenswrapper[4780]: I1210 10:51:00.783469 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-dockercfg-x57mr" Dec 10 10:51:00 crc kubenswrapper[4780]: I1210 10:51:00.795570 4780 reflector.go:368] 
Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-root-ca.crt" Dec 10 10:51:01 crc kubenswrapper[4780]: I1210 10:51:01.053707 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"console-operator-dockercfg-4xjcr" Dec 10 10:51:01 crc kubenswrapper[4780]: I1210 10:51:01.148508 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-dockercfg-r9srn" Dec 10 10:51:01 crc kubenswrapper[4780]: I1210 10:51:01.168099 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-metrics" Dec 10 10:51:01 crc kubenswrapper[4780]: I1210 10:51:01.168100 4780 reflector.go:368] Caches populated for *v1.CSIDriver from k8s.io/client-go/informers/factory.go:160 Dec 10 10:51:01 crc kubenswrapper[4780]: I1210 10:51:01.180322 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-sa-dockercfg-nl2j4" Dec 10 10:51:01 crc kubenswrapper[4780]: I1210 10:51:01.245069 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-dockercfg-mfbb7" Dec 10 10:51:01 crc kubenswrapper[4780]: I1210 10:51:01.503964 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-serving-cert" Dec 10 10:51:01 crc kubenswrapper[4780]: I1210 10:51:01.601642 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"default-dockercfg-2q5b6" Dec 10 10:51:01 crc kubenswrapper[4780]: I1210 10:51:01.670392 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"kube-root-ca.crt" Dec 10 10:51:01 crc kubenswrapper[4780]: I1210 10:51:01.705354 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-root-ca.crt" Dec 10 10:51:01 crc kubenswrapper[4780]: I1210 10:51:01.794188 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"config-operator-serving-cert" Dec 10 10:51:01 crc kubenswrapper[4780]: I1210 10:51:01.886230 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"kube-root-ca.crt" Dec 10 10:51:02 crc kubenswrapper[4780]: I1210 10:51:02.120565 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"etcd-client" Dec 10 10:51:02 crc kubenswrapper[4780]: I1210 10:51:02.163670 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"openshift-service-ca.crt" Dec 10 10:51:02 crc kubenswrapper[4780]: I1210 10:51:02.399235 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"encryption-config-1" Dec 10 10:51:02 crc kubenswrapper[4780]: I1210 10:51:02.406938 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca" Dec 10 10:51:02 crc kubenswrapper[4780]: I1210 10:51:02.443490 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-control-plane-dockercfg-gs7dd" Dec 10 10:51:02 crc kubenswrapper[4780]: I1210 10:51:02.482611 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"kube-root-ca.crt" Dec 10 10:51:02 crc kubenswrapper[4780]: I1210 10:51:02.580084 4780 reflector.go:368] Caches populated for 
*v1.Secret from object-"openshift-service-ca"/"signing-key" Dec 10 10:51:02 crc kubenswrapper[4780]: I1210 10:51:02.616974 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-error" Dec 10 10:51:02 crc kubenswrapper[4780]: I1210 10:51:02.632670 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-client" Dec 10 10:51:02 crc kubenswrapper[4780]: I1210 10:51:02.654002 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-cliconfig" Dec 10 10:51:02 crc kubenswrapper[4780]: I1210 10:51:02.660505 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"kube-root-ca.crt" Dec 10 10:51:02 crc kubenswrapper[4780]: I1210 10:51:02.909661 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"service-ca-operator-dockercfg-rg9jl" Dec 10 10:51:03 crc kubenswrapper[4780]: I1210 10:51:03.030320 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"kube-root-ca.crt" Dec 10 10:51:03 crc kubenswrapper[4780]: I1210 10:51:03.046246 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"openshift-service-ca.crt" Dec 10 10:51:03 crc kubenswrapper[4780]: I1210 10:51:03.075428 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"trusted-ca-bundle" Dec 10 10:51:03 crc kubenswrapper[4780]: I1210 10:51:03.096546 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-config" Dec 10 10:51:03 crc kubenswrapper[4780]: I1210 10:51:03.145148 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"kube-root-ca.crt" Dec 10 10:51:03 crc kubenswrapper[4780]: I1210 10:51:03.174262 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"kube-root-ca.crt" Dec 10 10:51:03 crc kubenswrapper[4780]: I1210 10:51:03.193117 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"canary-serving-cert" Dec 10 10:51:03 crc kubenswrapper[4780]: I1210 10:51:03.212398 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"authentication-operator-config" Dec 10 10:51:03 crc kubenswrapper[4780]: I1210 10:51:03.335974 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-rbac-proxy" Dec 10 10:51:03 crc kubenswrapper[4780]: I1210 10:51:03.336219 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"audit" Dec 10 10:51:03 crc kubenswrapper[4780]: I1210 10:51:03.371467 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-config" Dec 10 10:51:03 crc kubenswrapper[4780]: I1210 10:51:03.425769 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-ocp-branding-template" Dec 10 10:51:03 crc kubenswrapper[4780]: I1210 10:51:03.427481 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"cni-copy-resources" Dec 10 10:51:03 crc kubenswrapper[4780]: I1210 10:51:03.478720 4780 reflector.go:368] Caches populated for *v1.Secret from 
object-"openshift-machine-config-operator"/"machine-config-operator-dockercfg-98p87" Dec 10 10:51:03 crc kubenswrapper[4780]: I1210 10:51:03.487367 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serving-cert" Dec 10 10:51:03 crc kubenswrapper[4780]: I1210 10:51:03.501561 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-service-ca-bundle" Dec 10 10:51:03 crc kubenswrapper[4780]: I1210 10:51:03.615245 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config" Dec 10 10:51:03 crc kubenswrapper[4780]: I1210 10:51:03.679360 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"openshift-service-ca.crt" Dec 10 10:51:03 crc kubenswrapper[4780]: I1210 10:51:03.690385 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-tls" Dec 10 10:51:03 crc kubenswrapper[4780]: I1210 10:51:03.853033 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-console"/"networking-console-plugin-cert" Dec 10 10:51:03 crc kubenswrapper[4780]: I1210 10:51:03.913744 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"openshift-service-ca.crt" Dec 10 10:51:03 crc kubenswrapper[4780]: I1210 10:51:03.984812 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"multus-daemon-config" Dec 10 10:51:03 crc kubenswrapper[4780]: I1210 10:51:03.996204 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"trusted-ca-bundle" Dec 10 10:51:04 crc kubenswrapper[4780]: I1210 10:51:04.105211 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert" Dec 10 10:51:04 crc kubenswrapper[4780]: I1210 10:51:04.217334 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"registry-dockercfg-kzzsd" Dec 10 10:51:04 crc kubenswrapper[4780]: I1210 10:51:04.241875 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-daemon-dockercfg-r5tcq" Dec 10 10:51:04 crc kubenswrapper[4780]: I1210 10:51:04.378014 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"kube-root-ca.crt" Dec 10 10:51:04 crc kubenswrapper[4780]: I1210 10:51:04.394857 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-dockercfg-gkqpw" Dec 10 10:51:04 crc kubenswrapper[4780]: I1210 10:51:04.408773 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-serving-cert" Dec 10 10:51:04 crc kubenswrapper[4780]: I1210 10:51:04.426210 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"config" Dec 10 10:51:04 crc kubenswrapper[4780]: I1210 10:51:04.498260 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config" Dec 10 10:51:04 crc kubenswrapper[4780]: I1210 10:51:04.534821 4780 reflector.go:368] Caches populated for *v1.ConfigMap from 
object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-config" Dec 10 10:51:04 crc kubenswrapper[4780]: I1210 10:51:04.587313 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"default-cni-sysctl-allowlist" Dec 10 10:51:04 crc kubenswrapper[4780]: I1210 10:51:04.607610 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"kube-root-ca.crt" Dec 10 10:51:04 crc kubenswrapper[4780]: I1210 10:51:04.818011 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"kube-storage-version-migrator-operator-dockercfg-2bh8d" Dec 10 10:51:05 crc kubenswrapper[4780]: I1210 10:51:05.034525 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"node-resolver-dockercfg-kz9s7" Dec 10 10:51:05 crc kubenswrapper[4780]: I1210 10:51:05.220002 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"service-ca" Dec 10 10:51:05 crc kubenswrapper[4780]: I1210 10:51:05.303140 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"trusted-ca" Dec 10 10:51:05 crc kubenswrapper[4780]: I1210 10:51:05.368899 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-dockercfg-xtcjv" Dec 10 10:51:05 crc kubenswrapper[4780]: I1210 10:51:05.434268 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-tls" Dec 10 10:51:05 crc kubenswrapper[4780]: I1210 10:51:05.441709 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"serving-cert" Dec 10 10:51:05 crc kubenswrapper[4780]: I1210 10:51:05.476704 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"authentication-operator-dockercfg-mz9bj" Dec 10 10:51:05 crc kubenswrapper[4780]: I1210 10:51:05.550598 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-script-lib" Dec 10 10:51:05 crc kubenswrapper[4780]: I1210 10:51:05.693404 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-service-ca" Dec 10 10:51:05 crc kubenswrapper[4780]: I1210 10:51:05.877284 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"kube-root-ca.crt" Dec 10 10:51:05 crc kubenswrapper[4780]: I1210 10:51:05.896315 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-metrics-certs-default" Dec 10 10:51:05 crc kubenswrapper[4780]: I1210 10:51:05.913218 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"trusted-ca" Dec 10 10:51:06 crc kubenswrapper[4780]: I1210 10:51:06.036848 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2" Dec 10 10:51:06 crc kubenswrapper[4780]: I1210 10:51:06.039179 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"etcd-client" Dec 10 10:51:06 crc kubenswrapper[4780]: I1210 10:51:06.174118 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"ingress-operator-dockercfg-7lnqk" Dec 10 10:51:06 crc kubenswrapper[4780]: I1210 10:51:06.252768 4780 
reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"openshift-service-ca.crt" Dec 10 10:51:06 crc kubenswrapper[4780]: I1210 10:51:06.302836 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"kube-root-ca.crt" Dec 10 10:51:06 crc kubenswrapper[4780]: I1210 10:51:06.364550 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"service-ca-operator-config" Dec 10 10:51:06 crc kubenswrapper[4780]: I1210 10:51:06.529347 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"machine-config-operator-images" Dec 10 10:51:06 crc kubenswrapper[4780]: I1210 10:51:06.599748 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"kube-root-ca.crt" Dec 10 10:51:06 crc kubenswrapper[4780]: I1210 10:51:06.645843 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-dockercfg-qt55r" Dec 10 10:51:06 crc kubenswrapper[4780]: I1210 10:51:06.760166 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"env-overrides" Dec 10 10:51:06 crc kubenswrapper[4780]: I1210 10:51:06.854990 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"pprof-cert" Dec 10 10:51:07 crc kubenswrapper[4780]: I1210 10:51:07.159607 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"machine-approver-config" Dec 10 10:51:07 crc kubenswrapper[4780]: I1210 10:51:07.225653 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mcc-proxy-tls" Dec 10 10:51:07 crc kubenswrapper[4780]: I1210 10:51:07.260005 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"trusted-ca-bundle" Dec 10 10:51:07 crc kubenswrapper[4780]: I1210 10:51:07.280810 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"oauth-apiserver-sa-dockercfg-6r2bq" Dec 10 10:51:07 crc kubenswrapper[4780]: I1210 10:51:07.357023 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"metrics-tls" Dec 10 10:51:07 crc kubenswrapper[4780]: I1210 10:51:07.357081 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert" Dec 10 10:51:07 crc kubenswrapper[4780]: I1210 10:51:07.363356 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"package-server-manager-serving-cert" Dec 10 10:51:07 crc kubenswrapper[4780]: I1210 10:51:07.364143 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"openshift-service-ca.crt" Dec 10 10:51:07 crc kubenswrapper[4780]: I1210 10:51:07.403535 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca" Dec 10 10:51:07 crc kubenswrapper[4780]: I1210 10:51:07.518534 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-dockercfg-k9rxt" Dec 10 10:51:07 crc kubenswrapper[4780]: I1210 10:51:07.694572 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-operator-tls" Dec 10 10:51:08 crc kubenswrapper[4780]: 
I1210 10:51:08.110334 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-node-identity"/"network-node-identity-cert" Dec 10 10:51:08 crc kubenswrapper[4780]: I1210 10:51:08.211439 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"serving-cert" Dec 10 10:51:08 crc kubenswrapper[4780]: I1210 10:51:08.491315 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"console-config" Dec 10 10:51:08 crc kubenswrapper[4780]: I1210 10:51:08.586237 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"openshift-service-ca.crt" Dec 10 10:51:08 crc kubenswrapper[4780]: I1210 10:51:08.634743 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-config" Dec 10 10:51:08 crc kubenswrapper[4780]: I1210 10:51:08.684204 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"kube-root-ca.crt" Dec 10 10:51:08 crc kubenswrapper[4780]: I1210 10:51:08.779018 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"kube-root-ca.crt" Dec 10 10:51:08 crc kubenswrapper[4780]: I1210 10:51:08.866904 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"default-dockercfg-gxtc4" Dec 10 10:51:09 crc kubenswrapper[4780]: I1210 10:51:09.033962 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-controller-dockercfg-c2lfx" Dec 10 10:51:09 crc kubenswrapper[4780]: I1210 10:51:09.103933 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"openshift-service-ca.crt" Dec 10 10:51:09 crc kubenswrapper[4780]: I1210 10:51:09.521832 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-dockercfg-vw8fw" Dec 10 10:51:09 crc kubenswrapper[4780]: I1210 10:51:09.695093 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt" Dec 10 10:51:17 crc kubenswrapper[4780]: I1210 10:51:17.006261 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serviceaccount-dockercfg-rq7zk" Dec 10 10:51:17 crc kubenswrapper[4780]: I1210 10:51:17.420832 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-root-ca.crt" Dec 10 10:51:17 crc kubenswrapper[4780]: I1210 10:51:17.919414 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-node-dockercfg-pwtwl" Dec 10 10:51:17 crc kubenswrapper[4780]: I1210 10:51:17.975632 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-login" Dec 10 10:51:19 crc kubenswrapper[4780]: I1210 10:51:19.201651 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"iptables-alerter-script" Dec 10 10:51:19 crc kubenswrapper[4780]: I1210 10:51:19.732735 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"samples-operator-tls" Dec 10 10:51:19 crc kubenswrapper[4780]: I1210 10:51:19.741191 4780 reflector.go:368] Caches populated for *v1.Secret from 
object-"openshift-authentication"/"v4-0-config-system-serving-cert" Dec 10 10:51:19 crc kubenswrapper[4780]: I1210 10:51:19.976526 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"kube-root-ca.crt" Dec 10 10:51:21 crc kubenswrapper[4780]: I1210 10:51:21.048891 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"env-overrides" Dec 10 10:51:22 crc kubenswrapper[4780]: I1210 10:51:22.024012 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-dockercfg-f62pw" Dec 10 10:51:23 crc kubenswrapper[4780]: I1210 10:51:23.550811 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"openshift-service-ca.crt" Dec 10 10:51:23 crc kubenswrapper[4780]: I1210 10:51:23.994690 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-control-plane-metrics-cert" Dec 10 10:51:24 crc kubenswrapper[4780]: I1210 10:51:24.528013 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-trusted-ca-bundle" Dec 10 10:51:25 crc kubenswrapper[4780]: I1210 10:51:25.041266 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-serving-cert" Dec 10 10:51:25 crc kubenswrapper[4780]: I1210 10:51:25.250020 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"openshift-service-ca.crt" Dec 10 10:51:25 crc kubenswrapper[4780]: I1210 10:51:25.853470 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"serving-cert" Dec 10 10:51:25 crc kubenswrapper[4780]: I1210 10:51:25.853754 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"packageserver-service-cert" Dec 10 10:51:26 crc kubenswrapper[4780]: I1210 10:51:26.549964 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-serving-cert" Dec 10 10:51:26 crc kubenswrapper[4780]: I1210 10:51:26.624283 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ancillary-tools-dockercfg-vnmsz" Dec 10 10:51:26 crc kubenswrapper[4780]: I1210 10:51:26.788100 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"default-dockercfg-2llfx" Dec 10 10:51:27 crc kubenswrapper[4780]: I1210 10:51:27.361902 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"cluster-samples-operator-dockercfg-xpp9w" Dec 10 10:51:27 crc kubenswrapper[4780]: I1210 10:51:27.428762 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-tls" Dec 10 10:51:27 crc kubenswrapper[4780]: I1210 10:51:27.932845 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-operators-dockercfg-ct8rh" Dec 10 10:51:28 crc kubenswrapper[4780]: I1210 10:51:28.173171 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"catalog-operator-serving-cert" Dec 10 10:51:28 crc kubenswrapper[4780]: I1210 10:51:28.458002 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-router-certs" Dec 10 10:51:28 crc 
kubenswrapper[4780]: I1210 10:51:28.911003 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"kube-root-ca.crt" Dec 10 10:51:30 crc kubenswrapper[4780]: I1210 10:51:30.103983 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"openshift-service-ca.crt" Dec 10 10:51:30 crc kubenswrapper[4780]: I1210 10:51:30.278663 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-serving-cert" Dec 10 10:51:30 crc kubenswrapper[4780]: I1210 10:51:30.490340 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-session" Dec 10 10:51:31 crc kubenswrapper[4780]: I1210 10:51:31.116576 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"service-ca-bundle" Dec 10 10:51:31 crc kubenswrapper[4780]: I1210 10:51:31.904332 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"kube-root-ca.crt" Dec 10 10:51:32 crc kubenswrapper[4780]: I1210 10:51:32.117821 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"serving-cert" Dec 10 10:51:32 crc kubenswrapper[4780]: I1210 10:51:32.525290 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt" Dec 10 10:51:32 crc kubenswrapper[4780]: I1210 10:51:32.547545 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-root-ca.crt" Dec 10 10:51:33 crc kubenswrapper[4780]: I1210 10:51:33.000459 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"etcd-serving-ca" Dec 10 10:51:33 crc kubenswrapper[4780]: I1210 10:51:33.404360 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-tls" Dec 10 10:51:33 crc kubenswrapper[4780]: I1210 10:51:33.469403 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"serving-cert" Dec 10 10:51:34 crc kubenswrapper[4780]: I1210 10:51:34.133243 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-service-ca.crt" Dec 10 10:51:34 crc kubenswrapper[4780]: I1210 10:51:34.419348 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"openshift-service-ca.crt" Dec 10 10:51:34 crc kubenswrapper[4780]: I1210 10:51:34.518608 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-provider-selection" Dec 10 10:51:34 crc kubenswrapper[4780]: I1210 10:51:34.679102 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"kube-root-ca.crt" Dec 10 10:51:34 crc kubenswrapper[4780]: I1210 10:51:34.819876 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"proxy-tls" Dec 10 10:51:34 crc kubenswrapper[4780]: I1210 10:51:34.967974 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"trusted-ca" Dec 10 10:51:35 crc kubenswrapper[4780]: I1210 10:51:35.509255 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"kube-root-ca.crt" Dec 10 10:51:35 crc kubenswrapper[4780]: I1210 10:51:35.531869 4780 reflector.go:368] Caches populated 
for *v1.Secret from object-"openshift-cluster-version"/"cluster-version-operator-serving-cert" Dec 10 10:51:36 crc kubenswrapper[4780]: I1210 10:51:36.026138 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-rbac-proxy" Dec 10 10:51:36 crc kubenswrapper[4780]: I1210 10:51:36.451659 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"openshift-service-ca.crt" Dec 10 10:51:36 crc kubenswrapper[4780]: I1210 10:51:36.483605 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"default-dockercfg-chnjx" Dec 10 10:51:36 crc kubenswrapper[4780]: I1210 10:51:36.733499 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"openshift-service-ca.crt" Dec 10 10:51:36 crc kubenswrapper[4780]: I1210 10:51:36.825322 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"installation-pull-secrets" Dec 10 10:51:36 crc kubenswrapper[4780]: I1210 10:51:36.837495 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"kube-root-ca.crt" Dec 10 10:51:37 crc kubenswrapper[4780]: I1210 10:51:37.285757 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mco-proxy-tls" Dec 10 10:51:37 crc kubenswrapper[4780]: I1210 10:51:37.332284 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"audit-1" Dec 10 10:51:37 crc kubenswrapper[4780]: I1210 10:51:37.369353 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"ovnkube-identity-cm" Dec 10 10:51:37 crc kubenswrapper[4780]: I1210 10:51:37.577647 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"image-import-ca" Dec 10 10:51:37 crc kubenswrapper[4780]: I1210 10:51:37.736264 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"openshift-service-ca.crt" Dec 10 10:51:37 crc kubenswrapper[4780]: I1210 10:51:37.829694 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"metrics-tls" Dec 10 10:51:38 crc kubenswrapper[4780]: I1210 10:51:38.106216 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"openshift-service-ca.crt" Dec 10 10:51:38 crc kubenswrapper[4780]: I1210 10:51:38.913302 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"openshift-service-ca.crt" Dec 10 10:51:38 crc kubenswrapper[4780]: I1210 10:51:38.965462 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-dockercfg-jwfmh" Dec 10 10:51:39 crc kubenswrapper[4780]: I1210 10:51:39.079423 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-marketplace-dockercfg-x2ctb" Dec 10 10:51:39 crc kubenswrapper[4780]: I1210 10:51:39.911664 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-dockercfg-5nsgg" Dec 10 10:51:40 crc kubenswrapper[4780]: I1210 10:51:40.490785 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"openshift-service-ca.crt" Dec 10 10:51:40 crc kubenswrapper[4780]: I1210 10:51:40.611085 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"dns-operator-dockercfg-9mqw5" 
Dec 10 10:51:40 crc kubenswrapper[4780]: I1210 10:51:40.848283 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"kube-root-ca.crt" Dec 10 10:51:41 crc kubenswrapper[4780]: I1210 10:51:41.790632 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"kube-root-ca.crt" Dec 10 10:51:41 crc kubenswrapper[4780]: I1210 10:51:41.799057 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"openshift-service-ca.crt" Dec 10 10:51:41 crc kubenswrapper[4780]: I1210 10:51:41.969714 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"openshift-apiserver-sa-dockercfg-djjff" Dec 10 10:51:42 crc kubenswrapper[4780]: I1210 10:51:42.319511 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-rbac-proxy" Dec 10 10:51:42 crc kubenswrapper[4780]: I1210 10:51:42.868194 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-serving-cert" Dec 10 10:51:43 crc kubenswrapper[4780]: I1210 10:51:43.071616 4780 reflector.go:368] Caches populated for *v1.Pod from pkg/kubelet/config/apiserver.go:66 Dec 10 10:51:43 crc kubenswrapper[4780]: I1210 10:51:43.071853 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-4rw4n" podStartSLOduration=98.605396019 podStartE2EDuration="3m24.071756416s" podCreationTimestamp="2025-12-10 10:48:19 +0000 UTC" firstStartedPulling="2025-12-10 10:48:30.050044419 +0000 UTC m=+214.903437872" lastFinishedPulling="2025-12-10 10:50:15.516404826 +0000 UTC m=+320.369798269" observedRunningTime="2025-12-10 10:50:44.7018135 +0000 UTC m=+349.555206963" watchObservedRunningTime="2025-12-10 10:51:43.071756416 +0000 UTC m=+407.925149869" Dec 10 10:51:43 crc kubenswrapper[4780]: I1210 10:51:43.073081 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-7sdgl" podStartSLOduration=91.352386297 podStartE2EDuration="3m28.07306799s" podCreationTimestamp="2025-12-10 10:48:15 +0000 UTC" firstStartedPulling="2025-12-10 10:48:30.064121431 +0000 UTC m=+214.917514874" lastFinishedPulling="2025-12-10 10:50:26.784803114 +0000 UTC m=+331.638196567" observedRunningTime="2025-12-10 10:50:44.60085831 +0000 UTC m=+349.454251753" watchObservedRunningTime="2025-12-10 10:51:43.07306799 +0000 UTC m=+407.926461443" Dec 10 10:51:43 crc kubenswrapper[4780]: I1210 10:51:43.074358 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-jmx2c" podStartSLOduration=79.204558106 podStartE2EDuration="3m24.074345684s" podCreationTimestamp="2025-12-10 10:48:19 +0000 UTC" firstStartedPulling="2025-12-10 10:48:30.125963843 +0000 UTC m=+214.979357286" lastFinishedPulling="2025-12-10 10:50:34.995751411 +0000 UTC m=+339.849144864" observedRunningTime="2025-12-10 10:50:44.39930156 +0000 UTC m=+349.252695033" watchObservedRunningTime="2025-12-10 10:51:43.074345684 +0000 UTC m=+407.927739137" Dec 10 10:51:43 crc kubenswrapper[4780]: I1210 10:51:43.074948 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-z8b9w" podStartSLOduration=95.446429226 podStartE2EDuration="3m28.074910709s" podCreationTimestamp="2025-12-10 10:48:15 +0000 UTC" firstStartedPulling="2025-12-10 
10:48:30.133035009 +0000 UTC m=+214.986428452" lastFinishedPulling="2025-12-10 10:50:22.761516482 +0000 UTC m=+327.614909935" observedRunningTime="2025-12-10 10:50:44.657881327 +0000 UTC m=+349.511274770" watchObservedRunningTime="2025-12-10 10:51:43.074910709 +0000 UTC m=+407.928304162" Dec 10 10:51:43 crc kubenswrapper[4780]: I1210 10:51:43.075430 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-pdmqg" podStartSLOduration=78.79125413 podStartE2EDuration="3m28.075423743s" podCreationTimestamp="2025-12-10 10:48:15 +0000 UTC" firstStartedPulling="2025-12-10 10:48:30.070032187 +0000 UTC m=+214.923425630" lastFinishedPulling="2025-12-10 10:50:39.35420181 +0000 UTC m=+344.207595243" observedRunningTime="2025-12-10 10:50:45.180475931 +0000 UTC m=+350.033869384" watchObservedRunningTime="2025-12-10 10:51:43.075423743 +0000 UTC m=+407.928817196" Dec 10 10:51:43 crc kubenswrapper[4780]: I1210 10:51:43.075802 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" podStartSLOduration=86.075796552 podStartE2EDuration="1m26.075796552s" podCreationTimestamp="2025-12-10 10:50:17 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 10:50:44.796140975 +0000 UTC m=+349.649534418" watchObservedRunningTime="2025-12-10 10:51:43.075796552 +0000 UTC m=+407.929190015" Dec 10 10:51:43 crc kubenswrapper[4780]: I1210 10:51:43.076295 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-dmtlf" podStartSLOduration=79.190685187 podStartE2EDuration="3m24.076286705s" podCreationTimestamp="2025-12-10 10:48:19 +0000 UTC" firstStartedPulling="2025-12-10 10:48:30.091440592 +0000 UTC m=+214.944834035" lastFinishedPulling="2025-12-10 10:50:34.97704211 +0000 UTC m=+339.830435553" observedRunningTime="2025-12-10 10:50:44.96058402 +0000 UTC m=+349.813977463" watchObservedRunningTime="2025-12-10 10:51:43.076286705 +0000 UTC m=+407.929680168" Dec 10 10:51:43 crc kubenswrapper[4780]: I1210 10:51:43.077032 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-zms8r" podStartSLOduration=100.965697073 podStartE2EDuration="3m28.077023255s" podCreationTimestamp="2025-12-10 10:48:15 +0000 UTC" firstStartedPulling="2025-12-10 10:48:30.052052833 +0000 UTC m=+214.905446276" lastFinishedPulling="2025-12-10 10:50:17.163379015 +0000 UTC m=+322.016772458" observedRunningTime="2025-12-10 10:50:44.462184771 +0000 UTC m=+349.315578214" watchObservedRunningTime="2025-12-10 10:51:43.077023255 +0000 UTC m=+407.930416708" Dec 10 10:51:43 crc kubenswrapper[4780]: I1210 10:51:43.078884 4780 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-kube-apiserver/kube-apiserver-crc"] Dec 10 10:51:43 crc kubenswrapper[4780]: I1210 10:51:43.079039 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/kube-apiserver-crc"] Dec 10 10:51:43 crc kubenswrapper[4780]: I1210 10:51:43.079894 4780 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="da4d38a3-53c0-417d-a86f-3496714bd352" Dec 10 10:51:43 crc kubenswrapper[4780]: I1210 10:51:43.079943 4780 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="da4d38a3-53c0-417d-a86f-3496714bd352" Dec 10 10:51:43 crc 
kubenswrapper[4780]: I1210 10:51:43.096357 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"dns-default" Dec 10 10:51:43 crc kubenswrapper[4780]: I1210 10:51:43.104745 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/kube-apiserver-crc" podStartSLOduration=59.104715116 podStartE2EDuration="59.104715116s" podCreationTimestamp="2025-12-10 10:50:44 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 10:51:43.103138525 +0000 UTC m=+407.956531968" watchObservedRunningTime="2025-12-10 10:51:43.104715116 +0000 UTC m=+407.958108579" Dec 10 10:51:44 crc kubenswrapper[4780]: I1210 10:51:44.418057 4780 reflector.go:368] Caches populated for *v1.RuntimeClass from k8s.io/client-go/informers/factory.go:160 Dec 10 10:51:44 crc kubenswrapper[4780]: I1210 10:51:44.418358 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator"/"kube-storage-version-migrator-sa-dockercfg-5xfcg" Dec 10 10:51:45 crc kubenswrapper[4780]: I1210 10:51:45.507875 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-default-metrics-tls" Dec 10 10:51:46 crc kubenswrapper[4780]: I1210 10:51:46.356317 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"openshift-service-ca.crt" Dec 10 10:51:46 crc kubenswrapper[4780]: I1210 10:51:46.383567 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt" Dec 10 10:51:47 crc kubenswrapper[4780]: I1210 10:51:47.652503 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-secret" Dec 10 10:51:47 crc kubenswrapper[4780]: I1210 10:51:47.652659 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-ca-bundle" Dec 10 10:51:47 crc kubenswrapper[4780]: I1210 10:51:47.686794 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"openshift-service-ca.crt" Dec 10 10:51:47 crc kubenswrapper[4780]: I1210 10:51:47.878716 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"openshift-service-ca.crt" Dec 10 10:51:48 crc kubenswrapper[4780]: I1210 10:51:48.367688 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"oauth-serving-cert" Dec 10 10:51:50 crc kubenswrapper[4780]: I1210 10:51:50.225317 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-operator-config" Dec 10 10:51:52 crc kubenswrapper[4780]: I1210 10:51:52.282389 4780 kubelet.go:2431] "SyncLoop REMOVE" source="file" pods=["openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"] Dec 10 10:51:52 crc kubenswrapper[4780]: I1210 10:51:52.282881 4780 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" containerName="startup-monitor" containerID="cri-o://b91cd6130c2dc87f7b20a845c161c23b2590af4a45cbd77e21d39b2c598eef86" gracePeriod=5 Dec 10 10:51:52 crc kubenswrapper[4780]: I1210 10:51:52.484092 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"openshift-config-operator-dockercfg-7pc5z" Dec 10 10:51:57 crc kubenswrapper[4780]: 
I1210 10:51:57.475572 4780 patch_prober.go:28] interesting pod/machine-config-daemon-xhdr5 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 10 10:51:57 crc kubenswrapper[4780]: I1210 10:51:57.476513 4780 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" podUID="6bf1dca1-b191-4796-b326-baac53e84045" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 10 10:51:57 crc kubenswrapper[4780]: I1210 10:51:57.882029 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-startup-monitor-crc_f85e55b1a89d02b0cb034b1ea31ed45a/startup-monitor/0.log" Dec 10 10:51:57 crc kubenswrapper[4780]: I1210 10:51:57.882754 4780 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Dec 10 10:51:57 crc kubenswrapper[4780]: I1210 10:51:57.968408 4780 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" podUID="" Dec 10 10:51:57 crc kubenswrapper[4780]: I1210 10:51:57.982099 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"] Dec 10 10:51:57 crc kubenswrapper[4780]: I1210 10:51:57.982153 4780 kubelet.go:2649] "Unable to find pod for mirror pod, skipping" mirrorPod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" mirrorPodUID="c97e03e5-198a-4b7f-a6af-dc7ce98d3860" Dec 10 10:51:57 crc kubenswrapper[4780]: I1210 10:51:57.983491 4780 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"] Dec 10 10:51:57 crc kubenswrapper[4780]: I1210 10:51:57.983566 4780 kubelet.go:2673] "Unable to find pod for mirror pod, skipping" mirrorPod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" mirrorPodUID="c97e03e5-198a-4b7f-a6af-dc7ce98d3860" Dec 10 10:51:58 crc kubenswrapper[4780]: I1210 10:51:58.041423 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Dec 10 10:51:58 crc kubenswrapper[4780]: I1210 10:51:58.041503 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Dec 10 10:51:58 crc kubenswrapper[4780]: I1210 10:51:58.041575 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Dec 10 10:51:58 crc kubenswrapper[4780]: I1210 10:51:58.041610 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: 
\"f85e55b1a89d02b0cb034b1ea31ed45a\") " Dec 10 10:51:58 crc kubenswrapper[4780]: I1210 10:51:58.041649 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests" (OuterVolumeSpecName: "manifests") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "manifests". PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 10 10:51:58 crc kubenswrapper[4780]: I1210 10:51:58.041677 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Dec 10 10:51:58 crc kubenswrapper[4780]: I1210 10:51:58.041776 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log" (OuterVolumeSpecName: "var-log") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "var-log". PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 10 10:51:58 crc kubenswrapper[4780]: I1210 10:51:58.041756 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir" (OuterVolumeSpecName: "resource-dir") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "resource-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 10 10:51:58 crc kubenswrapper[4780]: I1210 10:51:58.041804 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock" (OuterVolumeSpecName: "var-lock") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "var-lock". PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 10 10:51:58 crc kubenswrapper[4780]: I1210 10:51:58.042422 4780 reconciler_common.go:293] "Volume detached for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") on node \"crc\" DevicePath \"\"" Dec 10 10:51:58 crc kubenswrapper[4780]: I1210 10:51:58.042451 4780 reconciler_common.go:293] "Volume detached for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") on node \"crc\" DevicePath \"\"" Dec 10 10:51:58 crc kubenswrapper[4780]: I1210 10:51:58.042461 4780 reconciler_common.go:293] "Volume detached for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") on node \"crc\" DevicePath \"\"" Dec 10 10:51:58 crc kubenswrapper[4780]: I1210 10:51:58.042470 4780 reconciler_common.go:293] "Volume detached for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") on node \"crc\" DevicePath \"\"" Dec 10 10:51:58 crc kubenswrapper[4780]: I1210 10:51:58.052978 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir" (OuterVolumeSpecName: "pod-resource-dir") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "pod-resource-dir". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 10 10:51:58 crc kubenswrapper[4780]: I1210 10:51:58.144535 4780 reconciler_common.go:293] "Volume detached for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") on node \"crc\" DevicePath \"\"" Dec 10 10:51:58 crc kubenswrapper[4780]: I1210 10:51:58.234878 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-startup-monitor-crc_f85e55b1a89d02b0cb034b1ea31ed45a/startup-monitor/0.log" Dec 10 10:51:58 crc kubenswrapper[4780]: I1210 10:51:58.235068 4780 generic.go:334] "Generic (PLEG): container finished" podID="f85e55b1a89d02b0cb034b1ea31ed45a" containerID="b91cd6130c2dc87f7b20a845c161c23b2590af4a45cbd77e21d39b2c598eef86" exitCode=137 Dec 10 10:51:58 crc kubenswrapper[4780]: I1210 10:51:58.235140 4780 scope.go:117] "RemoveContainer" containerID="b91cd6130c2dc87f7b20a845c161c23b2590af4a45cbd77e21d39b2c598eef86" Dec 10 10:51:58 crc kubenswrapper[4780]: I1210 10:51:58.235221 4780 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Dec 10 10:51:58 crc kubenswrapper[4780]: I1210 10:51:58.265968 4780 scope.go:117] "RemoveContainer" containerID="b91cd6130c2dc87f7b20a845c161c23b2590af4a45cbd77e21d39b2c598eef86" Dec 10 10:51:58 crc kubenswrapper[4780]: E1210 10:51:58.266646 4780 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b91cd6130c2dc87f7b20a845c161c23b2590af4a45cbd77e21d39b2c598eef86\": container with ID starting with b91cd6130c2dc87f7b20a845c161c23b2590af4a45cbd77e21d39b2c598eef86 not found: ID does not exist" containerID="b91cd6130c2dc87f7b20a845c161c23b2590af4a45cbd77e21d39b2c598eef86" Dec 10 10:51:58 crc kubenswrapper[4780]: I1210 10:51:58.266713 4780 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b91cd6130c2dc87f7b20a845c161c23b2590af4a45cbd77e21d39b2c598eef86"} err="failed to get container status \"b91cd6130c2dc87f7b20a845c161c23b2590af4a45cbd77e21d39b2c598eef86\": rpc error: code = NotFound desc = could not find container \"b91cd6130c2dc87f7b20a845c161c23b2590af4a45cbd77e21d39b2c598eef86\": container with ID starting with b91cd6130c2dc87f7b20a845c161c23b2590af4a45cbd77e21d39b2c598eef86 not found: ID does not exist" Dec 10 10:51:59 crc kubenswrapper[4780]: I1210 10:51:59.974512 4780 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" path="/var/lib/kubelet/pods/f85e55b1a89d02b0cb034b1ea31ed45a/volumes" Dec 10 10:52:06 crc kubenswrapper[4780]: I1210 10:52:06.982897 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-fw9bc"] Dec 10 10:52:06 crc kubenswrapper[4780]: I1210 10:52:06.984005 4780 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-fw9bc" podUID="fb9e3c6f-29bf-49b8-a0c0-c17447e36e99" containerName="route-controller-manager" containerID="cri-o://452275bf42979dd853254a2b1fa1a297c6e673d26d188431d6ff7a0e7393a91c" gracePeriod=30 Dec 10 10:52:06 crc kubenswrapper[4780]: I1210 10:52:06.994315 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-njfgs"] Dec 10 10:52:06 crc kubenswrapper[4780]: I1210 10:52:06.994998 
4780 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-controller-manager/controller-manager-879f6c89f-njfgs" podUID="0d2560a1-1eb3-4fad-89c4-100985ef6455" containerName="controller-manager" containerID="cri-o://858d13f121cb90f2ff79029e5d8b58e10b5ae8651f778357c993c1e2979dbfd2" gracePeriod=30 Dec 10 10:52:07 crc kubenswrapper[4780]: I1210 10:52:07.474760 4780 generic.go:334] "Generic (PLEG): container finished" podID="fb9e3c6f-29bf-49b8-a0c0-c17447e36e99" containerID="452275bf42979dd853254a2b1fa1a297c6e673d26d188431d6ff7a0e7393a91c" exitCode=0 Dec 10 10:52:07 crc kubenswrapper[4780]: I1210 10:52:07.474864 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-fw9bc" event={"ID":"fb9e3c6f-29bf-49b8-a0c0-c17447e36e99","Type":"ContainerDied","Data":"452275bf42979dd853254a2b1fa1a297c6e673d26d188431d6ff7a0e7393a91c"} Dec 10 10:52:07 crc kubenswrapper[4780]: I1210 10:52:07.477809 4780 generic.go:334] "Generic (PLEG): container finished" podID="0d2560a1-1eb3-4fad-89c4-100985ef6455" containerID="858d13f121cb90f2ff79029e5d8b58e10b5ae8651f778357c993c1e2979dbfd2" exitCode=0 Dec 10 10:52:07 crc kubenswrapper[4780]: I1210 10:52:07.477865 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-njfgs" event={"ID":"0d2560a1-1eb3-4fad-89c4-100985ef6455","Type":"ContainerDied","Data":"858d13f121cb90f2ff79029e5d8b58e10b5ae8651f778357c993c1e2979dbfd2"} Dec 10 10:52:08 crc kubenswrapper[4780]: I1210 10:52:08.062702 4780 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-fw9bc" Dec 10 10:52:08 crc kubenswrapper[4780]: I1210 10:52:08.063713 4780 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-njfgs" Dec 10 10:52:08 crc kubenswrapper[4780]: I1210 10:52:08.211519 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/fb9e3c6f-29bf-49b8-a0c0-c17447e36e99-serving-cert\") pod \"fb9e3c6f-29bf-49b8-a0c0-c17447e36e99\" (UID: \"fb9e3c6f-29bf-49b8-a0c0-c17447e36e99\") " Dec 10 10:52:08 crc kubenswrapper[4780]: I1210 10:52:08.211655 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7f6v7\" (UniqueName: \"kubernetes.io/projected/0d2560a1-1eb3-4fad-89c4-100985ef6455-kube-api-access-7f6v7\") pod \"0d2560a1-1eb3-4fad-89c4-100985ef6455\" (UID: \"0d2560a1-1eb3-4fad-89c4-100985ef6455\") " Dec 10 10:52:08 crc kubenswrapper[4780]: I1210 10:52:08.211699 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/0d2560a1-1eb3-4fad-89c4-100985ef6455-proxy-ca-bundles\") pod \"0d2560a1-1eb3-4fad-89c4-100985ef6455\" (UID: \"0d2560a1-1eb3-4fad-89c4-100985ef6455\") " Dec 10 10:52:08 crc kubenswrapper[4780]: I1210 10:52:08.211727 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/fb9e3c6f-29bf-49b8-a0c0-c17447e36e99-config\") pod \"fb9e3c6f-29bf-49b8-a0c0-c17447e36e99\" (UID: \"fb9e3c6f-29bf-49b8-a0c0-c17447e36e99\") " Dec 10 10:52:08 crc kubenswrapper[4780]: I1210 10:52:08.211792 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2xphz\" (UniqueName: \"kubernetes.io/projected/fb9e3c6f-29bf-49b8-a0c0-c17447e36e99-kube-api-access-2xphz\") pod \"fb9e3c6f-29bf-49b8-a0c0-c17447e36e99\" (UID: \"fb9e3c6f-29bf-49b8-a0c0-c17447e36e99\") " Dec 10 10:52:08 crc kubenswrapper[4780]: I1210 10:52:08.211812 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/fb9e3c6f-29bf-49b8-a0c0-c17447e36e99-client-ca\") pod \"fb9e3c6f-29bf-49b8-a0c0-c17447e36e99\" (UID: \"fb9e3c6f-29bf-49b8-a0c0-c17447e36e99\") " Dec 10 10:52:08 crc kubenswrapper[4780]: I1210 10:52:08.211853 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/0d2560a1-1eb3-4fad-89c4-100985ef6455-client-ca\") pod \"0d2560a1-1eb3-4fad-89c4-100985ef6455\" (UID: \"0d2560a1-1eb3-4fad-89c4-100985ef6455\") " Dec 10 10:52:08 crc kubenswrapper[4780]: I1210 10:52:08.211904 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0d2560a1-1eb3-4fad-89c4-100985ef6455-config\") pod \"0d2560a1-1eb3-4fad-89c4-100985ef6455\" (UID: \"0d2560a1-1eb3-4fad-89c4-100985ef6455\") " Dec 10 10:52:08 crc kubenswrapper[4780]: I1210 10:52:08.211944 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0d2560a1-1eb3-4fad-89c4-100985ef6455-serving-cert\") pod \"0d2560a1-1eb3-4fad-89c4-100985ef6455\" (UID: \"0d2560a1-1eb3-4fad-89c4-100985ef6455\") " Dec 10 10:52:08 crc kubenswrapper[4780]: I1210 10:52:08.212714 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fb9e3c6f-29bf-49b8-a0c0-c17447e36e99-client-ca" (OuterVolumeSpecName: "client-ca") pod "fb9e3c6f-29bf-49b8-a0c0-c17447e36e99" (UID: 
"fb9e3c6f-29bf-49b8-a0c0-c17447e36e99"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 10:52:08 crc kubenswrapper[4780]: I1210 10:52:08.212848 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fb9e3c6f-29bf-49b8-a0c0-c17447e36e99-config" (OuterVolumeSpecName: "config") pod "fb9e3c6f-29bf-49b8-a0c0-c17447e36e99" (UID: "fb9e3c6f-29bf-49b8-a0c0-c17447e36e99"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 10:52:08 crc kubenswrapper[4780]: I1210 10:52:08.213424 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0d2560a1-1eb3-4fad-89c4-100985ef6455-client-ca" (OuterVolumeSpecName: "client-ca") pod "0d2560a1-1eb3-4fad-89c4-100985ef6455" (UID: "0d2560a1-1eb3-4fad-89c4-100985ef6455"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 10:52:08 crc kubenswrapper[4780]: I1210 10:52:08.213599 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0d2560a1-1eb3-4fad-89c4-100985ef6455-config" (OuterVolumeSpecName: "config") pod "0d2560a1-1eb3-4fad-89c4-100985ef6455" (UID: "0d2560a1-1eb3-4fad-89c4-100985ef6455"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 10:52:08 crc kubenswrapper[4780]: I1210 10:52:08.213719 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0d2560a1-1eb3-4fad-89c4-100985ef6455-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "0d2560a1-1eb3-4fad-89c4-100985ef6455" (UID: "0d2560a1-1eb3-4fad-89c4-100985ef6455"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 10:52:08 crc kubenswrapper[4780]: I1210 10:52:08.219771 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0d2560a1-1eb3-4fad-89c4-100985ef6455-kube-api-access-7f6v7" (OuterVolumeSpecName: "kube-api-access-7f6v7") pod "0d2560a1-1eb3-4fad-89c4-100985ef6455" (UID: "0d2560a1-1eb3-4fad-89c4-100985ef6455"). InnerVolumeSpecName "kube-api-access-7f6v7". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 10:52:08 crc kubenswrapper[4780]: I1210 10:52:08.220809 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0d2560a1-1eb3-4fad-89c4-100985ef6455-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "0d2560a1-1eb3-4fad-89c4-100985ef6455" (UID: "0d2560a1-1eb3-4fad-89c4-100985ef6455"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 10:52:08 crc kubenswrapper[4780]: I1210 10:52:08.222324 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fb9e3c6f-29bf-49b8-a0c0-c17447e36e99-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "fb9e3c6f-29bf-49b8-a0c0-c17447e36e99" (UID: "fb9e3c6f-29bf-49b8-a0c0-c17447e36e99"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 10:52:08 crc kubenswrapper[4780]: I1210 10:52:08.226097 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fb9e3c6f-29bf-49b8-a0c0-c17447e36e99-kube-api-access-2xphz" (OuterVolumeSpecName: "kube-api-access-2xphz") pod "fb9e3c6f-29bf-49b8-a0c0-c17447e36e99" (UID: "fb9e3c6f-29bf-49b8-a0c0-c17447e36e99"). 
InnerVolumeSpecName "kube-api-access-2xphz". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 10:52:08 crc kubenswrapper[4780]: I1210 10:52:08.314493 4780 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7f6v7\" (UniqueName: \"kubernetes.io/projected/0d2560a1-1eb3-4fad-89c4-100985ef6455-kube-api-access-7f6v7\") on node \"crc\" DevicePath \"\"" Dec 10 10:52:08 crc kubenswrapper[4780]: I1210 10:52:08.314557 4780 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/0d2560a1-1eb3-4fad-89c4-100985ef6455-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Dec 10 10:52:08 crc kubenswrapper[4780]: I1210 10:52:08.314577 4780 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/fb9e3c6f-29bf-49b8-a0c0-c17447e36e99-config\") on node \"crc\" DevicePath \"\"" Dec 10 10:52:08 crc kubenswrapper[4780]: I1210 10:52:08.314591 4780 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2xphz\" (UniqueName: \"kubernetes.io/projected/fb9e3c6f-29bf-49b8-a0c0-c17447e36e99-kube-api-access-2xphz\") on node \"crc\" DevicePath \"\"" Dec 10 10:52:08 crc kubenswrapper[4780]: I1210 10:52:08.314603 4780 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/fb9e3c6f-29bf-49b8-a0c0-c17447e36e99-client-ca\") on node \"crc\" DevicePath \"\"" Dec 10 10:52:08 crc kubenswrapper[4780]: I1210 10:52:08.314616 4780 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/0d2560a1-1eb3-4fad-89c4-100985ef6455-client-ca\") on node \"crc\" DevicePath \"\"" Dec 10 10:52:08 crc kubenswrapper[4780]: I1210 10:52:08.314627 4780 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0d2560a1-1eb3-4fad-89c4-100985ef6455-config\") on node \"crc\" DevicePath \"\"" Dec 10 10:52:08 crc kubenswrapper[4780]: I1210 10:52:08.314639 4780 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0d2560a1-1eb3-4fad-89c4-100985ef6455-serving-cert\") on node \"crc\" DevicePath \"\"" Dec 10 10:52:08 crc kubenswrapper[4780]: I1210 10:52:08.314651 4780 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/fb9e3c6f-29bf-49b8-a0c0-c17447e36e99-serving-cert\") on node \"crc\" DevicePath \"\"" Dec 10 10:52:08 crc kubenswrapper[4780]: I1210 10:52:08.487653 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-fw9bc" event={"ID":"fb9e3c6f-29bf-49b8-a0c0-c17447e36e99","Type":"ContainerDied","Data":"25f7b33a716dcc16081da556eddd09bad433ec8ef3a9c1cebbfc73ef3a697e6f"} Dec 10 10:52:08 crc kubenswrapper[4780]: I1210 10:52:08.487735 4780 scope.go:117] "RemoveContainer" containerID="452275bf42979dd853254a2b1fa1a297c6e673d26d188431d6ff7a0e7393a91c" Dec 10 10:52:08 crc kubenswrapper[4780]: I1210 10:52:08.487682 4780 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-fw9bc" Dec 10 10:52:08 crc kubenswrapper[4780]: I1210 10:52:08.490088 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-njfgs" event={"ID":"0d2560a1-1eb3-4fad-89c4-100985ef6455","Type":"ContainerDied","Data":"fb09483c59dcac39f6ab7a83b2c2facf82c0a2e3a41354a0ea0a894ccc9121f6"} Dec 10 10:52:08 crc kubenswrapper[4780]: I1210 10:52:08.490238 4780 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-njfgs" Dec 10 10:52:08 crc kubenswrapper[4780]: I1210 10:52:08.512972 4780 scope.go:117] "RemoveContainer" containerID="858d13f121cb90f2ff79029e5d8b58e10b5ae8651f778357c993c1e2979dbfd2" Dec 10 10:52:08 crc kubenswrapper[4780]: I1210 10:52:08.530616 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-fw9bc"] Dec 10 10:52:08 crc kubenswrapper[4780]: I1210 10:52:08.534613 4780 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-fw9bc"] Dec 10 10:52:08 crc kubenswrapper[4780]: I1210 10:52:08.549277 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-njfgs"] Dec 10 10:52:08 crc kubenswrapper[4780]: I1210 10:52:08.554128 4780 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-njfgs"] Dec 10 10:52:09 crc kubenswrapper[4780]: I1210 10:52:09.164850 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6cf858fd97-cg8vf"] Dec 10 10:52:09 crc kubenswrapper[4780]: E1210 10:52:09.165834 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0d2560a1-1eb3-4fad-89c4-100985ef6455" containerName="controller-manager" Dec 10 10:52:09 crc kubenswrapper[4780]: I1210 10:52:09.165864 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="0d2560a1-1eb3-4fad-89c4-100985ef6455" containerName="controller-manager" Dec 10 10:52:09 crc kubenswrapper[4780]: E1210 10:52:09.165892 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c7b993a1-1915-40e6-b88a-3606990443e1" containerName="installer" Dec 10 10:52:09 crc kubenswrapper[4780]: I1210 10:52:09.165902 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="c7b993a1-1915-40e6-b88a-3606990443e1" containerName="installer" Dec 10 10:52:09 crc kubenswrapper[4780]: E1210 10:52:09.165949 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fb9e3c6f-29bf-49b8-a0c0-c17447e36e99" containerName="route-controller-manager" Dec 10 10:52:09 crc kubenswrapper[4780]: I1210 10:52:09.165960 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="fb9e3c6f-29bf-49b8-a0c0-c17447e36e99" containerName="route-controller-manager" Dec 10 10:52:09 crc kubenswrapper[4780]: E1210 10:52:09.165977 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" containerName="startup-monitor" Dec 10 10:52:09 crc kubenswrapper[4780]: I1210 10:52:09.166028 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" containerName="startup-monitor" Dec 10 10:52:09 crc kubenswrapper[4780]: I1210 10:52:09.166227 4780 memory_manager.go:354] "RemoveStaleState removing state" 
podUID="0d2560a1-1eb3-4fad-89c4-100985ef6455" containerName="controller-manager" Dec 10 10:52:09 crc kubenswrapper[4780]: I1210 10:52:09.166251 4780 memory_manager.go:354] "RemoveStaleState removing state" podUID="c7b993a1-1915-40e6-b88a-3606990443e1" containerName="installer" Dec 10 10:52:09 crc kubenswrapper[4780]: I1210 10:52:09.166270 4780 memory_manager.go:354] "RemoveStaleState removing state" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" containerName="startup-monitor" Dec 10 10:52:09 crc kubenswrapper[4780]: I1210 10:52:09.166280 4780 memory_manager.go:354] "RemoveStaleState removing state" podUID="fb9e3c6f-29bf-49b8-a0c0-c17447e36e99" containerName="route-controller-manager" Dec 10 10:52:09 crc kubenswrapper[4780]: I1210 10:52:09.167080 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6cf858fd97-cg8vf" Dec 10 10:52:09 crc kubenswrapper[4780]: I1210 10:52:09.171441 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert" Dec 10 10:52:09 crc kubenswrapper[4780]: I1210 10:52:09.171749 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca" Dec 10 10:52:09 crc kubenswrapper[4780]: I1210 10:52:09.172103 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt" Dec 10 10:52:09 crc kubenswrapper[4780]: I1210 10:52:09.172214 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt" Dec 10 10:52:09 crc kubenswrapper[4780]: I1210 10:52:09.172517 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2" Dec 10 10:52:09 crc kubenswrapper[4780]: I1210 10:52:09.174245 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config" Dec 10 10:52:09 crc kubenswrapper[4780]: I1210 10:52:09.178123 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-868cf9cbd7-jm4hn"] Dec 10 10:52:09 crc kubenswrapper[4780]: I1210 10:52:09.179423 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-868cf9cbd7-jm4hn" Dec 10 10:52:09 crc kubenswrapper[4780]: I1210 10:52:09.184375 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c" Dec 10 10:52:09 crc kubenswrapper[4780]: I1210 10:52:09.184688 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config" Dec 10 10:52:09 crc kubenswrapper[4780]: I1210 10:52:09.184398 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert" Dec 10 10:52:09 crc kubenswrapper[4780]: I1210 10:52:09.185065 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6cf858fd97-cg8vf"] Dec 10 10:52:09 crc kubenswrapper[4780]: I1210 10:52:09.188739 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt" Dec 10 10:52:09 crc kubenswrapper[4780]: I1210 10:52:09.188826 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca" Dec 10 10:52:09 crc kubenswrapper[4780]: I1210 10:52:09.188890 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt" Dec 10 10:52:09 crc kubenswrapper[4780]: I1210 10:52:09.192050 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-868cf9cbd7-jm4hn"] Dec 10 10:52:09 crc kubenswrapper[4780]: I1210 10:52:09.200739 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca" Dec 10 10:52:09 crc kubenswrapper[4780]: I1210 10:52:09.329328 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/223031c2-f69f-4e3b-881e-569cdb6e1226-serving-cert\") pod \"route-controller-manager-6cf858fd97-cg8vf\" (UID: \"223031c2-f69f-4e3b-881e-569cdb6e1226\") " pod="openshift-route-controller-manager/route-controller-manager-6cf858fd97-cg8vf" Dec 10 10:52:09 crc kubenswrapper[4780]: I1210 10:52:09.329413 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/f4d85e33-3768-414e-9148-b284c5401bed-proxy-ca-bundles\") pod \"controller-manager-868cf9cbd7-jm4hn\" (UID: \"f4d85e33-3768-414e-9148-b284c5401bed\") " pod="openshift-controller-manager/controller-manager-868cf9cbd7-jm4hn" Dec 10 10:52:09 crc kubenswrapper[4780]: I1210 10:52:09.329467 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z5s8t\" (UniqueName: \"kubernetes.io/projected/223031c2-f69f-4e3b-881e-569cdb6e1226-kube-api-access-z5s8t\") pod \"route-controller-manager-6cf858fd97-cg8vf\" (UID: \"223031c2-f69f-4e3b-881e-569cdb6e1226\") " pod="openshift-route-controller-manager/route-controller-manager-6cf858fd97-cg8vf" Dec 10 10:52:09 crc kubenswrapper[4780]: I1210 10:52:09.329496 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/223031c2-f69f-4e3b-881e-569cdb6e1226-client-ca\") pod \"route-controller-manager-6cf858fd97-cg8vf\" (UID: \"223031c2-f69f-4e3b-881e-569cdb6e1226\") " 
pod="openshift-route-controller-manager/route-controller-manager-6cf858fd97-cg8vf" Dec 10 10:52:09 crc kubenswrapper[4780]: I1210 10:52:09.330007 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f4d85e33-3768-414e-9148-b284c5401bed-config\") pod \"controller-manager-868cf9cbd7-jm4hn\" (UID: \"f4d85e33-3768-414e-9148-b284c5401bed\") " pod="openshift-controller-manager/controller-manager-868cf9cbd7-jm4hn" Dec 10 10:52:09 crc kubenswrapper[4780]: I1210 10:52:09.330088 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f4d85e33-3768-414e-9148-b284c5401bed-serving-cert\") pod \"controller-manager-868cf9cbd7-jm4hn\" (UID: \"f4d85e33-3768-414e-9148-b284c5401bed\") " pod="openshift-controller-manager/controller-manager-868cf9cbd7-jm4hn" Dec 10 10:52:09 crc kubenswrapper[4780]: I1210 10:52:09.330150 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/f4d85e33-3768-414e-9148-b284c5401bed-client-ca\") pod \"controller-manager-868cf9cbd7-jm4hn\" (UID: \"f4d85e33-3768-414e-9148-b284c5401bed\") " pod="openshift-controller-manager/controller-manager-868cf9cbd7-jm4hn" Dec 10 10:52:09 crc kubenswrapper[4780]: I1210 10:52:09.330198 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/223031c2-f69f-4e3b-881e-569cdb6e1226-config\") pod \"route-controller-manager-6cf858fd97-cg8vf\" (UID: \"223031c2-f69f-4e3b-881e-569cdb6e1226\") " pod="openshift-route-controller-manager/route-controller-manager-6cf858fd97-cg8vf" Dec 10 10:52:09 crc kubenswrapper[4780]: I1210 10:52:09.330298 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9vxpl\" (UniqueName: \"kubernetes.io/projected/f4d85e33-3768-414e-9148-b284c5401bed-kube-api-access-9vxpl\") pod \"controller-manager-868cf9cbd7-jm4hn\" (UID: \"f4d85e33-3768-414e-9148-b284c5401bed\") " pod="openshift-controller-manager/controller-manager-868cf9cbd7-jm4hn" Dec 10 10:52:09 crc kubenswrapper[4780]: I1210 10:52:09.432009 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f4d85e33-3768-414e-9148-b284c5401bed-config\") pod \"controller-manager-868cf9cbd7-jm4hn\" (UID: \"f4d85e33-3768-414e-9148-b284c5401bed\") " pod="openshift-controller-manager/controller-manager-868cf9cbd7-jm4hn" Dec 10 10:52:09 crc kubenswrapper[4780]: I1210 10:52:09.432087 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f4d85e33-3768-414e-9148-b284c5401bed-serving-cert\") pod \"controller-manager-868cf9cbd7-jm4hn\" (UID: \"f4d85e33-3768-414e-9148-b284c5401bed\") " pod="openshift-controller-manager/controller-manager-868cf9cbd7-jm4hn" Dec 10 10:52:09 crc kubenswrapper[4780]: I1210 10:52:09.432210 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/f4d85e33-3768-414e-9148-b284c5401bed-client-ca\") pod \"controller-manager-868cf9cbd7-jm4hn\" (UID: \"f4d85e33-3768-414e-9148-b284c5401bed\") " pod="openshift-controller-manager/controller-manager-868cf9cbd7-jm4hn" Dec 10 10:52:09 crc kubenswrapper[4780]: I1210 
10:52:09.432267 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/223031c2-f69f-4e3b-881e-569cdb6e1226-config\") pod \"route-controller-manager-6cf858fd97-cg8vf\" (UID: \"223031c2-f69f-4e3b-881e-569cdb6e1226\") " pod="openshift-route-controller-manager/route-controller-manager-6cf858fd97-cg8vf" Dec 10 10:52:09 crc kubenswrapper[4780]: I1210 10:52:09.432311 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9vxpl\" (UniqueName: \"kubernetes.io/projected/f4d85e33-3768-414e-9148-b284c5401bed-kube-api-access-9vxpl\") pod \"controller-manager-868cf9cbd7-jm4hn\" (UID: \"f4d85e33-3768-414e-9148-b284c5401bed\") " pod="openshift-controller-manager/controller-manager-868cf9cbd7-jm4hn" Dec 10 10:52:09 crc kubenswrapper[4780]: I1210 10:52:09.432341 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/223031c2-f69f-4e3b-881e-569cdb6e1226-serving-cert\") pod \"route-controller-manager-6cf858fd97-cg8vf\" (UID: \"223031c2-f69f-4e3b-881e-569cdb6e1226\") " pod="openshift-route-controller-manager/route-controller-manager-6cf858fd97-cg8vf" Dec 10 10:52:09 crc kubenswrapper[4780]: I1210 10:52:09.432364 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/f4d85e33-3768-414e-9148-b284c5401bed-proxy-ca-bundles\") pod \"controller-manager-868cf9cbd7-jm4hn\" (UID: \"f4d85e33-3768-414e-9148-b284c5401bed\") " pod="openshift-controller-manager/controller-manager-868cf9cbd7-jm4hn" Dec 10 10:52:09 crc kubenswrapper[4780]: I1210 10:52:09.432395 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-z5s8t\" (UniqueName: \"kubernetes.io/projected/223031c2-f69f-4e3b-881e-569cdb6e1226-kube-api-access-z5s8t\") pod \"route-controller-manager-6cf858fd97-cg8vf\" (UID: \"223031c2-f69f-4e3b-881e-569cdb6e1226\") " pod="openshift-route-controller-manager/route-controller-manager-6cf858fd97-cg8vf" Dec 10 10:52:09 crc kubenswrapper[4780]: I1210 10:52:09.432415 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/223031c2-f69f-4e3b-881e-569cdb6e1226-client-ca\") pod \"route-controller-manager-6cf858fd97-cg8vf\" (UID: \"223031c2-f69f-4e3b-881e-569cdb6e1226\") " pod="openshift-route-controller-manager/route-controller-manager-6cf858fd97-cg8vf" Dec 10 10:52:09 crc kubenswrapper[4780]: I1210 10:52:09.433431 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/f4d85e33-3768-414e-9148-b284c5401bed-client-ca\") pod \"controller-manager-868cf9cbd7-jm4hn\" (UID: \"f4d85e33-3768-414e-9148-b284c5401bed\") " pod="openshift-controller-manager/controller-manager-868cf9cbd7-jm4hn" Dec 10 10:52:09 crc kubenswrapper[4780]: I1210 10:52:09.433530 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/223031c2-f69f-4e3b-881e-569cdb6e1226-client-ca\") pod \"route-controller-manager-6cf858fd97-cg8vf\" (UID: \"223031c2-f69f-4e3b-881e-569cdb6e1226\") " pod="openshift-route-controller-manager/route-controller-manager-6cf858fd97-cg8vf" Dec 10 10:52:09 crc kubenswrapper[4780]: I1210 10:52:09.434503 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: 
\"kubernetes.io/configmap/f4d85e33-3768-414e-9148-b284c5401bed-config\") pod \"controller-manager-868cf9cbd7-jm4hn\" (UID: \"f4d85e33-3768-414e-9148-b284c5401bed\") " pod="openshift-controller-manager/controller-manager-868cf9cbd7-jm4hn" Dec 10 10:52:09 crc kubenswrapper[4780]: I1210 10:52:09.434732 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/f4d85e33-3768-414e-9148-b284c5401bed-proxy-ca-bundles\") pod \"controller-manager-868cf9cbd7-jm4hn\" (UID: \"f4d85e33-3768-414e-9148-b284c5401bed\") " pod="openshift-controller-manager/controller-manager-868cf9cbd7-jm4hn" Dec 10 10:52:09 crc kubenswrapper[4780]: I1210 10:52:09.435167 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/223031c2-f69f-4e3b-881e-569cdb6e1226-config\") pod \"route-controller-manager-6cf858fd97-cg8vf\" (UID: \"223031c2-f69f-4e3b-881e-569cdb6e1226\") " pod="openshift-route-controller-manager/route-controller-manager-6cf858fd97-cg8vf" Dec 10 10:52:09 crc kubenswrapper[4780]: I1210 10:52:09.437186 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/223031c2-f69f-4e3b-881e-569cdb6e1226-serving-cert\") pod \"route-controller-manager-6cf858fd97-cg8vf\" (UID: \"223031c2-f69f-4e3b-881e-569cdb6e1226\") " pod="openshift-route-controller-manager/route-controller-manager-6cf858fd97-cg8vf" Dec 10 10:52:09 crc kubenswrapper[4780]: I1210 10:52:09.439782 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f4d85e33-3768-414e-9148-b284c5401bed-serving-cert\") pod \"controller-manager-868cf9cbd7-jm4hn\" (UID: \"f4d85e33-3768-414e-9148-b284c5401bed\") " pod="openshift-controller-manager/controller-manager-868cf9cbd7-jm4hn" Dec 10 10:52:09 crc kubenswrapper[4780]: I1210 10:52:09.452191 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-z5s8t\" (UniqueName: \"kubernetes.io/projected/223031c2-f69f-4e3b-881e-569cdb6e1226-kube-api-access-z5s8t\") pod \"route-controller-manager-6cf858fd97-cg8vf\" (UID: \"223031c2-f69f-4e3b-881e-569cdb6e1226\") " pod="openshift-route-controller-manager/route-controller-manager-6cf858fd97-cg8vf" Dec 10 10:52:09 crc kubenswrapper[4780]: I1210 10:52:09.458084 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9vxpl\" (UniqueName: \"kubernetes.io/projected/f4d85e33-3768-414e-9148-b284c5401bed-kube-api-access-9vxpl\") pod \"controller-manager-868cf9cbd7-jm4hn\" (UID: \"f4d85e33-3768-414e-9148-b284c5401bed\") " pod="openshift-controller-manager/controller-manager-868cf9cbd7-jm4hn" Dec 10 10:52:09 crc kubenswrapper[4780]: I1210 10:52:09.500753 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6cf858fd97-cg8vf" Dec 10 10:52:09 crc kubenswrapper[4780]: I1210 10:52:09.517137 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-868cf9cbd7-jm4hn" Dec 10 10:52:09 crc kubenswrapper[4780]: I1210 10:52:09.849320 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-868cf9cbd7-jm4hn"] Dec 10 10:52:09 crc kubenswrapper[4780]: I1210 10:52:09.885948 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6cf858fd97-cg8vf"] Dec 10 10:52:09 crc kubenswrapper[4780]: W1210 10:52:09.892295 4780 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod223031c2_f69f_4e3b_881e_569cdb6e1226.slice/crio-7961092d07ae27214ca4eaf41ce26e30408d62c773bee594db6ab089698cda6c WatchSource:0}: Error finding container 7961092d07ae27214ca4eaf41ce26e30408d62c773bee594db6ab089698cda6c: Status 404 returned error can't find the container with id 7961092d07ae27214ca4eaf41ce26e30408d62c773bee594db6ab089698cda6c Dec 10 10:52:09 crc kubenswrapper[4780]: I1210 10:52:09.981575 4780 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0d2560a1-1eb3-4fad-89c4-100985ef6455" path="/var/lib/kubelet/pods/0d2560a1-1eb3-4fad-89c4-100985ef6455/volumes" Dec 10 10:52:09 crc kubenswrapper[4780]: I1210 10:52:09.982653 4780 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fb9e3c6f-29bf-49b8-a0c0-c17447e36e99" path="/var/lib/kubelet/pods/fb9e3c6f-29bf-49b8-a0c0-c17447e36e99/volumes" Dec 10 10:52:10 crc kubenswrapper[4780]: I1210 10:52:10.659517 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-868cf9cbd7-jm4hn" event={"ID":"f4d85e33-3768-414e-9148-b284c5401bed","Type":"ContainerStarted","Data":"1f7970a6ac4cbac054ef4ffb50dc0cd146e1413e60760cfb81ab552ccbfb3e6e"} Dec 10 10:52:10 crc kubenswrapper[4780]: I1210 10:52:10.659662 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-868cf9cbd7-jm4hn" event={"ID":"f4d85e33-3768-414e-9148-b284c5401bed","Type":"ContainerStarted","Data":"acb2733db453a63a62fa4d522cc4699654df0105145ecc78523089b0348ebfda"} Dec 10 10:52:10 crc kubenswrapper[4780]: I1210 10:52:10.659850 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-868cf9cbd7-jm4hn" Dec 10 10:52:10 crc kubenswrapper[4780]: I1210 10:52:10.667066 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6cf858fd97-cg8vf" event={"ID":"223031c2-f69f-4e3b-881e-569cdb6e1226","Type":"ContainerStarted","Data":"8e24d6fa9e64068d44a521036de9669170ec4c27876c8b380e466bbdf0d993dd"} Dec 10 10:52:10 crc kubenswrapper[4780]: I1210 10:52:10.667135 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6cf858fd97-cg8vf" event={"ID":"223031c2-f69f-4e3b-881e-569cdb6e1226","Type":"ContainerStarted","Data":"7961092d07ae27214ca4eaf41ce26e30408d62c773bee594db6ab089698cda6c"} Dec 10 10:52:10 crc kubenswrapper[4780]: I1210 10:52:10.667161 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-6cf858fd97-cg8vf" Dec 10 10:52:10 crc kubenswrapper[4780]: I1210 10:52:10.682370 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-6cf858fd97-cg8vf" 
Dec 10 10:52:10 crc kubenswrapper[4780]: I1210 10:52:10.692833 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-868cf9cbd7-jm4hn" podStartSLOduration=3.69280535 podStartE2EDuration="3.69280535s" podCreationTimestamp="2025-12-10 10:52:07 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 10:52:10.689883093 +0000 UTC m=+435.543276546" watchObservedRunningTime="2025-12-10 10:52:10.69280535 +0000 UTC m=+435.546198793" Dec 10 10:52:10 crc kubenswrapper[4780]: I1210 10:52:10.716889 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-6cf858fd97-cg8vf" podStartSLOduration=3.7168563629999998 podStartE2EDuration="3.716856363s" podCreationTimestamp="2025-12-10 10:52:07 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 10:52:10.714982074 +0000 UTC m=+435.568375517" watchObservedRunningTime="2025-12-10 10:52:10.716856363 +0000 UTC m=+435.570249816" Dec 10 10:52:10 crc kubenswrapper[4780]: I1210 10:52:10.722307 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-868cf9cbd7-jm4hn" Dec 10 10:52:26 crc kubenswrapper[4780]: I1210 10:52:26.892456 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-dmtlf"] Dec 10 10:52:26 crc kubenswrapper[4780]: I1210 10:52:26.893453 4780 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-dmtlf" podUID="e941c8a4-98a7-48d2-9ec6-4e2dec741b54" containerName="registry-server" containerID="cri-o://0942019c25a27331a47beb7a056cf3f5cd236ad087179cacba6054f6537f7ae9" gracePeriod=2 Dec 10 10:52:27 crc kubenswrapper[4780]: I1210 10:52:27.417478 4780 generic.go:334] "Generic (PLEG): container finished" podID="e941c8a4-98a7-48d2-9ec6-4e2dec741b54" containerID="0942019c25a27331a47beb7a056cf3f5cd236ad087179cacba6054f6537f7ae9" exitCode=0 Dec 10 10:52:27 crc kubenswrapper[4780]: I1210 10:52:27.417588 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-dmtlf" event={"ID":"e941c8a4-98a7-48d2-9ec6-4e2dec741b54","Type":"ContainerDied","Data":"0942019c25a27331a47beb7a056cf3f5cd236ad087179cacba6054f6537f7ae9"} Dec 10 10:52:27 crc kubenswrapper[4780]: I1210 10:52:27.477292 4780 patch_prober.go:28] interesting pod/machine-config-daemon-xhdr5 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 10 10:52:27 crc kubenswrapper[4780]: I1210 10:52:27.477394 4780 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" podUID="6bf1dca1-b191-4796-b326-baac53e84045" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 10 10:52:27 crc kubenswrapper[4780]: I1210 10:52:27.486500 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-4rw4n"] Dec 10 10:52:27 crc kubenswrapper[4780]: I1210 10:52:27.487156 4780 kuberuntime_container.go:808] "Killing 
container with a grace period" pod="openshift-marketplace/redhat-marketplace-4rw4n" podUID="a586027d-c0c6-4647-9318-23727f40a928" containerName="registry-server" containerID="cri-o://1c8a00fcf0d6efda62ca9d5a029eda15b72ac370cf5ce7a32b36351de84f1229" gracePeriod=2 Dec 10 10:52:28 crc kubenswrapper[4780]: I1210 10:52:28.175737 4780 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-4rw4n" Dec 10 10:52:28 crc kubenswrapper[4780]: I1210 10:52:28.261911 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a586027d-c0c6-4647-9318-23727f40a928-utilities\") pod \"a586027d-c0c6-4647-9318-23727f40a928\" (UID: \"a586027d-c0c6-4647-9318-23727f40a928\") " Dec 10 10:52:28 crc kubenswrapper[4780]: I1210 10:52:28.262009 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xppl9\" (UniqueName: \"kubernetes.io/projected/a586027d-c0c6-4647-9318-23727f40a928-kube-api-access-xppl9\") pod \"a586027d-c0c6-4647-9318-23727f40a928\" (UID: \"a586027d-c0c6-4647-9318-23727f40a928\") " Dec 10 10:52:28 crc kubenswrapper[4780]: I1210 10:52:28.262062 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a586027d-c0c6-4647-9318-23727f40a928-catalog-content\") pod \"a586027d-c0c6-4647-9318-23727f40a928\" (UID: \"a586027d-c0c6-4647-9318-23727f40a928\") " Dec 10 10:52:28 crc kubenswrapper[4780]: I1210 10:52:28.263399 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a586027d-c0c6-4647-9318-23727f40a928-utilities" (OuterVolumeSpecName: "utilities") pod "a586027d-c0c6-4647-9318-23727f40a928" (UID: "a586027d-c0c6-4647-9318-23727f40a928"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 10:52:28 crc kubenswrapper[4780]: I1210 10:52:28.279572 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a586027d-c0c6-4647-9318-23727f40a928-kube-api-access-xppl9" (OuterVolumeSpecName: "kube-api-access-xppl9") pod "a586027d-c0c6-4647-9318-23727f40a928" (UID: "a586027d-c0c6-4647-9318-23727f40a928"). InnerVolumeSpecName "kube-api-access-xppl9". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 10:52:28 crc kubenswrapper[4780]: I1210 10:52:28.286356 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a586027d-c0c6-4647-9318-23727f40a928-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "a586027d-c0c6-4647-9318-23727f40a928" (UID: "a586027d-c0c6-4647-9318-23727f40a928"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 10:52:28 crc kubenswrapper[4780]: I1210 10:52:28.363564 4780 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a586027d-c0c6-4647-9318-23727f40a928-utilities\") on node \"crc\" DevicePath \"\"" Dec 10 10:52:28 crc kubenswrapper[4780]: I1210 10:52:28.363621 4780 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xppl9\" (UniqueName: \"kubernetes.io/projected/a586027d-c0c6-4647-9318-23727f40a928-kube-api-access-xppl9\") on node \"crc\" DevicePath \"\"" Dec 10 10:52:28 crc kubenswrapper[4780]: I1210 10:52:28.363637 4780 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a586027d-c0c6-4647-9318-23727f40a928-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 10 10:52:28 crc kubenswrapper[4780]: I1210 10:52:28.377845 4780 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-dmtlf" Dec 10 10:52:28 crc kubenswrapper[4780]: I1210 10:52:28.430457 4780 generic.go:334] "Generic (PLEG): container finished" podID="a586027d-c0c6-4647-9318-23727f40a928" containerID="1c8a00fcf0d6efda62ca9d5a029eda15b72ac370cf5ce7a32b36351de84f1229" exitCode=0 Dec 10 10:52:28 crc kubenswrapper[4780]: I1210 10:52:28.430523 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-4rw4n" event={"ID":"a586027d-c0c6-4647-9318-23727f40a928","Type":"ContainerDied","Data":"1c8a00fcf0d6efda62ca9d5a029eda15b72ac370cf5ce7a32b36351de84f1229"} Dec 10 10:52:28 crc kubenswrapper[4780]: I1210 10:52:28.430544 4780 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-4rw4n" Dec 10 10:52:28 crc kubenswrapper[4780]: I1210 10:52:28.430571 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-4rw4n" event={"ID":"a586027d-c0c6-4647-9318-23727f40a928","Type":"ContainerDied","Data":"467c1620a254e16d0c023a9eff579ca083791d523bbf2f043a936680ff0f97b3"} Dec 10 10:52:28 crc kubenswrapper[4780]: I1210 10:52:28.430598 4780 scope.go:117] "RemoveContainer" containerID="1c8a00fcf0d6efda62ca9d5a029eda15b72ac370cf5ce7a32b36351de84f1229" Dec 10 10:52:28 crc kubenswrapper[4780]: I1210 10:52:28.433268 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-dmtlf" event={"ID":"e941c8a4-98a7-48d2-9ec6-4e2dec741b54","Type":"ContainerDied","Data":"719eb0a09273bf8233aaa8e9727236f0ee0fac0587cbacd4b88f79b41238376e"} Dec 10 10:52:28 crc kubenswrapper[4780]: I1210 10:52:28.433357 4780 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-dmtlf" Dec 10 10:52:28 crc kubenswrapper[4780]: I1210 10:52:28.453346 4780 scope.go:117] "RemoveContainer" containerID="b18b8647d364019d1aad35eea124d1a1f8c0da781361da8b7384e94ccc4662b1" Dec 10 10:52:28 crc kubenswrapper[4780]: I1210 10:52:28.469592 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-4rw4n"] Dec 10 10:52:28 crc kubenswrapper[4780]: I1210 10:52:28.473561 4780 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-4rw4n"] Dec 10 10:52:28 crc kubenswrapper[4780]: I1210 10:52:28.542763 4780 scope.go:117] "RemoveContainer" containerID="05abb5bae6a8325e7175a6322ba9238784e131dbf884fbb638440b479afd0894" Dec 10 10:52:28 crc kubenswrapper[4780]: I1210 10:52:28.568022 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dzxwn\" (UniqueName: \"kubernetes.io/projected/e941c8a4-98a7-48d2-9ec6-4e2dec741b54-kube-api-access-dzxwn\") pod \"e941c8a4-98a7-48d2-9ec6-4e2dec741b54\" (UID: \"e941c8a4-98a7-48d2-9ec6-4e2dec741b54\") " Dec 10 10:52:28 crc kubenswrapper[4780]: I1210 10:52:28.568108 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e941c8a4-98a7-48d2-9ec6-4e2dec741b54-utilities\") pod \"e941c8a4-98a7-48d2-9ec6-4e2dec741b54\" (UID: \"e941c8a4-98a7-48d2-9ec6-4e2dec741b54\") " Dec 10 10:52:28 crc kubenswrapper[4780]: I1210 10:52:28.568150 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e941c8a4-98a7-48d2-9ec6-4e2dec741b54-catalog-content\") pod \"e941c8a4-98a7-48d2-9ec6-4e2dec741b54\" (UID: \"e941c8a4-98a7-48d2-9ec6-4e2dec741b54\") " Dec 10 10:52:28 crc kubenswrapper[4780]: I1210 10:52:28.570501 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e941c8a4-98a7-48d2-9ec6-4e2dec741b54-utilities" (OuterVolumeSpecName: "utilities") pod "e941c8a4-98a7-48d2-9ec6-4e2dec741b54" (UID: "e941c8a4-98a7-48d2-9ec6-4e2dec741b54"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 10:52:28 crc kubenswrapper[4780]: I1210 10:52:28.570793 4780 scope.go:117] "RemoveContainer" containerID="1c8a00fcf0d6efda62ca9d5a029eda15b72ac370cf5ce7a32b36351de84f1229" Dec 10 10:52:28 crc kubenswrapper[4780]: E1210 10:52:28.571866 4780 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1c8a00fcf0d6efda62ca9d5a029eda15b72ac370cf5ce7a32b36351de84f1229\": container with ID starting with 1c8a00fcf0d6efda62ca9d5a029eda15b72ac370cf5ce7a32b36351de84f1229 not found: ID does not exist" containerID="1c8a00fcf0d6efda62ca9d5a029eda15b72ac370cf5ce7a32b36351de84f1229" Dec 10 10:52:28 crc kubenswrapper[4780]: I1210 10:52:28.571903 4780 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1c8a00fcf0d6efda62ca9d5a029eda15b72ac370cf5ce7a32b36351de84f1229"} err="failed to get container status \"1c8a00fcf0d6efda62ca9d5a029eda15b72ac370cf5ce7a32b36351de84f1229\": rpc error: code = NotFound desc = could not find container \"1c8a00fcf0d6efda62ca9d5a029eda15b72ac370cf5ce7a32b36351de84f1229\": container with ID starting with 1c8a00fcf0d6efda62ca9d5a029eda15b72ac370cf5ce7a32b36351de84f1229 not found: ID does not exist" Dec 10 10:52:28 crc kubenswrapper[4780]: I1210 10:52:28.572132 4780 scope.go:117] "RemoveContainer" containerID="b18b8647d364019d1aad35eea124d1a1f8c0da781361da8b7384e94ccc4662b1" Dec 10 10:52:28 crc kubenswrapper[4780]: E1210 10:52:28.572716 4780 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b18b8647d364019d1aad35eea124d1a1f8c0da781361da8b7384e94ccc4662b1\": container with ID starting with b18b8647d364019d1aad35eea124d1a1f8c0da781361da8b7384e94ccc4662b1 not found: ID does not exist" containerID="b18b8647d364019d1aad35eea124d1a1f8c0da781361da8b7384e94ccc4662b1" Dec 10 10:52:28 crc kubenswrapper[4780]: I1210 10:52:28.572793 4780 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b18b8647d364019d1aad35eea124d1a1f8c0da781361da8b7384e94ccc4662b1"} err="failed to get container status \"b18b8647d364019d1aad35eea124d1a1f8c0da781361da8b7384e94ccc4662b1\": rpc error: code = NotFound desc = could not find container \"b18b8647d364019d1aad35eea124d1a1f8c0da781361da8b7384e94ccc4662b1\": container with ID starting with b18b8647d364019d1aad35eea124d1a1f8c0da781361da8b7384e94ccc4662b1 not found: ID does not exist" Dec 10 10:52:28 crc kubenswrapper[4780]: I1210 10:52:28.572850 4780 scope.go:117] "RemoveContainer" containerID="05abb5bae6a8325e7175a6322ba9238784e131dbf884fbb638440b479afd0894" Dec 10 10:52:28 crc kubenswrapper[4780]: E1210 10:52:28.573332 4780 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"05abb5bae6a8325e7175a6322ba9238784e131dbf884fbb638440b479afd0894\": container with ID starting with 05abb5bae6a8325e7175a6322ba9238784e131dbf884fbb638440b479afd0894 not found: ID does not exist" containerID="05abb5bae6a8325e7175a6322ba9238784e131dbf884fbb638440b479afd0894" Dec 10 10:52:28 crc kubenswrapper[4780]: I1210 10:52:28.573391 4780 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"05abb5bae6a8325e7175a6322ba9238784e131dbf884fbb638440b479afd0894"} err="failed to get container status \"05abb5bae6a8325e7175a6322ba9238784e131dbf884fbb638440b479afd0894\": rpc error: code = NotFound desc = could not 
find container \"05abb5bae6a8325e7175a6322ba9238784e131dbf884fbb638440b479afd0894\": container with ID starting with 05abb5bae6a8325e7175a6322ba9238784e131dbf884fbb638440b479afd0894 not found: ID does not exist" Dec 10 10:52:28 crc kubenswrapper[4780]: I1210 10:52:28.573420 4780 scope.go:117] "RemoveContainer" containerID="0942019c25a27331a47beb7a056cf3f5cd236ad087179cacba6054f6537f7ae9" Dec 10 10:52:28 crc kubenswrapper[4780]: I1210 10:52:28.573913 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e941c8a4-98a7-48d2-9ec6-4e2dec741b54-kube-api-access-dzxwn" (OuterVolumeSpecName: "kube-api-access-dzxwn") pod "e941c8a4-98a7-48d2-9ec6-4e2dec741b54" (UID: "e941c8a4-98a7-48d2-9ec6-4e2dec741b54"). InnerVolumeSpecName "kube-api-access-dzxwn". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 10:52:28 crc kubenswrapper[4780]: I1210 10:52:28.594181 4780 scope.go:117] "RemoveContainer" containerID="94d624d83d382d29308fc93a0eb3ff9355627d86e2595703b15b7c9097e78fd8" Dec 10 10:52:28 crc kubenswrapper[4780]: I1210 10:52:28.630035 4780 scope.go:117] "RemoveContainer" containerID="bcb09aca48c3719eedd5ae3b18893f834e9b0de00d6405d0dcec6f54cb60bfcf" Dec 10 10:52:28 crc kubenswrapper[4780]: I1210 10:52:28.672073 4780 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dzxwn\" (UniqueName: \"kubernetes.io/projected/e941c8a4-98a7-48d2-9ec6-4e2dec741b54-kube-api-access-dzxwn\") on node \"crc\" DevicePath \"\"" Dec 10 10:52:28 crc kubenswrapper[4780]: I1210 10:52:28.672142 4780 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e941c8a4-98a7-48d2-9ec6-4e2dec741b54-utilities\") on node \"crc\" DevicePath \"\"" Dec 10 10:52:28 crc kubenswrapper[4780]: I1210 10:52:28.700664 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e941c8a4-98a7-48d2-9ec6-4e2dec741b54-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "e941c8a4-98a7-48d2-9ec6-4e2dec741b54" (UID: "e941c8a4-98a7-48d2-9ec6-4e2dec741b54"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 10:52:28 crc kubenswrapper[4780]: I1210 10:52:28.775574 4780 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e941c8a4-98a7-48d2-9ec6-4e2dec741b54-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 10 10:52:28 crc kubenswrapper[4780]: I1210 10:52:28.793989 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-dmtlf"] Dec 10 10:52:28 crc kubenswrapper[4780]: I1210 10:52:28.802144 4780 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-dmtlf"] Dec 10 10:52:29 crc kubenswrapper[4780]: I1210 10:52:29.274469 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-7sdgl"] Dec 10 10:52:29 crc kubenswrapper[4780]: I1210 10:52:29.274867 4780 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-7sdgl" podUID="983b01b2-448a-462b-a87c-8d66c7824940" containerName="registry-server" containerID="cri-o://80a943af72a32c49e07a666ca4042a452eaf9cb54bc1c750edde8da1a1e61d61" gracePeriod=2 Dec 10 10:52:29 crc kubenswrapper[4780]: I1210 10:52:29.470217 4780 generic.go:334] "Generic (PLEG): container finished" podID="983b01b2-448a-462b-a87c-8d66c7824940" containerID="80a943af72a32c49e07a666ca4042a452eaf9cb54bc1c750edde8da1a1e61d61" exitCode=0 Dec 10 10:52:29 crc kubenswrapper[4780]: I1210 10:52:29.470297 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-7sdgl" event={"ID":"983b01b2-448a-462b-a87c-8d66c7824940","Type":"ContainerDied","Data":"80a943af72a32c49e07a666ca4042a452eaf9cb54bc1c750edde8da1a1e61d61"} Dec 10 10:52:29 crc kubenswrapper[4780]: I1210 10:52:29.769181 4780 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-7sdgl" Dec 10 10:52:30 crc kubenswrapper[4780]: I1210 10:52:30.007177 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/983b01b2-448a-462b-a87c-8d66c7824940-utilities\") pod \"983b01b2-448a-462b-a87c-8d66c7824940\" (UID: \"983b01b2-448a-462b-a87c-8d66c7824940\") " Dec 10 10:52:30 crc kubenswrapper[4780]: I1210 10:52:30.007334 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/983b01b2-448a-462b-a87c-8d66c7824940-catalog-content\") pod \"983b01b2-448a-462b-a87c-8d66c7824940\" (UID: \"983b01b2-448a-462b-a87c-8d66c7824940\") " Dec 10 10:52:30 crc kubenswrapper[4780]: I1210 10:52:30.007394 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5kgr5\" (UniqueName: \"kubernetes.io/projected/983b01b2-448a-462b-a87c-8d66c7824940-kube-api-access-5kgr5\") pod \"983b01b2-448a-462b-a87c-8d66c7824940\" (UID: \"983b01b2-448a-462b-a87c-8d66c7824940\") " Dec 10 10:52:30 crc kubenswrapper[4780]: I1210 10:52:30.008656 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/983b01b2-448a-462b-a87c-8d66c7824940-utilities" (OuterVolumeSpecName: "utilities") pod "983b01b2-448a-462b-a87c-8d66c7824940" (UID: "983b01b2-448a-462b-a87c-8d66c7824940"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 10:52:30 crc kubenswrapper[4780]: I1210 10:52:30.016219 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/983b01b2-448a-462b-a87c-8d66c7824940-kube-api-access-5kgr5" (OuterVolumeSpecName: "kube-api-access-5kgr5") pod "983b01b2-448a-462b-a87c-8d66c7824940" (UID: "983b01b2-448a-462b-a87c-8d66c7824940"). InnerVolumeSpecName "kube-api-access-5kgr5". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 10:52:30 crc kubenswrapper[4780]: I1210 10:52:30.017190 4780 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a586027d-c0c6-4647-9318-23727f40a928" path="/var/lib/kubelet/pods/a586027d-c0c6-4647-9318-23727f40a928/volumes" Dec 10 10:52:30 crc kubenswrapper[4780]: I1210 10:52:30.018119 4780 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e941c8a4-98a7-48d2-9ec6-4e2dec741b54" path="/var/lib/kubelet/pods/e941c8a4-98a7-48d2-9ec6-4e2dec741b54/volumes" Dec 10 10:52:30 crc kubenswrapper[4780]: I1210 10:52:30.037470 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-zms8r"] Dec 10 10:52:30 crc kubenswrapper[4780]: I1210 10:52:30.037825 4780 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-zms8r" podUID="73eb2180-ca1d-4860-9306-982a9b3930b9" containerName="registry-server" containerID="cri-o://370e5bf8aa13ca61758761decbac6952ee5921cefd8a8c1a839b84019738661e" gracePeriod=2 Dec 10 10:52:30 crc kubenswrapper[4780]: I1210 10:52:30.094480 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/983b01b2-448a-462b-a87c-8d66c7824940-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "983b01b2-448a-462b-a87c-8d66c7824940" (UID: "983b01b2-448a-462b-a87c-8d66c7824940"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 10:52:30 crc kubenswrapper[4780]: I1210 10:52:30.108547 4780 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/983b01b2-448a-462b-a87c-8d66c7824940-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 10 10:52:30 crc kubenswrapper[4780]: I1210 10:52:30.108606 4780 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5kgr5\" (UniqueName: \"kubernetes.io/projected/983b01b2-448a-462b-a87c-8d66c7824940-kube-api-access-5kgr5\") on node \"crc\" DevicePath \"\"" Dec 10 10:52:30 crc kubenswrapper[4780]: I1210 10:52:30.108619 4780 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/983b01b2-448a-462b-a87c-8d66c7824940-utilities\") on node \"crc\" DevicePath \"\"" Dec 10 10:52:30 crc kubenswrapper[4780]: I1210 10:52:30.480033 4780 generic.go:334] "Generic (PLEG): container finished" podID="73eb2180-ca1d-4860-9306-982a9b3930b9" containerID="370e5bf8aa13ca61758761decbac6952ee5921cefd8a8c1a839b84019738661e" exitCode=0 Dec 10 10:52:30 crc kubenswrapper[4780]: I1210 10:52:30.480112 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-zms8r" event={"ID":"73eb2180-ca1d-4860-9306-982a9b3930b9","Type":"ContainerDied","Data":"370e5bf8aa13ca61758761decbac6952ee5921cefd8a8c1a839b84019738661e"} Dec 10 10:52:30 crc kubenswrapper[4780]: I1210 10:52:30.482488 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-7sdgl" event={"ID":"983b01b2-448a-462b-a87c-8d66c7824940","Type":"ContainerDied","Data":"f3bf79a4f10bc9e0ec93b6bf81163f03bc8eaa24ccd42bce6ee80e87707fb3e0"} Dec 10 10:52:30 crc kubenswrapper[4780]: I1210 10:52:30.482535 4780 scope.go:117] "RemoveContainer" containerID="80a943af72a32c49e07a666ca4042a452eaf9cb54bc1c750edde8da1a1e61d61" Dec 10 10:52:30 crc kubenswrapper[4780]: I1210 10:52:30.482730 4780 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-7sdgl" Dec 10 10:52:30 crc kubenswrapper[4780]: I1210 10:52:30.510971 4780 scope.go:117] "RemoveContainer" containerID="6d99b04e3ee62edfaad174148bb8dd2abbc46b1ce82328809a96d4e689f4ab6a" Dec 10 10:52:30 crc kubenswrapper[4780]: I1210 10:52:30.523749 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-7sdgl"] Dec 10 10:52:30 crc kubenswrapper[4780]: I1210 10:52:30.527763 4780 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-7sdgl"] Dec 10 10:52:30 crc kubenswrapper[4780]: I1210 10:52:30.570416 4780 scope.go:117] "RemoveContainer" containerID="824b8cfa81539bbf8eb4c4cf207a185d22c32d0f0fba83be23f33e8020532592" Dec 10 10:52:30 crc kubenswrapper[4780]: I1210 10:52:30.582633 4780 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-zms8r" Dec 10 10:52:30 crc kubenswrapper[4780]: I1210 10:52:30.718539 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7k8m7\" (UniqueName: \"kubernetes.io/projected/73eb2180-ca1d-4860-9306-982a9b3930b9-kube-api-access-7k8m7\") pod \"73eb2180-ca1d-4860-9306-982a9b3930b9\" (UID: \"73eb2180-ca1d-4860-9306-982a9b3930b9\") " Dec 10 10:52:30 crc kubenswrapper[4780]: I1210 10:52:30.718697 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/73eb2180-ca1d-4860-9306-982a9b3930b9-catalog-content\") pod \"73eb2180-ca1d-4860-9306-982a9b3930b9\" (UID: \"73eb2180-ca1d-4860-9306-982a9b3930b9\") " Dec 10 10:52:30 crc kubenswrapper[4780]: I1210 10:52:30.718756 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/73eb2180-ca1d-4860-9306-982a9b3930b9-utilities\") pod \"73eb2180-ca1d-4860-9306-982a9b3930b9\" (UID: \"73eb2180-ca1d-4860-9306-982a9b3930b9\") " Dec 10 10:52:30 crc kubenswrapper[4780]: I1210 10:52:30.720832 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/73eb2180-ca1d-4860-9306-982a9b3930b9-utilities" (OuterVolumeSpecName: "utilities") pod "73eb2180-ca1d-4860-9306-982a9b3930b9" (UID: "73eb2180-ca1d-4860-9306-982a9b3930b9"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 10:52:30 crc kubenswrapper[4780]: I1210 10:52:30.729072 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/73eb2180-ca1d-4860-9306-982a9b3930b9-kube-api-access-7k8m7" (OuterVolumeSpecName: "kube-api-access-7k8m7") pod "73eb2180-ca1d-4860-9306-982a9b3930b9" (UID: "73eb2180-ca1d-4860-9306-982a9b3930b9"). InnerVolumeSpecName "kube-api-access-7k8m7". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 10:52:30 crc kubenswrapper[4780]: I1210 10:52:30.768801 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/73eb2180-ca1d-4860-9306-982a9b3930b9-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "73eb2180-ca1d-4860-9306-982a9b3930b9" (UID: "73eb2180-ca1d-4860-9306-982a9b3930b9"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 10:52:30 crc kubenswrapper[4780]: I1210 10:52:30.820733 4780 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7k8m7\" (UniqueName: \"kubernetes.io/projected/73eb2180-ca1d-4860-9306-982a9b3930b9-kube-api-access-7k8m7\") on node \"crc\" DevicePath \"\"" Dec 10 10:52:30 crc kubenswrapper[4780]: I1210 10:52:30.820836 4780 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/73eb2180-ca1d-4860-9306-982a9b3930b9-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 10 10:52:30 crc kubenswrapper[4780]: I1210 10:52:30.820872 4780 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/73eb2180-ca1d-4860-9306-982a9b3930b9-utilities\") on node \"crc\" DevicePath \"\"" Dec 10 10:52:31 crc kubenswrapper[4780]: I1210 10:52:31.492502 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-zms8r" event={"ID":"73eb2180-ca1d-4860-9306-982a9b3930b9","Type":"ContainerDied","Data":"301711943ea4535a8eaeb1986744bf60809f1cae1e308d120c426f2c5121f91c"} Dec 10 10:52:31 crc kubenswrapper[4780]: I1210 10:52:31.492610 4780 scope.go:117] "RemoveContainer" containerID="370e5bf8aa13ca61758761decbac6952ee5921cefd8a8c1a839b84019738661e" Dec 10 10:52:31 crc kubenswrapper[4780]: I1210 10:52:31.492596 4780 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-zms8r" Dec 10 10:52:31 crc kubenswrapper[4780]: I1210 10:52:31.521474 4780 scope.go:117] "RemoveContainer" containerID="35a434c9893c108c4eb1ee0971c19bbc7d0bca566408260a579c9f845ff1d73e" Dec 10 10:52:31 crc kubenswrapper[4780]: I1210 10:52:31.527421 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-zms8r"] Dec 10 10:52:31 crc kubenswrapper[4780]: I1210 10:52:31.546738 4780 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-zms8r"] Dec 10 10:52:31 crc kubenswrapper[4780]: I1210 10:52:31.560838 4780 scope.go:117] "RemoveContainer" containerID="cce45565e624e60bcce24122b2e5905b07c6dacda21fd7a2a4204e55acdeb6d5" Dec 10 10:52:31 crc kubenswrapper[4780]: I1210 10:52:31.968305 4780 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="73eb2180-ca1d-4860-9306-982a9b3930b9" path="/var/lib/kubelet/pods/73eb2180-ca1d-4860-9306-982a9b3930b9/volumes" Dec 10 10:52:31 crc kubenswrapper[4780]: I1210 10:52:31.969272 4780 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="983b01b2-448a-462b-a87c-8d66c7824940" path="/var/lib/kubelet/pods/983b01b2-448a-462b-a87c-8d66c7824940/volumes" Dec 10 10:52:35 crc kubenswrapper[4780]: I1210 10:52:35.325486 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/image-registry-66df7c8f76-l9h98"] Dec 10 10:52:35 crc kubenswrapper[4780]: E1210 10:52:35.326283 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e941c8a4-98a7-48d2-9ec6-4e2dec741b54" containerName="extract-utilities" Dec 10 10:52:35 crc kubenswrapper[4780]: I1210 10:52:35.326336 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="e941c8a4-98a7-48d2-9ec6-4e2dec741b54" containerName="extract-utilities" Dec 10 10:52:35 crc kubenswrapper[4780]: E1210 10:52:35.326349 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="983b01b2-448a-462b-a87c-8d66c7824940" containerName="extract-content" 
Dec 10 10:52:35 crc kubenswrapper[4780]: I1210 10:52:35.326358 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="983b01b2-448a-462b-a87c-8d66c7824940" containerName="extract-content" Dec 10 10:52:35 crc kubenswrapper[4780]: E1210 10:52:35.326375 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a586027d-c0c6-4647-9318-23727f40a928" containerName="extract-utilities" Dec 10 10:52:35 crc kubenswrapper[4780]: I1210 10:52:35.326385 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="a586027d-c0c6-4647-9318-23727f40a928" containerName="extract-utilities" Dec 10 10:52:35 crc kubenswrapper[4780]: E1210 10:52:35.326394 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="983b01b2-448a-462b-a87c-8d66c7824940" containerName="registry-server" Dec 10 10:52:35 crc kubenswrapper[4780]: I1210 10:52:35.326402 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="983b01b2-448a-462b-a87c-8d66c7824940" containerName="registry-server" Dec 10 10:52:35 crc kubenswrapper[4780]: E1210 10:52:35.326417 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a586027d-c0c6-4647-9318-23727f40a928" containerName="extract-content" Dec 10 10:52:35 crc kubenswrapper[4780]: I1210 10:52:35.326425 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="a586027d-c0c6-4647-9318-23727f40a928" containerName="extract-content" Dec 10 10:52:35 crc kubenswrapper[4780]: E1210 10:52:35.326440 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a586027d-c0c6-4647-9318-23727f40a928" containerName="registry-server" Dec 10 10:52:35 crc kubenswrapper[4780]: I1210 10:52:35.326450 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="a586027d-c0c6-4647-9318-23727f40a928" containerName="registry-server" Dec 10 10:52:35 crc kubenswrapper[4780]: E1210 10:52:35.326461 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="73eb2180-ca1d-4860-9306-982a9b3930b9" containerName="registry-server" Dec 10 10:52:35 crc kubenswrapper[4780]: I1210 10:52:35.326469 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="73eb2180-ca1d-4860-9306-982a9b3930b9" containerName="registry-server" Dec 10 10:52:35 crc kubenswrapper[4780]: E1210 10:52:35.326482 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e941c8a4-98a7-48d2-9ec6-4e2dec741b54" containerName="registry-server" Dec 10 10:52:35 crc kubenswrapper[4780]: I1210 10:52:35.326490 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="e941c8a4-98a7-48d2-9ec6-4e2dec741b54" containerName="registry-server" Dec 10 10:52:35 crc kubenswrapper[4780]: E1210 10:52:35.326503 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="73eb2180-ca1d-4860-9306-982a9b3930b9" containerName="extract-utilities" Dec 10 10:52:35 crc kubenswrapper[4780]: I1210 10:52:35.326511 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="73eb2180-ca1d-4860-9306-982a9b3930b9" containerName="extract-utilities" Dec 10 10:52:35 crc kubenswrapper[4780]: E1210 10:52:35.326522 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e941c8a4-98a7-48d2-9ec6-4e2dec741b54" containerName="extract-content" Dec 10 10:52:35 crc kubenswrapper[4780]: I1210 10:52:35.326531 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="e941c8a4-98a7-48d2-9ec6-4e2dec741b54" containerName="extract-content" Dec 10 10:52:35 crc kubenswrapper[4780]: E1210 10:52:35.326545 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="983b01b2-448a-462b-a87c-8d66c7824940" 
containerName="extract-utilities" Dec 10 10:52:35 crc kubenswrapper[4780]: I1210 10:52:35.326553 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="983b01b2-448a-462b-a87c-8d66c7824940" containerName="extract-utilities" Dec 10 10:52:35 crc kubenswrapper[4780]: E1210 10:52:35.326565 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="73eb2180-ca1d-4860-9306-982a9b3930b9" containerName="extract-content" Dec 10 10:52:35 crc kubenswrapper[4780]: I1210 10:52:35.326573 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="73eb2180-ca1d-4860-9306-982a9b3930b9" containerName="extract-content" Dec 10 10:52:35 crc kubenswrapper[4780]: I1210 10:52:35.326728 4780 memory_manager.go:354] "RemoveStaleState removing state" podUID="a586027d-c0c6-4647-9318-23727f40a928" containerName="registry-server" Dec 10 10:52:35 crc kubenswrapper[4780]: I1210 10:52:35.326744 4780 memory_manager.go:354] "RemoveStaleState removing state" podUID="73eb2180-ca1d-4860-9306-982a9b3930b9" containerName="registry-server" Dec 10 10:52:35 crc kubenswrapper[4780]: I1210 10:52:35.326759 4780 memory_manager.go:354] "RemoveStaleState removing state" podUID="e941c8a4-98a7-48d2-9ec6-4e2dec741b54" containerName="registry-server" Dec 10 10:52:35 crc kubenswrapper[4780]: I1210 10:52:35.326773 4780 memory_manager.go:354] "RemoveStaleState removing state" podUID="983b01b2-448a-462b-a87c-8d66c7824940" containerName="registry-server" Dec 10 10:52:35 crc kubenswrapper[4780]: I1210 10:52:35.327375 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-66df7c8f76-l9h98" Dec 10 10:52:35 crc kubenswrapper[4780]: I1210 10:52:35.347148 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-66df7c8f76-l9h98"] Dec 10 10:52:35 crc kubenswrapper[4780]: I1210 10:52:35.528386 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-66df7c8f76-l9h98\" (UID: \"f63725cd-0e4f-4daa-a6c5-7a8c08ac9a64\") " pod="openshift-image-registry/image-registry-66df7c8f76-l9h98" Dec 10 10:52:35 crc kubenswrapper[4780]: I1210 10:52:35.528490 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pmnrk\" (UniqueName: \"kubernetes.io/projected/f63725cd-0e4f-4daa-a6c5-7a8c08ac9a64-kube-api-access-pmnrk\") pod \"image-registry-66df7c8f76-l9h98\" (UID: \"f63725cd-0e4f-4daa-a6c5-7a8c08ac9a64\") " pod="openshift-image-registry/image-registry-66df7c8f76-l9h98" Dec 10 10:52:35 crc kubenswrapper[4780]: I1210 10:52:35.528553 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/f63725cd-0e4f-4daa-a6c5-7a8c08ac9a64-registry-tls\") pod \"image-registry-66df7c8f76-l9h98\" (UID: \"f63725cd-0e4f-4daa-a6c5-7a8c08ac9a64\") " pod="openshift-image-registry/image-registry-66df7c8f76-l9h98" Dec 10 10:52:35 crc kubenswrapper[4780]: I1210 10:52:35.528723 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/f63725cd-0e4f-4daa-a6c5-7a8c08ac9a64-installation-pull-secrets\") pod \"image-registry-66df7c8f76-l9h98\" (UID: \"f63725cd-0e4f-4daa-a6c5-7a8c08ac9a64\") " 
pod="openshift-image-registry/image-registry-66df7c8f76-l9h98" Dec 10 10:52:35 crc kubenswrapper[4780]: I1210 10:52:35.528863 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/f63725cd-0e4f-4daa-a6c5-7a8c08ac9a64-trusted-ca\") pod \"image-registry-66df7c8f76-l9h98\" (UID: \"f63725cd-0e4f-4daa-a6c5-7a8c08ac9a64\") " pod="openshift-image-registry/image-registry-66df7c8f76-l9h98" Dec 10 10:52:35 crc kubenswrapper[4780]: I1210 10:52:35.528952 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/f63725cd-0e4f-4daa-a6c5-7a8c08ac9a64-registry-certificates\") pod \"image-registry-66df7c8f76-l9h98\" (UID: \"f63725cd-0e4f-4daa-a6c5-7a8c08ac9a64\") " pod="openshift-image-registry/image-registry-66df7c8f76-l9h98" Dec 10 10:52:35 crc kubenswrapper[4780]: I1210 10:52:35.529105 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/f63725cd-0e4f-4daa-a6c5-7a8c08ac9a64-ca-trust-extracted\") pod \"image-registry-66df7c8f76-l9h98\" (UID: \"f63725cd-0e4f-4daa-a6c5-7a8c08ac9a64\") " pod="openshift-image-registry/image-registry-66df7c8f76-l9h98" Dec 10 10:52:35 crc kubenswrapper[4780]: I1210 10:52:35.529147 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/f63725cd-0e4f-4daa-a6c5-7a8c08ac9a64-bound-sa-token\") pod \"image-registry-66df7c8f76-l9h98\" (UID: \"f63725cd-0e4f-4daa-a6c5-7a8c08ac9a64\") " pod="openshift-image-registry/image-registry-66df7c8f76-l9h98" Dec 10 10:52:35 crc kubenswrapper[4780]: I1210 10:52:35.556902 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-66df7c8f76-l9h98\" (UID: \"f63725cd-0e4f-4daa-a6c5-7a8c08ac9a64\") " pod="openshift-image-registry/image-registry-66df7c8f76-l9h98" Dec 10 10:52:35 crc kubenswrapper[4780]: I1210 10:52:35.631076 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pmnrk\" (UniqueName: \"kubernetes.io/projected/f63725cd-0e4f-4daa-a6c5-7a8c08ac9a64-kube-api-access-pmnrk\") pod \"image-registry-66df7c8f76-l9h98\" (UID: \"f63725cd-0e4f-4daa-a6c5-7a8c08ac9a64\") " pod="openshift-image-registry/image-registry-66df7c8f76-l9h98" Dec 10 10:52:35 crc kubenswrapper[4780]: I1210 10:52:35.631187 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/f63725cd-0e4f-4daa-a6c5-7a8c08ac9a64-registry-tls\") pod \"image-registry-66df7c8f76-l9h98\" (UID: \"f63725cd-0e4f-4daa-a6c5-7a8c08ac9a64\") " pod="openshift-image-registry/image-registry-66df7c8f76-l9h98" Dec 10 10:52:35 crc kubenswrapper[4780]: I1210 10:52:35.631236 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/f63725cd-0e4f-4daa-a6c5-7a8c08ac9a64-installation-pull-secrets\") pod \"image-registry-66df7c8f76-l9h98\" (UID: \"f63725cd-0e4f-4daa-a6c5-7a8c08ac9a64\") " pod="openshift-image-registry/image-registry-66df7c8f76-l9h98" Dec 10 10:52:35 crc kubenswrapper[4780]: I1210 
10:52:35.631288 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/f63725cd-0e4f-4daa-a6c5-7a8c08ac9a64-trusted-ca\") pod \"image-registry-66df7c8f76-l9h98\" (UID: \"f63725cd-0e4f-4daa-a6c5-7a8c08ac9a64\") " pod="openshift-image-registry/image-registry-66df7c8f76-l9h98" Dec 10 10:52:35 crc kubenswrapper[4780]: I1210 10:52:35.631322 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/f63725cd-0e4f-4daa-a6c5-7a8c08ac9a64-registry-certificates\") pod \"image-registry-66df7c8f76-l9h98\" (UID: \"f63725cd-0e4f-4daa-a6c5-7a8c08ac9a64\") " pod="openshift-image-registry/image-registry-66df7c8f76-l9h98" Dec 10 10:52:35 crc kubenswrapper[4780]: I1210 10:52:35.631361 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/f63725cd-0e4f-4daa-a6c5-7a8c08ac9a64-ca-trust-extracted\") pod \"image-registry-66df7c8f76-l9h98\" (UID: \"f63725cd-0e4f-4daa-a6c5-7a8c08ac9a64\") " pod="openshift-image-registry/image-registry-66df7c8f76-l9h98" Dec 10 10:52:35 crc kubenswrapper[4780]: I1210 10:52:35.631386 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/f63725cd-0e4f-4daa-a6c5-7a8c08ac9a64-bound-sa-token\") pod \"image-registry-66df7c8f76-l9h98\" (UID: \"f63725cd-0e4f-4daa-a6c5-7a8c08ac9a64\") " pod="openshift-image-registry/image-registry-66df7c8f76-l9h98" Dec 10 10:52:35 crc kubenswrapper[4780]: I1210 10:52:35.632313 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/f63725cd-0e4f-4daa-a6c5-7a8c08ac9a64-ca-trust-extracted\") pod \"image-registry-66df7c8f76-l9h98\" (UID: \"f63725cd-0e4f-4daa-a6c5-7a8c08ac9a64\") " pod="openshift-image-registry/image-registry-66df7c8f76-l9h98" Dec 10 10:52:35 crc kubenswrapper[4780]: I1210 10:52:35.633250 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/f63725cd-0e4f-4daa-a6c5-7a8c08ac9a64-registry-certificates\") pod \"image-registry-66df7c8f76-l9h98\" (UID: \"f63725cd-0e4f-4daa-a6c5-7a8c08ac9a64\") " pod="openshift-image-registry/image-registry-66df7c8f76-l9h98" Dec 10 10:52:35 crc kubenswrapper[4780]: I1210 10:52:35.633582 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/f63725cd-0e4f-4daa-a6c5-7a8c08ac9a64-trusted-ca\") pod \"image-registry-66df7c8f76-l9h98\" (UID: \"f63725cd-0e4f-4daa-a6c5-7a8c08ac9a64\") " pod="openshift-image-registry/image-registry-66df7c8f76-l9h98" Dec 10 10:52:35 crc kubenswrapper[4780]: I1210 10:52:35.640336 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/f63725cd-0e4f-4daa-a6c5-7a8c08ac9a64-registry-tls\") pod \"image-registry-66df7c8f76-l9h98\" (UID: \"f63725cd-0e4f-4daa-a6c5-7a8c08ac9a64\") " pod="openshift-image-registry/image-registry-66df7c8f76-l9h98" Dec 10 10:52:35 crc kubenswrapper[4780]: I1210 10:52:35.640748 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/f63725cd-0e4f-4daa-a6c5-7a8c08ac9a64-installation-pull-secrets\") pod \"image-registry-66df7c8f76-l9h98\" (UID: 
\"f63725cd-0e4f-4daa-a6c5-7a8c08ac9a64\") " pod="openshift-image-registry/image-registry-66df7c8f76-l9h98" Dec 10 10:52:35 crc kubenswrapper[4780]: I1210 10:52:35.650502 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/f63725cd-0e4f-4daa-a6c5-7a8c08ac9a64-bound-sa-token\") pod \"image-registry-66df7c8f76-l9h98\" (UID: \"f63725cd-0e4f-4daa-a6c5-7a8c08ac9a64\") " pod="openshift-image-registry/image-registry-66df7c8f76-l9h98" Dec 10 10:52:35 crc kubenswrapper[4780]: I1210 10:52:35.654870 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pmnrk\" (UniqueName: \"kubernetes.io/projected/f63725cd-0e4f-4daa-a6c5-7a8c08ac9a64-kube-api-access-pmnrk\") pod \"image-registry-66df7c8f76-l9h98\" (UID: \"f63725cd-0e4f-4daa-a6c5-7a8c08ac9a64\") " pod="openshift-image-registry/image-registry-66df7c8f76-l9h98" Dec 10 10:52:35 crc kubenswrapper[4780]: I1210 10:52:35.945903 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-66df7c8f76-l9h98" Dec 10 10:52:36 crc kubenswrapper[4780]: I1210 10:52:36.392951 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-66df7c8f76-l9h98"] Dec 10 10:52:36 crc kubenswrapper[4780]: I1210 10:52:36.841622 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-66df7c8f76-l9h98" event={"ID":"f63725cd-0e4f-4daa-a6c5-7a8c08ac9a64","Type":"ContainerStarted","Data":"af6ee528e48ba7b9376f407e9889f3e92bc88f00b1a95e0e46af917d49ec14ce"} Dec 10 10:52:36 crc kubenswrapper[4780]: I1210 10:52:36.841704 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-66df7c8f76-l9h98" event={"ID":"f63725cd-0e4f-4daa-a6c5-7a8c08ac9a64","Type":"ContainerStarted","Data":"a0d884b1c69a2bcef42f0bc43f9f3e1e77ab71bc53a1ba65cd5ce2683fba2397"} Dec 10 10:52:36 crc kubenswrapper[4780]: I1210 10:52:36.841891 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-image-registry/image-registry-66df7c8f76-l9h98" Dec 10 10:52:36 crc kubenswrapper[4780]: I1210 10:52:36.861702 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/image-registry-66df7c8f76-l9h98" podStartSLOduration=1.8616699209999998 podStartE2EDuration="1.861669921s" podCreationTimestamp="2025-12-10 10:52:35 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 10:52:36.860014997 +0000 UTC m=+461.713408440" watchObservedRunningTime="2025-12-10 10:52:36.861669921 +0000 UTC m=+461.715063364" Dec 10 10:52:46 crc kubenswrapper[4780]: I1210 10:52:46.898645 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-pdmqg"] Dec 10 10:52:46 crc kubenswrapper[4780]: I1210 10:52:46.901681 4780 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-pdmqg" podUID="59f1ed36-eccd-4cd4-af95-f32539d40314" containerName="registry-server" containerID="cri-o://40bd3673ff7560022ea7f5b86aae2064c5fdb9df3c95e9b91200aee7dcf9f287" gracePeriod=30 Dec 10 10:52:46 crc kubenswrapper[4780]: I1210 10:52:46.922116 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-z8b9w"] Dec 10 10:52:46 crc kubenswrapper[4780]: I1210 10:52:46.922874 4780 
kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-z8b9w" podUID="3234cf0e-6206-4a41-8474-f1893163954f" containerName="registry-server" containerID="cri-o://26b24261800cb493594541c099e3f0c0ca1751477a786957168881d45803aa1f" gracePeriod=30 Dec 10 10:52:46 crc kubenswrapper[4780]: I1210 10:52:46.932825 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-lrzpt"] Dec 10 10:52:46 crc kubenswrapper[4780]: I1210 10:52:46.933191 4780 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/marketplace-operator-79b997595-lrzpt" podUID="90ac2cea-e1c2-479b-8de0-0917f3779a13" containerName="marketplace-operator" containerID="cri-o://e6e7b0ed6d645f4c93f46695bf63bb1ece776bfbdb34f2e6a769691fe1f2a252" gracePeriod=30 Dec 10 10:52:46 crc kubenswrapper[4780]: I1210 10:52:46.941604 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-w2bd9"] Dec 10 10:52:46 crc kubenswrapper[4780]: I1210 10:52:46.942116 4780 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-w2bd9" podUID="eee88117-019c-44a5-8a7f-95a655e53a27" containerName="registry-server" containerID="cri-o://d54ce9b7069d21114f30d42a63e22ec3f0f98c073aacbcb441efcd851dd7be1e" gracePeriod=30 Dec 10 10:52:46 crc kubenswrapper[4780]: I1210 10:52:46.957166 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-jmx2c"] Dec 10 10:52:46 crc kubenswrapper[4780]: I1210 10:52:46.957668 4780 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-jmx2c" podUID="9a59ce5a-0c36-4120-be63-8f2051a58e78" containerName="registry-server" containerID="cri-o://c9523d172441d49bf4445ad06c5bd41d911bb83542ec6057cd86d9881a8dca4f" gracePeriod=30 Dec 10 10:52:46 crc kubenswrapper[4780]: I1210 10:52:46.963878 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-s6pjk"] Dec 10 10:52:46 crc kubenswrapper[4780]: I1210 10:52:46.974393 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-s6pjk" Dec 10 10:52:46 crc kubenswrapper[4780]: I1210 10:52:46.975889 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-s6pjk"] Dec 10 10:52:47 crc kubenswrapper[4780]: I1210 10:52:46.986541 4780 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-lrzpt container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.38:8080/healthz\": dial tcp 10.217.0.38:8080: connect: connection refused" start-of-body= Dec 10 10:52:47 crc kubenswrapper[4780]: I1210 10:52:46.986626 4780 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-lrzpt" podUID="90ac2cea-e1c2-479b-8de0-0917f3779a13" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.38:8080/healthz\": dial tcp 10.217.0.38:8080: connect: connection refused" Dec 10 10:52:47 crc kubenswrapper[4780]: I1210 10:52:47.027814 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6cf858fd97-cg8vf"] Dec 10 10:52:47 crc kubenswrapper[4780]: I1210 10:52:47.028157 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/7f5730ec-f362-4d4f-a032-9965ace15473-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-s6pjk\" (UID: \"7f5730ec-f362-4d4f-a032-9965ace15473\") " pod="openshift-marketplace/marketplace-operator-79b997595-s6pjk" Dec 10 10:52:47 crc kubenswrapper[4780]: I1210 10:52:47.028214 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/7f5730ec-f362-4d4f-a032-9965ace15473-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-s6pjk\" (UID: \"7f5730ec-f362-4d4f-a032-9965ace15473\") " pod="openshift-marketplace/marketplace-operator-79b997595-s6pjk" Dec 10 10:52:47 crc kubenswrapper[4780]: I1210 10:52:47.028235 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jll78\" (UniqueName: \"kubernetes.io/projected/7f5730ec-f362-4d4f-a032-9965ace15473-kube-api-access-jll78\") pod \"marketplace-operator-79b997595-s6pjk\" (UID: \"7f5730ec-f362-4d4f-a032-9965ace15473\") " pod="openshift-marketplace/marketplace-operator-79b997595-s6pjk" Dec 10 10:52:47 crc kubenswrapper[4780]: I1210 10:52:47.028246 4780 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-route-controller-manager/route-controller-manager-6cf858fd97-cg8vf" podUID="223031c2-f69f-4e3b-881e-569cdb6e1226" containerName="route-controller-manager" containerID="cri-o://8e24d6fa9e64068d44a521036de9669170ec4c27876c8b380e466bbdf0d993dd" gracePeriod=30 Dec 10 10:52:47 crc kubenswrapper[4780]: I1210 10:52:47.129970 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/7f5730ec-f362-4d4f-a032-9965ace15473-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-s6pjk\" (UID: \"7f5730ec-f362-4d4f-a032-9965ace15473\") " pod="openshift-marketplace/marketplace-operator-79b997595-s6pjk" Dec 10 10:52:47 crc kubenswrapper[4780]: I1210 10:52:47.131442 4780 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"kube-api-access-jll78\" (UniqueName: \"kubernetes.io/projected/7f5730ec-f362-4d4f-a032-9965ace15473-kube-api-access-jll78\") pod \"marketplace-operator-79b997595-s6pjk\" (UID: \"7f5730ec-f362-4d4f-a032-9965ace15473\") " pod="openshift-marketplace/marketplace-operator-79b997595-s6pjk" Dec 10 10:52:47 crc kubenswrapper[4780]: I1210 10:52:47.131599 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/7f5730ec-f362-4d4f-a032-9965ace15473-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-s6pjk\" (UID: \"7f5730ec-f362-4d4f-a032-9965ace15473\") " pod="openshift-marketplace/marketplace-operator-79b997595-s6pjk" Dec 10 10:52:47 crc kubenswrapper[4780]: I1210 10:52:47.133092 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/7f5730ec-f362-4d4f-a032-9965ace15473-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-s6pjk\" (UID: \"7f5730ec-f362-4d4f-a032-9965ace15473\") " pod="openshift-marketplace/marketplace-operator-79b997595-s6pjk" Dec 10 10:52:47 crc kubenswrapper[4780]: I1210 10:52:47.142609 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/7f5730ec-f362-4d4f-a032-9965ace15473-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-s6pjk\" (UID: \"7f5730ec-f362-4d4f-a032-9965ace15473\") " pod="openshift-marketplace/marketplace-operator-79b997595-s6pjk" Dec 10 10:52:47 crc kubenswrapper[4780]: I1210 10:52:47.150660 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jll78\" (UniqueName: \"kubernetes.io/projected/7f5730ec-f362-4d4f-a032-9965ace15473-kube-api-access-jll78\") pod \"marketplace-operator-79b997595-s6pjk\" (UID: \"7f5730ec-f362-4d4f-a032-9965ace15473\") " pod="openshift-marketplace/marketplace-operator-79b997595-s6pjk" Dec 10 10:52:47 crc kubenswrapper[4780]: I1210 10:52:47.311489 4780 generic.go:334] "Generic (PLEG): container finished" podID="9a59ce5a-0c36-4120-be63-8f2051a58e78" containerID="c9523d172441d49bf4445ad06c5bd41d911bb83542ec6057cd86d9881a8dca4f" exitCode=0 Dec 10 10:52:47 crc kubenswrapper[4780]: I1210 10:52:47.311614 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-jmx2c" event={"ID":"9a59ce5a-0c36-4120-be63-8f2051a58e78","Type":"ContainerDied","Data":"c9523d172441d49bf4445ad06c5bd41d911bb83542ec6057cd86d9881a8dca4f"} Dec 10 10:52:47 crc kubenswrapper[4780]: I1210 10:52:47.315255 4780 generic.go:334] "Generic (PLEG): container finished" podID="90ac2cea-e1c2-479b-8de0-0917f3779a13" containerID="e6e7b0ed6d645f4c93f46695bf63bb1ece776bfbdb34f2e6a769691fe1f2a252" exitCode=0 Dec 10 10:52:47 crc kubenswrapper[4780]: I1210 10:52:47.315386 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-lrzpt" event={"ID":"90ac2cea-e1c2-479b-8de0-0917f3779a13","Type":"ContainerDied","Data":"e6e7b0ed6d645f4c93f46695bf63bb1ece776bfbdb34f2e6a769691fe1f2a252"} Dec 10 10:52:47 crc kubenswrapper[4780]: I1210 10:52:47.318113 4780 generic.go:334] "Generic (PLEG): container finished" podID="59f1ed36-eccd-4cd4-af95-f32539d40314" containerID="40bd3673ff7560022ea7f5b86aae2064c5fdb9df3c95e9b91200aee7dcf9f287" exitCode=0 Dec 10 10:52:47 crc kubenswrapper[4780]: I1210 10:52:47.318202 4780 kubelet.go:2453] 
"SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-pdmqg" event={"ID":"59f1ed36-eccd-4cd4-af95-f32539d40314","Type":"ContainerDied","Data":"40bd3673ff7560022ea7f5b86aae2064c5fdb9df3c95e9b91200aee7dcf9f287"} Dec 10 10:52:47 crc kubenswrapper[4780]: I1210 10:52:47.320489 4780 generic.go:334] "Generic (PLEG): container finished" podID="3234cf0e-6206-4a41-8474-f1893163954f" containerID="26b24261800cb493594541c099e3f0c0ca1751477a786957168881d45803aa1f" exitCode=0 Dec 10 10:52:47 crc kubenswrapper[4780]: I1210 10:52:47.320562 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-z8b9w" event={"ID":"3234cf0e-6206-4a41-8474-f1893163954f","Type":"ContainerDied","Data":"26b24261800cb493594541c099e3f0c0ca1751477a786957168881d45803aa1f"} Dec 10 10:52:47 crc kubenswrapper[4780]: I1210 10:52:47.324288 4780 generic.go:334] "Generic (PLEG): container finished" podID="eee88117-019c-44a5-8a7f-95a655e53a27" containerID="d54ce9b7069d21114f30d42a63e22ec3f0f98c073aacbcb441efcd851dd7be1e" exitCode=0 Dec 10 10:52:47 crc kubenswrapper[4780]: I1210 10:52:47.324392 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-w2bd9" event={"ID":"eee88117-019c-44a5-8a7f-95a655e53a27","Type":"ContainerDied","Data":"d54ce9b7069d21114f30d42a63e22ec3f0f98c073aacbcb441efcd851dd7be1e"} Dec 10 10:52:47 crc kubenswrapper[4780]: I1210 10:52:47.326277 4780 generic.go:334] "Generic (PLEG): container finished" podID="223031c2-f69f-4e3b-881e-569cdb6e1226" containerID="8e24d6fa9e64068d44a521036de9669170ec4c27876c8b380e466bbdf0d993dd" exitCode=0 Dec 10 10:52:47 crc kubenswrapper[4780]: I1210 10:52:47.326314 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6cf858fd97-cg8vf" event={"ID":"223031c2-f69f-4e3b-881e-569cdb6e1226","Type":"ContainerDied","Data":"8e24d6fa9e64068d44a521036de9669170ec4c27876c8b380e466bbdf0d993dd"} Dec 10 10:52:47 crc kubenswrapper[4780]: I1210 10:52:47.427869 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-s6pjk" Dec 10 10:52:47 crc kubenswrapper[4780]: E1210 10:52:47.934288 4780 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3234cf0e_6206_4a41_8474_f1893163954f.slice/crio-conmon-26b24261800cb493594541c099e3f0c0ca1751477a786957168881d45803aa1f.scope\": RecentStats: unable to find data in memory cache]" Dec 10 10:52:48 crc kubenswrapper[4780]: I1210 10:52:48.342045 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-pdmqg" event={"ID":"59f1ed36-eccd-4cd4-af95-f32539d40314","Type":"ContainerDied","Data":"ccd9ed93ea4e0f49a1291b90acee2e0b4004e8272a46b83c143bf1d23ade9333"} Dec 10 10:52:48 crc kubenswrapper[4780]: I1210 10:52:48.342623 4780 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ccd9ed93ea4e0f49a1291b90acee2e0b4004e8272a46b83c143bf1d23ade9333" Dec 10 10:52:48 crc kubenswrapper[4780]: I1210 10:52:48.343734 4780 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-lrzpt" Dec 10 10:52:48 crc kubenswrapper[4780]: I1210 10:52:48.345520 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-z8b9w" event={"ID":"3234cf0e-6206-4a41-8474-f1893163954f","Type":"ContainerDied","Data":"dcefa9125bd9f98a7cfeb73ecf19d603323ecd2f3a802f8e76ae2be265a9f29f"} Dec 10 10:52:48 crc kubenswrapper[4780]: I1210 10:52:48.345552 4780 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="dcefa9125bd9f98a7cfeb73ecf19d603323ecd2f3a802f8e76ae2be265a9f29f" Dec 10 10:52:48 crc kubenswrapper[4780]: I1210 10:52:48.347601 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6cf858fd97-cg8vf" event={"ID":"223031c2-f69f-4e3b-881e-569cdb6e1226","Type":"ContainerDied","Data":"7961092d07ae27214ca4eaf41ce26e30408d62c773bee594db6ab089698cda6c"} Dec 10 10:52:48 crc kubenswrapper[4780]: I1210 10:52:48.347637 4780 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="7961092d07ae27214ca4eaf41ce26e30408d62c773bee594db6ab089698cda6c" Dec 10 10:52:48 crc kubenswrapper[4780]: I1210 10:52:48.351432 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-lrzpt" event={"ID":"90ac2cea-e1c2-479b-8de0-0917f3779a13","Type":"ContainerDied","Data":"d6375a6fa3a106ef34570a653848d6279f16e8038af1421443f222792e68a32d"} Dec 10 10:52:48 crc kubenswrapper[4780]: I1210 10:52:48.351506 4780 scope.go:117] "RemoveContainer" containerID="e6e7b0ed6d645f4c93f46695bf63bb1ece776bfbdb34f2e6a769691fe1f2a252" Dec 10 10:52:48 crc kubenswrapper[4780]: I1210 10:52:48.351582 4780 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-lrzpt" Dec 10 10:52:48 crc kubenswrapper[4780]: I1210 10:52:48.356192 4780 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-z8b9w" Dec 10 10:52:48 crc kubenswrapper[4780]: I1210 10:52:48.359235 4780 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6cf858fd97-cg8vf" Dec 10 10:52:48 crc kubenswrapper[4780]: I1210 10:52:48.415004 4780 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-pdmqg" Dec 10 10:52:48 crc kubenswrapper[4780]: I1210 10:52:48.478060 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/59f1ed36-eccd-4cd4-af95-f32539d40314-utilities\") pod \"59f1ed36-eccd-4cd4-af95-f32539d40314\" (UID: \"59f1ed36-eccd-4cd4-af95-f32539d40314\") " Dec 10 10:52:48 crc kubenswrapper[4780]: I1210 10:52:48.478146 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3234cf0e-6206-4a41-8474-f1893163954f-catalog-content\") pod \"3234cf0e-6206-4a41-8474-f1893163954f\" (UID: \"3234cf0e-6206-4a41-8474-f1893163954f\") " Dec 10 10:52:48 crc kubenswrapper[4780]: I1210 10:52:48.478174 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/223031c2-f69f-4e3b-881e-569cdb6e1226-serving-cert\") pod \"223031c2-f69f-4e3b-881e-569cdb6e1226\" (UID: \"223031c2-f69f-4e3b-881e-569cdb6e1226\") " Dec 10 10:52:48 crc kubenswrapper[4780]: I1210 10:52:48.478249 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/223031c2-f69f-4e3b-881e-569cdb6e1226-config\") pod \"223031c2-f69f-4e3b-881e-569cdb6e1226\" (UID: \"223031c2-f69f-4e3b-881e-569cdb6e1226\") " Dec 10 10:52:48 crc kubenswrapper[4780]: I1210 10:52:48.479496 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/59f1ed36-eccd-4cd4-af95-f32539d40314-utilities" (OuterVolumeSpecName: "utilities") pod "59f1ed36-eccd-4cd4-af95-f32539d40314" (UID: "59f1ed36-eccd-4cd4-af95-f32539d40314"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 10:52:48 crc kubenswrapper[4780]: I1210 10:52:48.486036 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9z2hv\" (UniqueName: \"kubernetes.io/projected/90ac2cea-e1c2-479b-8de0-0917f3779a13-kube-api-access-9z2hv\") pod \"90ac2cea-e1c2-479b-8de0-0917f3779a13\" (UID: \"90ac2cea-e1c2-479b-8de0-0917f3779a13\") " Dec 10 10:52:48 crc kubenswrapper[4780]: I1210 10:52:48.486185 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/90ac2cea-e1c2-479b-8de0-0917f3779a13-marketplace-trusted-ca\") pod \"90ac2cea-e1c2-479b-8de0-0917f3779a13\" (UID: \"90ac2cea-e1c2-479b-8de0-0917f3779a13\") " Dec 10 10:52:48 crc kubenswrapper[4780]: I1210 10:52:48.486293 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-z5s8t\" (UniqueName: \"kubernetes.io/projected/223031c2-f69f-4e3b-881e-569cdb6e1226-kube-api-access-z5s8t\") pod \"223031c2-f69f-4e3b-881e-569cdb6e1226\" (UID: \"223031c2-f69f-4e3b-881e-569cdb6e1226\") " Dec 10 10:52:48 crc kubenswrapper[4780]: I1210 10:52:48.486349 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/59f1ed36-eccd-4cd4-af95-f32539d40314-catalog-content\") pod \"59f1ed36-eccd-4cd4-af95-f32539d40314\" (UID: \"59f1ed36-eccd-4cd4-af95-f32539d40314\") " Dec 10 10:52:48 crc kubenswrapper[4780]: I1210 10:52:48.486423 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3234cf0e-6206-4a41-8474-f1893163954f-utilities\") pod \"3234cf0e-6206-4a41-8474-f1893163954f\" (UID: \"3234cf0e-6206-4a41-8474-f1893163954f\") " Dec 10 10:52:48 crc kubenswrapper[4780]: I1210 10:52:48.486474 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kql5b\" (UniqueName: \"kubernetes.io/projected/3234cf0e-6206-4a41-8474-f1893163954f-kube-api-access-kql5b\") pod \"3234cf0e-6206-4a41-8474-f1893163954f\" (UID: \"3234cf0e-6206-4a41-8474-f1893163954f\") " Dec 10 10:52:48 crc kubenswrapper[4780]: I1210 10:52:48.486501 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/223031c2-f69f-4e3b-881e-569cdb6e1226-client-ca\") pod \"223031c2-f69f-4e3b-881e-569cdb6e1226\" (UID: \"223031c2-f69f-4e3b-881e-569cdb6e1226\") " Dec 10 10:52:48 crc kubenswrapper[4780]: I1210 10:52:48.486537 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-czq5m\" (UniqueName: \"kubernetes.io/projected/59f1ed36-eccd-4cd4-af95-f32539d40314-kube-api-access-czq5m\") pod \"59f1ed36-eccd-4cd4-af95-f32539d40314\" (UID: \"59f1ed36-eccd-4cd4-af95-f32539d40314\") " Dec 10 10:52:48 crc kubenswrapper[4780]: I1210 10:52:48.486582 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/90ac2cea-e1c2-479b-8de0-0917f3779a13-marketplace-operator-metrics\") pod \"90ac2cea-e1c2-479b-8de0-0917f3779a13\" (UID: \"90ac2cea-e1c2-479b-8de0-0917f3779a13\") " Dec 10 10:52:48 crc kubenswrapper[4780]: I1210 10:52:48.486669 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/223031c2-f69f-4e3b-881e-569cdb6e1226-config" (OuterVolumeSpecName: 
"config") pod "223031c2-f69f-4e3b-881e-569cdb6e1226" (UID: "223031c2-f69f-4e3b-881e-569cdb6e1226"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 10:52:48 crc kubenswrapper[4780]: I1210 10:52:48.487482 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/223031c2-f69f-4e3b-881e-569cdb6e1226-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "223031c2-f69f-4e3b-881e-569cdb6e1226" (UID: "223031c2-f69f-4e3b-881e-569cdb6e1226"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 10:52:48 crc kubenswrapper[4780]: I1210 10:52:48.492391 4780 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/59f1ed36-eccd-4cd4-af95-f32539d40314-utilities\") on node \"crc\" DevicePath \"\"" Dec 10 10:52:48 crc kubenswrapper[4780]: I1210 10:52:48.492436 4780 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/223031c2-f69f-4e3b-881e-569cdb6e1226-serving-cert\") on node \"crc\" DevicePath \"\"" Dec 10 10:52:48 crc kubenswrapper[4780]: I1210 10:52:48.492451 4780 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/223031c2-f69f-4e3b-881e-569cdb6e1226-config\") on node \"crc\" DevicePath \"\"" Dec 10 10:52:48 crc kubenswrapper[4780]: I1210 10:52:48.493936 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/90ac2cea-e1c2-479b-8de0-0917f3779a13-marketplace-trusted-ca" (OuterVolumeSpecName: "marketplace-trusted-ca") pod "90ac2cea-e1c2-479b-8de0-0917f3779a13" (UID: "90ac2cea-e1c2-479b-8de0-0917f3779a13"). InnerVolumeSpecName "marketplace-trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 10:52:48 crc kubenswrapper[4780]: I1210 10:52:48.495066 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/223031c2-f69f-4e3b-881e-569cdb6e1226-client-ca" (OuterVolumeSpecName: "client-ca") pod "223031c2-f69f-4e3b-881e-569cdb6e1226" (UID: "223031c2-f69f-4e3b-881e-569cdb6e1226"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 10:52:48 crc kubenswrapper[4780]: I1210 10:52:48.496194 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/223031c2-f69f-4e3b-881e-569cdb6e1226-kube-api-access-z5s8t" (OuterVolumeSpecName: "kube-api-access-z5s8t") pod "223031c2-f69f-4e3b-881e-569cdb6e1226" (UID: "223031c2-f69f-4e3b-881e-569cdb6e1226"). InnerVolumeSpecName "kube-api-access-z5s8t". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 10:52:48 crc kubenswrapper[4780]: I1210 10:52:48.497280 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3234cf0e-6206-4a41-8474-f1893163954f-utilities" (OuterVolumeSpecName: "utilities") pod "3234cf0e-6206-4a41-8474-f1893163954f" (UID: "3234cf0e-6206-4a41-8474-f1893163954f"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 10:52:48 crc kubenswrapper[4780]: I1210 10:52:48.504820 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3234cf0e-6206-4a41-8474-f1893163954f-kube-api-access-kql5b" (OuterVolumeSpecName: "kube-api-access-kql5b") pod "3234cf0e-6206-4a41-8474-f1893163954f" (UID: "3234cf0e-6206-4a41-8474-f1893163954f"). 
InnerVolumeSpecName "kube-api-access-kql5b". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 10:52:48 crc kubenswrapper[4780]: I1210 10:52:48.505911 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/90ac2cea-e1c2-479b-8de0-0917f3779a13-kube-api-access-9z2hv" (OuterVolumeSpecName: "kube-api-access-9z2hv") pod "90ac2cea-e1c2-479b-8de0-0917f3779a13" (UID: "90ac2cea-e1c2-479b-8de0-0917f3779a13"). InnerVolumeSpecName "kube-api-access-9z2hv". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 10:52:48 crc kubenswrapper[4780]: I1210 10:52:48.516584 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/59f1ed36-eccd-4cd4-af95-f32539d40314-kube-api-access-czq5m" (OuterVolumeSpecName: "kube-api-access-czq5m") pod "59f1ed36-eccd-4cd4-af95-f32539d40314" (UID: "59f1ed36-eccd-4cd4-af95-f32539d40314"). InnerVolumeSpecName "kube-api-access-czq5m". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 10:52:48 crc kubenswrapper[4780]: I1210 10:52:48.522215 4780 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-jmx2c" Dec 10 10:52:48 crc kubenswrapper[4780]: I1210 10:52:48.524579 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/90ac2cea-e1c2-479b-8de0-0917f3779a13-marketplace-operator-metrics" (OuterVolumeSpecName: "marketplace-operator-metrics") pod "90ac2cea-e1c2-479b-8de0-0917f3779a13" (UID: "90ac2cea-e1c2-479b-8de0-0917f3779a13"). InnerVolumeSpecName "marketplace-operator-metrics". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 10:52:48 crc kubenswrapper[4780]: I1210 10:52:48.538389 4780 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-w2bd9" Dec 10 10:52:48 crc kubenswrapper[4780]: I1210 10:52:48.593723 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3234cf0e-6206-4a41-8474-f1893163954f-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "3234cf0e-6206-4a41-8474-f1893163954f" (UID: "3234cf0e-6206-4a41-8474-f1893163954f"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 10:52:48 crc kubenswrapper[4780]: I1210 10:52:48.594276 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/59f1ed36-eccd-4cd4-af95-f32539d40314-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "59f1ed36-eccd-4cd4-af95-f32539d40314" (UID: "59f1ed36-eccd-4cd4-af95-f32539d40314"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 10:52:48 crc kubenswrapper[4780]: I1210 10:52:48.594370 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9a59ce5a-0c36-4120-be63-8f2051a58e78-utilities\") pod \"9a59ce5a-0c36-4120-be63-8f2051a58e78\" (UID: \"9a59ce5a-0c36-4120-be63-8f2051a58e78\") " Dec 10 10:52:48 crc kubenswrapper[4780]: I1210 10:52:48.594440 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/eee88117-019c-44a5-8a7f-95a655e53a27-utilities\") pod \"eee88117-019c-44a5-8a7f-95a655e53a27\" (UID: \"eee88117-019c-44a5-8a7f-95a655e53a27\") " Dec 10 10:52:48 crc kubenswrapper[4780]: I1210 10:52:48.594526 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hjf86\" (UniqueName: \"kubernetes.io/projected/eee88117-019c-44a5-8a7f-95a655e53a27-kube-api-access-hjf86\") pod \"eee88117-019c-44a5-8a7f-95a655e53a27\" (UID: \"eee88117-019c-44a5-8a7f-95a655e53a27\") " Dec 10 10:52:48 crc kubenswrapper[4780]: I1210 10:52:48.594556 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/59f1ed36-eccd-4cd4-af95-f32539d40314-catalog-content\") pod \"59f1ed36-eccd-4cd4-af95-f32539d40314\" (UID: \"59f1ed36-eccd-4cd4-af95-f32539d40314\") " Dec 10 10:52:48 crc kubenswrapper[4780]: I1210 10:52:48.594657 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9a59ce5a-0c36-4120-be63-8f2051a58e78-catalog-content\") pod \"9a59ce5a-0c36-4120-be63-8f2051a58e78\" (UID: \"9a59ce5a-0c36-4120-be63-8f2051a58e78\") " Dec 10 10:52:48 crc kubenswrapper[4780]: I1210 10:52:48.594698 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/eee88117-019c-44a5-8a7f-95a655e53a27-catalog-content\") pod \"eee88117-019c-44a5-8a7f-95a655e53a27\" (UID: \"eee88117-019c-44a5-8a7f-95a655e53a27\") " Dec 10 10:52:48 crc kubenswrapper[4780]: I1210 10:52:48.594728 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fr75z\" (UniqueName: \"kubernetes.io/projected/9a59ce5a-0c36-4120-be63-8f2051a58e78-kube-api-access-fr75z\") pod \"9a59ce5a-0c36-4120-be63-8f2051a58e78\" (UID: \"9a59ce5a-0c36-4120-be63-8f2051a58e78\") " Dec 10 10:52:48 crc kubenswrapper[4780]: I1210 10:52:48.595037 4780 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-z5s8t\" (UniqueName: \"kubernetes.io/projected/223031c2-f69f-4e3b-881e-569cdb6e1226-kube-api-access-z5s8t\") on node \"crc\" DevicePath \"\"" Dec 10 10:52:48 crc kubenswrapper[4780]: I1210 10:52:48.595057 4780 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3234cf0e-6206-4a41-8474-f1893163954f-utilities\") on node \"crc\" DevicePath \"\"" Dec 10 10:52:48 crc kubenswrapper[4780]: I1210 10:52:48.595071 4780 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kql5b\" (UniqueName: \"kubernetes.io/projected/3234cf0e-6206-4a41-8474-f1893163954f-kube-api-access-kql5b\") on node \"crc\" DevicePath \"\"" Dec 10 10:52:48 crc kubenswrapper[4780]: I1210 10:52:48.595085 4780 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: 
\"kubernetes.io/configmap/223031c2-f69f-4e3b-881e-569cdb6e1226-client-ca\") on node \"crc\" DevicePath \"\"" Dec 10 10:52:48 crc kubenswrapper[4780]: I1210 10:52:48.595097 4780 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-czq5m\" (UniqueName: \"kubernetes.io/projected/59f1ed36-eccd-4cd4-af95-f32539d40314-kube-api-access-czq5m\") on node \"crc\" DevicePath \"\"" Dec 10 10:52:48 crc kubenswrapper[4780]: I1210 10:52:48.595113 4780 reconciler_common.go:293] "Volume detached for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/90ac2cea-e1c2-479b-8de0-0917f3779a13-marketplace-operator-metrics\") on node \"crc\" DevicePath \"\"" Dec 10 10:52:48 crc kubenswrapper[4780]: I1210 10:52:48.595126 4780 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3234cf0e-6206-4a41-8474-f1893163954f-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 10 10:52:48 crc kubenswrapper[4780]: I1210 10:52:48.595138 4780 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9z2hv\" (UniqueName: \"kubernetes.io/projected/90ac2cea-e1c2-479b-8de0-0917f3779a13-kube-api-access-9z2hv\") on node \"crc\" DevicePath \"\"" Dec 10 10:52:48 crc kubenswrapper[4780]: I1210 10:52:48.595151 4780 reconciler_common.go:293] "Volume detached for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/90ac2cea-e1c2-479b-8de0-0917f3779a13-marketplace-trusted-ca\") on node \"crc\" DevicePath \"\"" Dec 10 10:52:48 crc kubenswrapper[4780]: W1210 10:52:48.600431 4780 empty_dir.go:500] Warning: Unmount skipped because path does not exist: /var/lib/kubelet/pods/59f1ed36-eccd-4cd4-af95-f32539d40314/volumes/kubernetes.io~empty-dir/catalog-content Dec 10 10:52:48 crc kubenswrapper[4780]: I1210 10:52:48.600516 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/59f1ed36-eccd-4cd4-af95-f32539d40314-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "59f1ed36-eccd-4cd4-af95-f32539d40314" (UID: "59f1ed36-eccd-4cd4-af95-f32539d40314"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 10:52:48 crc kubenswrapper[4780]: I1210 10:52:48.600667 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9a59ce5a-0c36-4120-be63-8f2051a58e78-kube-api-access-fr75z" (OuterVolumeSpecName: "kube-api-access-fr75z") pod "9a59ce5a-0c36-4120-be63-8f2051a58e78" (UID: "9a59ce5a-0c36-4120-be63-8f2051a58e78"). InnerVolumeSpecName "kube-api-access-fr75z". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 10:52:48 crc kubenswrapper[4780]: I1210 10:52:48.601179 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9a59ce5a-0c36-4120-be63-8f2051a58e78-utilities" (OuterVolumeSpecName: "utilities") pod "9a59ce5a-0c36-4120-be63-8f2051a58e78" (UID: "9a59ce5a-0c36-4120-be63-8f2051a58e78"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 10:52:48 crc kubenswrapper[4780]: I1210 10:52:48.603911 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/eee88117-019c-44a5-8a7f-95a655e53a27-kube-api-access-hjf86" (OuterVolumeSpecName: "kube-api-access-hjf86") pod "eee88117-019c-44a5-8a7f-95a655e53a27" (UID: "eee88117-019c-44a5-8a7f-95a655e53a27"). InnerVolumeSpecName "kube-api-access-hjf86". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 10:52:48 crc kubenswrapper[4780]: I1210 10:52:48.613955 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/eee88117-019c-44a5-8a7f-95a655e53a27-utilities" (OuterVolumeSpecName: "utilities") pod "eee88117-019c-44a5-8a7f-95a655e53a27" (UID: "eee88117-019c-44a5-8a7f-95a655e53a27"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 10:52:48 crc kubenswrapper[4780]: I1210 10:52:48.635984 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/eee88117-019c-44a5-8a7f-95a655e53a27-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "eee88117-019c-44a5-8a7f-95a655e53a27" (UID: "eee88117-019c-44a5-8a7f-95a655e53a27"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 10:52:48 crc kubenswrapper[4780]: I1210 10:52:48.692543 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-s6pjk"] Dec 10 10:52:48 crc kubenswrapper[4780]: I1210 10:52:48.724407 4780 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/eee88117-019c-44a5-8a7f-95a655e53a27-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 10 10:52:48 crc kubenswrapper[4780]: I1210 10:52:48.724443 4780 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fr75z\" (UniqueName: \"kubernetes.io/projected/9a59ce5a-0c36-4120-be63-8f2051a58e78-kube-api-access-fr75z\") on node \"crc\" DevicePath \"\"" Dec 10 10:52:48 crc kubenswrapper[4780]: I1210 10:52:48.724458 4780 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9a59ce5a-0c36-4120-be63-8f2051a58e78-utilities\") on node \"crc\" DevicePath \"\"" Dec 10 10:52:48 crc kubenswrapper[4780]: I1210 10:52:48.724470 4780 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/eee88117-019c-44a5-8a7f-95a655e53a27-utilities\") on node \"crc\" DevicePath \"\"" Dec 10 10:52:48 crc kubenswrapper[4780]: I1210 10:52:48.724482 4780 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/59f1ed36-eccd-4cd4-af95-f32539d40314-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 10 10:52:48 crc kubenswrapper[4780]: I1210 10:52:48.724493 4780 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hjf86\" (UniqueName: \"kubernetes.io/projected/eee88117-019c-44a5-8a7f-95a655e53a27-kube-api-access-hjf86\") on node \"crc\" DevicePath \"\"" Dec 10 10:52:48 crc kubenswrapper[4780]: I1210 10:52:48.761215 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-lrzpt"] Dec 10 10:52:48 crc kubenswrapper[4780]: I1210 10:52:48.766301 4780 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-lrzpt"] Dec 10 10:52:48 crc kubenswrapper[4780]: I1210 10:52:48.784356 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9a59ce5a-0c36-4120-be63-8f2051a58e78-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "9a59ce5a-0c36-4120-be63-8f2051a58e78" (UID: "9a59ce5a-0c36-4120-be63-8f2051a58e78"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 10:52:48 crc kubenswrapper[4780]: I1210 10:52:48.826000 4780 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9a59ce5a-0c36-4120-be63-8f2051a58e78-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 10 10:52:49 crc kubenswrapper[4780]: I1210 10:52:49.361601 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-jmx2c" event={"ID":"9a59ce5a-0c36-4120-be63-8f2051a58e78","Type":"ContainerDied","Data":"a4a19ad29eebef4b448fcc67a3babccd410651fbc9bdaaba619ab6f5d10e4620"} Dec 10 10:52:49 crc kubenswrapper[4780]: I1210 10:52:49.362142 4780 scope.go:117] "RemoveContainer" containerID="c9523d172441d49bf4445ad06c5bd41d911bb83542ec6057cd86d9881a8dca4f" Dec 10 10:52:49 crc kubenswrapper[4780]: I1210 10:52:49.361657 4780 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-jmx2c" Dec 10 10:52:49 crc kubenswrapper[4780]: I1210 10:52:49.365344 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-s6pjk" event={"ID":"7f5730ec-f362-4d4f-a032-9965ace15473","Type":"ContainerStarted","Data":"45e7c06b3b83e5c35cc7da5863bca677d1d0e7ebe75db849926151f66a93ac7c"} Dec 10 10:52:49 crc kubenswrapper[4780]: I1210 10:52:49.365415 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-s6pjk" event={"ID":"7f5730ec-f362-4d4f-a032-9965ace15473","Type":"ContainerStarted","Data":"f1d3ce38878342c5c06ec0527a8d7ae75527057fe138e570a4522737632c76d6"} Dec 10 10:52:49 crc kubenswrapper[4780]: I1210 10:52:49.366701 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/marketplace-operator-79b997595-s6pjk" Dec 10 10:52:49 crc kubenswrapper[4780]: I1210 10:52:49.370702 4780 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-s6pjk container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.61:8080/healthz\": dial tcp 10.217.0.61:8080: connect: connection refused" start-of-body= Dec 10 10:52:49 crc kubenswrapper[4780]: I1210 10:52:49.370799 4780 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-s6pjk" podUID="7f5730ec-f362-4d4f-a032-9965ace15473" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.61:8080/healthz\": dial tcp 10.217.0.61:8080: connect: connection refused" Dec 10 10:52:49 crc kubenswrapper[4780]: I1210 10:52:49.376223 4780 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6cf858fd97-cg8vf" Dec 10 10:52:49 crc kubenswrapper[4780]: I1210 10:52:49.378530 4780 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-w2bd9" Dec 10 10:52:49 crc kubenswrapper[4780]: I1210 10:52:49.381321 4780 scope.go:117] "RemoveContainer" containerID="43eec9971bacaad00c607a19ece407215403cd93bd140c4b0f600212e3197d8b" Dec 10 10:52:49 crc kubenswrapper[4780]: I1210 10:52:49.381872 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-w2bd9" event={"ID":"eee88117-019c-44a5-8a7f-95a655e53a27","Type":"ContainerDied","Data":"82594c8c7c6e340f24019b36a4664a1857b60b07ac93f817dba36141d331e5a4"} Dec 10 10:52:49 crc kubenswrapper[4780]: I1210 10:52:49.382616 4780 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-z8b9w" Dec 10 10:52:49 crc kubenswrapper[4780]: I1210 10:52:49.384809 4780 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-pdmqg" Dec 10 10:52:49 crc kubenswrapper[4780]: I1210 10:52:49.401245 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/marketplace-operator-79b997595-s6pjk" podStartSLOduration=3.401215457 podStartE2EDuration="3.401215457s" podCreationTimestamp="2025-12-10 10:52:46 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 10:52:49.399124931 +0000 UTC m=+474.252518394" watchObservedRunningTime="2025-12-10 10:52:49.401215457 +0000 UTC m=+474.254608900" Dec 10 10:52:49 crc kubenswrapper[4780]: I1210 10:52:49.424590 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-jmx2c"] Dec 10 10:52:49 crc kubenswrapper[4780]: I1210 10:52:49.429380 4780 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-jmx2c"] Dec 10 10:52:49 crc kubenswrapper[4780]: I1210 10:52:49.436332 4780 scope.go:117] "RemoveContainer" containerID="16efd2c094162ffd9427ac555ed1a726c6c3caec8549fa2d103e09372301d48a" Dec 10 10:52:49 crc kubenswrapper[4780]: I1210 10:52:49.454272 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6cf858fd97-cg8vf"] Dec 10 10:52:49 crc kubenswrapper[4780]: I1210 10:52:49.459449 4780 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6cf858fd97-cg8vf"] Dec 10 10:52:49 crc kubenswrapper[4780]: I1210 10:52:49.478489 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-z8b9w"] Dec 10 10:52:49 crc kubenswrapper[4780]: I1210 10:52:49.479342 4780 scope.go:117] "RemoveContainer" containerID="d54ce9b7069d21114f30d42a63e22ec3f0f98c073aacbcb441efcd851dd7be1e" Dec 10 10:52:49 crc kubenswrapper[4780]: I1210 10:52:49.485676 4780 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-z8b9w"] Dec 10 10:52:49 crc kubenswrapper[4780]: I1210 10:52:49.491775 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-w2bd9"] Dec 10 10:52:49 crc kubenswrapper[4780]: I1210 10:52:49.495785 4780 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-w2bd9"] Dec 10 10:52:49 crc kubenswrapper[4780]: I1210 10:52:49.500876 4780 scope.go:117] "RemoveContainer" containerID="bc27cdadd2bfedee7361ec350a79b5901084b5a9b00023388f5ce4060d5778ad" Dec 10 10:52:49 crc 
kubenswrapper[4780]: I1210 10:52:49.512103 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-pdmqg"] Dec 10 10:52:49 crc kubenswrapper[4780]: I1210 10:52:49.516747 4780 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-pdmqg"] Dec 10 10:52:49 crc kubenswrapper[4780]: I1210 10:52:49.526160 4780 scope.go:117] "RemoveContainer" containerID="2f330049483197fc3fc8845f679b7b8a2b5d6924516d31269a45d57241452e27" Dec 10 10:52:49 crc kubenswrapper[4780]: I1210 10:52:49.970685 4780 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="223031c2-f69f-4e3b-881e-569cdb6e1226" path="/var/lib/kubelet/pods/223031c2-f69f-4e3b-881e-569cdb6e1226/volumes" Dec 10 10:52:49 crc kubenswrapper[4780]: I1210 10:52:49.971640 4780 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3234cf0e-6206-4a41-8474-f1893163954f" path="/var/lib/kubelet/pods/3234cf0e-6206-4a41-8474-f1893163954f/volumes" Dec 10 10:52:49 crc kubenswrapper[4780]: I1210 10:52:49.972758 4780 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="59f1ed36-eccd-4cd4-af95-f32539d40314" path="/var/lib/kubelet/pods/59f1ed36-eccd-4cd4-af95-f32539d40314/volumes" Dec 10 10:52:49 crc kubenswrapper[4780]: I1210 10:52:49.974404 4780 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="90ac2cea-e1c2-479b-8de0-0917f3779a13" path="/var/lib/kubelet/pods/90ac2cea-e1c2-479b-8de0-0917f3779a13/volumes" Dec 10 10:52:49 crc kubenswrapper[4780]: I1210 10:52:49.975185 4780 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9a59ce5a-0c36-4120-be63-8f2051a58e78" path="/var/lib/kubelet/pods/9a59ce5a-0c36-4120-be63-8f2051a58e78/volumes" Dec 10 10:52:49 crc kubenswrapper[4780]: I1210 10:52:49.976006 4780 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="eee88117-019c-44a5-8a7f-95a655e53a27" path="/var/lib/kubelet/pods/eee88117-019c-44a5-8a7f-95a655e53a27/volumes" Dec 10 10:52:50 crc kubenswrapper[4780]: I1210 10:52:50.389018 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/marketplace-operator-79b997595-s6pjk" Dec 10 10:52:51 crc kubenswrapper[4780]: I1210 10:52:51.089159 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-hsqfx"] Dec 10 10:52:51 crc kubenswrapper[4780]: E1210 10:52:51.089405 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="59f1ed36-eccd-4cd4-af95-f32539d40314" containerName="extract-content" Dec 10 10:52:51 crc kubenswrapper[4780]: I1210 10:52:51.089427 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="59f1ed36-eccd-4cd4-af95-f32539d40314" containerName="extract-content" Dec 10 10:52:51 crc kubenswrapper[4780]: E1210 10:52:51.089448 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="59f1ed36-eccd-4cd4-af95-f32539d40314" containerName="registry-server" Dec 10 10:52:51 crc kubenswrapper[4780]: I1210 10:52:51.089455 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="59f1ed36-eccd-4cd4-af95-f32539d40314" containerName="registry-server" Dec 10 10:52:51 crc kubenswrapper[4780]: E1210 10:52:51.089466 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3234cf0e-6206-4a41-8474-f1893163954f" containerName="extract-utilities" Dec 10 10:52:51 crc kubenswrapper[4780]: I1210 10:52:51.089474 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="3234cf0e-6206-4a41-8474-f1893163954f" 
containerName="extract-utilities" Dec 10 10:52:51 crc kubenswrapper[4780]: E1210 10:52:51.089487 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3234cf0e-6206-4a41-8474-f1893163954f" containerName="extract-content" Dec 10 10:52:51 crc kubenswrapper[4780]: I1210 10:52:51.089495 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="3234cf0e-6206-4a41-8474-f1893163954f" containerName="extract-content" Dec 10 10:52:51 crc kubenswrapper[4780]: E1210 10:52:51.089507 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9a59ce5a-0c36-4120-be63-8f2051a58e78" containerName="registry-server" Dec 10 10:52:51 crc kubenswrapper[4780]: I1210 10:52:51.089518 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="9a59ce5a-0c36-4120-be63-8f2051a58e78" containerName="registry-server" Dec 10 10:52:51 crc kubenswrapper[4780]: E1210 10:52:51.089531 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="223031c2-f69f-4e3b-881e-569cdb6e1226" containerName="route-controller-manager" Dec 10 10:52:51 crc kubenswrapper[4780]: I1210 10:52:51.089539 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="223031c2-f69f-4e3b-881e-569cdb6e1226" containerName="route-controller-manager" Dec 10 10:52:51 crc kubenswrapper[4780]: E1210 10:52:51.089569 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="eee88117-019c-44a5-8a7f-95a655e53a27" containerName="extract-utilities" Dec 10 10:52:51 crc kubenswrapper[4780]: I1210 10:52:51.089576 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="eee88117-019c-44a5-8a7f-95a655e53a27" containerName="extract-utilities" Dec 10 10:52:51 crc kubenswrapper[4780]: E1210 10:52:51.089585 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="90ac2cea-e1c2-479b-8de0-0917f3779a13" containerName="marketplace-operator" Dec 10 10:52:51 crc kubenswrapper[4780]: I1210 10:52:51.089591 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="90ac2cea-e1c2-479b-8de0-0917f3779a13" containerName="marketplace-operator" Dec 10 10:52:51 crc kubenswrapper[4780]: E1210 10:52:51.089599 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9a59ce5a-0c36-4120-be63-8f2051a58e78" containerName="extract-content" Dec 10 10:52:51 crc kubenswrapper[4780]: I1210 10:52:51.089644 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="9a59ce5a-0c36-4120-be63-8f2051a58e78" containerName="extract-content" Dec 10 10:52:51 crc kubenswrapper[4780]: E1210 10:52:51.089655 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3234cf0e-6206-4a41-8474-f1893163954f" containerName="registry-server" Dec 10 10:52:51 crc kubenswrapper[4780]: I1210 10:52:51.089662 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="3234cf0e-6206-4a41-8474-f1893163954f" containerName="registry-server" Dec 10 10:52:51 crc kubenswrapper[4780]: E1210 10:52:51.089676 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="eee88117-019c-44a5-8a7f-95a655e53a27" containerName="extract-content" Dec 10 10:52:51 crc kubenswrapper[4780]: I1210 10:52:51.089682 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="eee88117-019c-44a5-8a7f-95a655e53a27" containerName="extract-content" Dec 10 10:52:51 crc kubenswrapper[4780]: E1210 10:52:51.089692 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="eee88117-019c-44a5-8a7f-95a655e53a27" containerName="registry-server" Dec 10 10:52:51 crc kubenswrapper[4780]: I1210 10:52:51.089698 4780 state_mem.go:107] "Deleted CPUSet 
assignment" podUID="eee88117-019c-44a5-8a7f-95a655e53a27" containerName="registry-server" Dec 10 10:52:51 crc kubenswrapper[4780]: E1210 10:52:51.089708 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9a59ce5a-0c36-4120-be63-8f2051a58e78" containerName="extract-utilities" Dec 10 10:52:51 crc kubenswrapper[4780]: I1210 10:52:51.089715 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="9a59ce5a-0c36-4120-be63-8f2051a58e78" containerName="extract-utilities" Dec 10 10:52:51 crc kubenswrapper[4780]: E1210 10:52:51.089724 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="59f1ed36-eccd-4cd4-af95-f32539d40314" containerName="extract-utilities" Dec 10 10:52:51 crc kubenswrapper[4780]: I1210 10:52:51.089730 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="59f1ed36-eccd-4cd4-af95-f32539d40314" containerName="extract-utilities" Dec 10 10:52:51 crc kubenswrapper[4780]: I1210 10:52:51.089827 4780 memory_manager.go:354] "RemoveStaleState removing state" podUID="eee88117-019c-44a5-8a7f-95a655e53a27" containerName="registry-server" Dec 10 10:52:51 crc kubenswrapper[4780]: I1210 10:52:51.089863 4780 memory_manager.go:354] "RemoveStaleState removing state" podUID="223031c2-f69f-4e3b-881e-569cdb6e1226" containerName="route-controller-manager" Dec 10 10:52:51 crc kubenswrapper[4780]: I1210 10:52:51.089875 4780 memory_manager.go:354] "RemoveStaleState removing state" podUID="9a59ce5a-0c36-4120-be63-8f2051a58e78" containerName="registry-server" Dec 10 10:52:51 crc kubenswrapper[4780]: I1210 10:52:51.089883 4780 memory_manager.go:354] "RemoveStaleState removing state" podUID="90ac2cea-e1c2-479b-8de0-0917f3779a13" containerName="marketplace-operator" Dec 10 10:52:51 crc kubenswrapper[4780]: I1210 10:52:51.089892 4780 memory_manager.go:354] "RemoveStaleState removing state" podUID="3234cf0e-6206-4a41-8474-f1893163954f" containerName="registry-server" Dec 10 10:52:51 crc kubenswrapper[4780]: I1210 10:52:51.089903 4780 memory_manager.go:354] "RemoveStaleState removing state" podUID="59f1ed36-eccd-4cd4-af95-f32539d40314" containerName="registry-server" Dec 10 10:52:51 crc kubenswrapper[4780]: I1210 10:52:51.090856 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-hsqfx" Dec 10 10:52:51 crc kubenswrapper[4780]: I1210 10:52:51.096713 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-marketplace-dockercfg-x2ctb" Dec 10 10:52:51 crc kubenswrapper[4780]: I1210 10:52:51.097892 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-hsqfx"] Dec 10 10:52:51 crc kubenswrapper[4780]: I1210 10:52:51.188681 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0f530e21-48e6-453e-bb57-7d4ff179e1fc-catalog-content\") pod \"redhat-marketplace-hsqfx\" (UID: \"0f530e21-48e6-453e-bb57-7d4ff179e1fc\") " pod="openshift-marketplace/redhat-marketplace-hsqfx" Dec 10 10:52:51 crc kubenswrapper[4780]: I1210 10:52:51.188761 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5jgrq\" (UniqueName: \"kubernetes.io/projected/0f530e21-48e6-453e-bb57-7d4ff179e1fc-kube-api-access-5jgrq\") pod \"redhat-marketplace-hsqfx\" (UID: \"0f530e21-48e6-453e-bb57-7d4ff179e1fc\") " pod="openshift-marketplace/redhat-marketplace-hsqfx" Dec 10 10:52:51 crc kubenswrapper[4780]: I1210 10:52:51.188794 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0f530e21-48e6-453e-bb57-7d4ff179e1fc-utilities\") pod \"redhat-marketplace-hsqfx\" (UID: \"0f530e21-48e6-453e-bb57-7d4ff179e1fc\") " pod="openshift-marketplace/redhat-marketplace-hsqfx" Dec 10 10:52:51 crc kubenswrapper[4780]: I1210 10:52:51.280706 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-crkcj"] Dec 10 10:52:51 crc kubenswrapper[4780]: I1210 10:52:51.282573 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-crkcj" Dec 10 10:52:51 crc kubenswrapper[4780]: I1210 10:52:51.286304 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-operators-dockercfg-ct8rh" Dec 10 10:52:51 crc kubenswrapper[4780]: I1210 10:52:51.290751 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0f530e21-48e6-453e-bb57-7d4ff179e1fc-catalog-content\") pod \"redhat-marketplace-hsqfx\" (UID: \"0f530e21-48e6-453e-bb57-7d4ff179e1fc\") " pod="openshift-marketplace/redhat-marketplace-hsqfx" Dec 10 10:52:51 crc kubenswrapper[4780]: I1210 10:52:51.290831 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2dc3d032-b524-4c67-9bc1-b8d8f3554b3b-utilities\") pod \"redhat-operators-crkcj\" (UID: \"2dc3d032-b524-4c67-9bc1-b8d8f3554b3b\") " pod="openshift-marketplace/redhat-operators-crkcj" Dec 10 10:52:51 crc kubenswrapper[4780]: I1210 10:52:51.290850 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-v6srh\" (UniqueName: \"kubernetes.io/projected/2dc3d032-b524-4c67-9bc1-b8d8f3554b3b-kube-api-access-v6srh\") pod \"redhat-operators-crkcj\" (UID: \"2dc3d032-b524-4c67-9bc1-b8d8f3554b3b\") " pod="openshift-marketplace/redhat-operators-crkcj" Dec 10 10:52:51 crc kubenswrapper[4780]: I1210 10:52:51.290873 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5jgrq\" (UniqueName: \"kubernetes.io/projected/0f530e21-48e6-453e-bb57-7d4ff179e1fc-kube-api-access-5jgrq\") pod \"redhat-marketplace-hsqfx\" (UID: \"0f530e21-48e6-453e-bb57-7d4ff179e1fc\") " pod="openshift-marketplace/redhat-marketplace-hsqfx" Dec 10 10:52:51 crc kubenswrapper[4780]: I1210 10:52:51.290904 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0f530e21-48e6-453e-bb57-7d4ff179e1fc-utilities\") pod \"redhat-marketplace-hsqfx\" (UID: \"0f530e21-48e6-453e-bb57-7d4ff179e1fc\") " pod="openshift-marketplace/redhat-marketplace-hsqfx" Dec 10 10:52:51 crc kubenswrapper[4780]: I1210 10:52:51.290949 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2dc3d032-b524-4c67-9bc1-b8d8f3554b3b-catalog-content\") pod \"redhat-operators-crkcj\" (UID: \"2dc3d032-b524-4c67-9bc1-b8d8f3554b3b\") " pod="openshift-marketplace/redhat-operators-crkcj" Dec 10 10:52:51 crc kubenswrapper[4780]: I1210 10:52:51.291578 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0f530e21-48e6-453e-bb57-7d4ff179e1fc-utilities\") pod \"redhat-marketplace-hsqfx\" (UID: \"0f530e21-48e6-453e-bb57-7d4ff179e1fc\") " pod="openshift-marketplace/redhat-marketplace-hsqfx" Dec 10 10:52:51 crc kubenswrapper[4780]: I1210 10:52:51.291667 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0f530e21-48e6-453e-bb57-7d4ff179e1fc-catalog-content\") pod \"redhat-marketplace-hsqfx\" (UID: \"0f530e21-48e6-453e-bb57-7d4ff179e1fc\") " pod="openshift-marketplace/redhat-marketplace-hsqfx" Dec 10 10:52:51 crc kubenswrapper[4780]: I1210 10:52:51.296466 4780 kubelet.go:2428] "SyncLoop UPDATE" 
source="api" pods=["openshift-marketplace/redhat-operators-crkcj"] Dec 10 10:52:51 crc kubenswrapper[4780]: I1210 10:52:51.328578 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5jgrq\" (UniqueName: \"kubernetes.io/projected/0f530e21-48e6-453e-bb57-7d4ff179e1fc-kube-api-access-5jgrq\") pod \"redhat-marketplace-hsqfx\" (UID: \"0f530e21-48e6-453e-bb57-7d4ff179e1fc\") " pod="openshift-marketplace/redhat-marketplace-hsqfx" Dec 10 10:52:51 crc kubenswrapper[4780]: I1210 10:52:51.341754 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-59f54ccf59-fzd22"] Dec 10 10:52:51 crc kubenswrapper[4780]: I1210 10:52:51.342706 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-59f54ccf59-fzd22" Dec 10 10:52:51 crc kubenswrapper[4780]: I1210 10:52:51.344977 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2" Dec 10 10:52:51 crc kubenswrapper[4780]: I1210 10:52:51.345479 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca" Dec 10 10:52:51 crc kubenswrapper[4780]: I1210 10:52:51.345779 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert" Dec 10 10:52:51 crc kubenswrapper[4780]: I1210 10:52:51.345856 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt" Dec 10 10:52:51 crc kubenswrapper[4780]: I1210 10:52:51.345854 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config" Dec 10 10:52:51 crc kubenswrapper[4780]: I1210 10:52:51.346125 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt" Dec 10 10:52:51 crc kubenswrapper[4780]: I1210 10:52:51.356155 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-59f54ccf59-fzd22"] Dec 10 10:52:51 crc kubenswrapper[4780]: I1210 10:52:51.391864 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-k8jsz\" (UniqueName: \"kubernetes.io/projected/ad684bf2-640b-4142-905e-7a0ec4ef5f59-kube-api-access-k8jsz\") pod \"route-controller-manager-59f54ccf59-fzd22\" (UID: \"ad684bf2-640b-4142-905e-7a0ec4ef5f59\") " pod="openshift-route-controller-manager/route-controller-manager-59f54ccf59-fzd22" Dec 10 10:52:51 crc kubenswrapper[4780]: I1210 10:52:51.391938 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/ad684bf2-640b-4142-905e-7a0ec4ef5f59-client-ca\") pod \"route-controller-manager-59f54ccf59-fzd22\" (UID: \"ad684bf2-640b-4142-905e-7a0ec4ef5f59\") " pod="openshift-route-controller-manager/route-controller-manager-59f54ccf59-fzd22" Dec 10 10:52:51 crc kubenswrapper[4780]: I1210 10:52:51.391976 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ad684bf2-640b-4142-905e-7a0ec4ef5f59-serving-cert\") pod \"route-controller-manager-59f54ccf59-fzd22\" (UID: \"ad684bf2-640b-4142-905e-7a0ec4ef5f59\") " 
pod="openshift-route-controller-manager/route-controller-manager-59f54ccf59-fzd22" Dec 10 10:52:51 crc kubenswrapper[4780]: I1210 10:52:51.392197 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2dc3d032-b524-4c67-9bc1-b8d8f3554b3b-utilities\") pod \"redhat-operators-crkcj\" (UID: \"2dc3d032-b524-4c67-9bc1-b8d8f3554b3b\") " pod="openshift-marketplace/redhat-operators-crkcj" Dec 10 10:52:51 crc kubenswrapper[4780]: I1210 10:52:51.392237 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-v6srh\" (UniqueName: \"kubernetes.io/projected/2dc3d032-b524-4c67-9bc1-b8d8f3554b3b-kube-api-access-v6srh\") pod \"redhat-operators-crkcj\" (UID: \"2dc3d032-b524-4c67-9bc1-b8d8f3554b3b\") " pod="openshift-marketplace/redhat-operators-crkcj" Dec 10 10:52:51 crc kubenswrapper[4780]: I1210 10:52:51.392365 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2dc3d032-b524-4c67-9bc1-b8d8f3554b3b-catalog-content\") pod \"redhat-operators-crkcj\" (UID: \"2dc3d032-b524-4c67-9bc1-b8d8f3554b3b\") " pod="openshift-marketplace/redhat-operators-crkcj" Dec 10 10:52:51 crc kubenswrapper[4780]: I1210 10:52:51.392457 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ad684bf2-640b-4142-905e-7a0ec4ef5f59-config\") pod \"route-controller-manager-59f54ccf59-fzd22\" (UID: \"ad684bf2-640b-4142-905e-7a0ec4ef5f59\") " pod="openshift-route-controller-manager/route-controller-manager-59f54ccf59-fzd22" Dec 10 10:52:51 crc kubenswrapper[4780]: I1210 10:52:51.392940 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2dc3d032-b524-4c67-9bc1-b8d8f3554b3b-utilities\") pod \"redhat-operators-crkcj\" (UID: \"2dc3d032-b524-4c67-9bc1-b8d8f3554b3b\") " pod="openshift-marketplace/redhat-operators-crkcj" Dec 10 10:52:51 crc kubenswrapper[4780]: I1210 10:52:51.393224 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2dc3d032-b524-4c67-9bc1-b8d8f3554b3b-catalog-content\") pod \"redhat-operators-crkcj\" (UID: \"2dc3d032-b524-4c67-9bc1-b8d8f3554b3b\") " pod="openshift-marketplace/redhat-operators-crkcj" Dec 10 10:52:51 crc kubenswrapper[4780]: I1210 10:52:51.416995 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-hsqfx" Dec 10 10:52:51 crc kubenswrapper[4780]: I1210 10:52:51.417095 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-v6srh\" (UniqueName: \"kubernetes.io/projected/2dc3d032-b524-4c67-9bc1-b8d8f3554b3b-kube-api-access-v6srh\") pod \"redhat-operators-crkcj\" (UID: \"2dc3d032-b524-4c67-9bc1-b8d8f3554b3b\") " pod="openshift-marketplace/redhat-operators-crkcj" Dec 10 10:52:51 crc kubenswrapper[4780]: I1210 10:52:51.495355 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ad684bf2-640b-4142-905e-7a0ec4ef5f59-config\") pod \"route-controller-manager-59f54ccf59-fzd22\" (UID: \"ad684bf2-640b-4142-905e-7a0ec4ef5f59\") " pod="openshift-route-controller-manager/route-controller-manager-59f54ccf59-fzd22" Dec 10 10:52:51 crc kubenswrapper[4780]: I1210 10:52:51.495442 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-k8jsz\" (UniqueName: \"kubernetes.io/projected/ad684bf2-640b-4142-905e-7a0ec4ef5f59-kube-api-access-k8jsz\") pod \"route-controller-manager-59f54ccf59-fzd22\" (UID: \"ad684bf2-640b-4142-905e-7a0ec4ef5f59\") " pod="openshift-route-controller-manager/route-controller-manager-59f54ccf59-fzd22" Dec 10 10:52:51 crc kubenswrapper[4780]: I1210 10:52:51.495470 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/ad684bf2-640b-4142-905e-7a0ec4ef5f59-client-ca\") pod \"route-controller-manager-59f54ccf59-fzd22\" (UID: \"ad684bf2-640b-4142-905e-7a0ec4ef5f59\") " pod="openshift-route-controller-manager/route-controller-manager-59f54ccf59-fzd22" Dec 10 10:52:51 crc kubenswrapper[4780]: I1210 10:52:51.495499 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ad684bf2-640b-4142-905e-7a0ec4ef5f59-serving-cert\") pod \"route-controller-manager-59f54ccf59-fzd22\" (UID: \"ad684bf2-640b-4142-905e-7a0ec4ef5f59\") " pod="openshift-route-controller-manager/route-controller-manager-59f54ccf59-fzd22" Dec 10 10:52:51 crc kubenswrapper[4780]: I1210 10:52:51.497541 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/ad684bf2-640b-4142-905e-7a0ec4ef5f59-client-ca\") pod \"route-controller-manager-59f54ccf59-fzd22\" (UID: \"ad684bf2-640b-4142-905e-7a0ec4ef5f59\") " pod="openshift-route-controller-manager/route-controller-manager-59f54ccf59-fzd22" Dec 10 10:52:51 crc kubenswrapper[4780]: I1210 10:52:51.500120 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ad684bf2-640b-4142-905e-7a0ec4ef5f59-config\") pod \"route-controller-manager-59f54ccf59-fzd22\" (UID: \"ad684bf2-640b-4142-905e-7a0ec4ef5f59\") " pod="openshift-route-controller-manager/route-controller-manager-59f54ccf59-fzd22" Dec 10 10:52:51 crc kubenswrapper[4780]: I1210 10:52:51.509064 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ad684bf2-640b-4142-905e-7a0ec4ef5f59-serving-cert\") pod \"route-controller-manager-59f54ccf59-fzd22\" (UID: \"ad684bf2-640b-4142-905e-7a0ec4ef5f59\") " pod="openshift-route-controller-manager/route-controller-manager-59f54ccf59-fzd22" Dec 10 10:52:51 crc kubenswrapper[4780]: I1210 10:52:51.514070 4780 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-k8jsz\" (UniqueName: \"kubernetes.io/projected/ad684bf2-640b-4142-905e-7a0ec4ef5f59-kube-api-access-k8jsz\") pod \"route-controller-manager-59f54ccf59-fzd22\" (UID: \"ad684bf2-640b-4142-905e-7a0ec4ef5f59\") " pod="openshift-route-controller-manager/route-controller-manager-59f54ccf59-fzd22" Dec 10 10:52:51 crc kubenswrapper[4780]: I1210 10:52:51.606828 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-crkcj" Dec 10 10:52:51 crc kubenswrapper[4780]: I1210 10:52:51.617331 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-hsqfx"] Dec 10 10:52:51 crc kubenswrapper[4780]: I1210 10:52:51.675593 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-59f54ccf59-fzd22" Dec 10 10:52:52 crc kubenswrapper[4780]: I1210 10:52:52.041431 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-crkcj"] Dec 10 10:52:52 crc kubenswrapper[4780]: W1210 10:52:52.048618 4780 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod2dc3d032_b524_4c67_9bc1_b8d8f3554b3b.slice/crio-526c4d23adec4705afef1fe14f067df25fbe3867393a04ce86efe1e47478c651 WatchSource:0}: Error finding container 526c4d23adec4705afef1fe14f067df25fbe3867393a04ce86efe1e47478c651: Status 404 returned error can't find the container with id 526c4d23adec4705afef1fe14f067df25fbe3867393a04ce86efe1e47478c651 Dec 10 10:52:52 crc kubenswrapper[4780]: I1210 10:52:52.126622 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-59f54ccf59-fzd22"] Dec 10 10:52:52 crc kubenswrapper[4780]: W1210 10:52:52.139738 4780 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podad684bf2_640b_4142_905e_7a0ec4ef5f59.slice/crio-fc027dad1d510daabe86cf0083258168f167ac56d06ea42cd27103eb004bd31c WatchSource:0}: Error finding container fc027dad1d510daabe86cf0083258168f167ac56d06ea42cd27103eb004bd31c: Status 404 returned error can't find the container with id fc027dad1d510daabe86cf0083258168f167ac56d06ea42cd27103eb004bd31c Dec 10 10:52:52 crc kubenswrapper[4780]: I1210 10:52:52.412224 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-59f54ccf59-fzd22" event={"ID":"ad684bf2-640b-4142-905e-7a0ec4ef5f59","Type":"ContainerStarted","Data":"fc027dad1d510daabe86cf0083258168f167ac56d06ea42cd27103eb004bd31c"} Dec 10 10:52:52 crc kubenswrapper[4780]: I1210 10:52:52.415269 4780 generic.go:334] "Generic (PLEG): container finished" podID="2dc3d032-b524-4c67-9bc1-b8d8f3554b3b" containerID="9de6c01a8076df366571763c1f53ead6c965defb7ca5c26326348176a6772e61" exitCode=0 Dec 10 10:52:52 crc kubenswrapper[4780]: I1210 10:52:52.415350 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-crkcj" event={"ID":"2dc3d032-b524-4c67-9bc1-b8d8f3554b3b","Type":"ContainerDied","Data":"9de6c01a8076df366571763c1f53ead6c965defb7ca5c26326348176a6772e61"} Dec 10 10:52:52 crc kubenswrapper[4780]: I1210 10:52:52.415377 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-crkcj" 
event={"ID":"2dc3d032-b524-4c67-9bc1-b8d8f3554b3b","Type":"ContainerStarted","Data":"526c4d23adec4705afef1fe14f067df25fbe3867393a04ce86efe1e47478c651"} Dec 10 10:52:52 crc kubenswrapper[4780]: I1210 10:52:52.417149 4780 generic.go:334] "Generic (PLEG): container finished" podID="0f530e21-48e6-453e-bb57-7d4ff179e1fc" containerID="de6eb158185eabc1df9083f6a302bc96da147c911853ac1c05d42b7017562391" exitCode=0 Dec 10 10:52:52 crc kubenswrapper[4780]: I1210 10:52:52.417385 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-hsqfx" event={"ID":"0f530e21-48e6-453e-bb57-7d4ff179e1fc","Type":"ContainerDied","Data":"de6eb158185eabc1df9083f6a302bc96da147c911853ac1c05d42b7017562391"} Dec 10 10:52:52 crc kubenswrapper[4780]: I1210 10:52:52.417481 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-hsqfx" event={"ID":"0f530e21-48e6-453e-bb57-7d4ff179e1fc","Type":"ContainerStarted","Data":"852dce7c4b9b7cab3782a5b5b6937690f77d10ae40c25be433b8126b579110dd"} Dec 10 10:52:53 crc kubenswrapper[4780]: I1210 10:52:53.427509 4780 generic.go:334] "Generic (PLEG): container finished" podID="0f530e21-48e6-453e-bb57-7d4ff179e1fc" containerID="8518155024e1e3c4b4af15eab62148ad1d97c4c9a7fd231e2fb1771f5cbc060b" exitCode=0 Dec 10 10:52:53 crc kubenswrapper[4780]: I1210 10:52:53.427595 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-hsqfx" event={"ID":"0f530e21-48e6-453e-bb57-7d4ff179e1fc","Type":"ContainerDied","Data":"8518155024e1e3c4b4af15eab62148ad1d97c4c9a7fd231e2fb1771f5cbc060b"} Dec 10 10:52:53 crc kubenswrapper[4780]: I1210 10:52:53.432317 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-59f54ccf59-fzd22" event={"ID":"ad684bf2-640b-4142-905e-7a0ec4ef5f59","Type":"ContainerStarted","Data":"9b156f4fd8423df1e295f978c78afb20613669f2651d658bce5b0bc868e354da"} Dec 10 10:52:53 crc kubenswrapper[4780]: I1210 10:52:53.432638 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-59f54ccf59-fzd22" Dec 10 10:52:53 crc kubenswrapper[4780]: I1210 10:52:53.437387 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-crkcj" event={"ID":"2dc3d032-b524-4c67-9bc1-b8d8f3554b3b","Type":"ContainerStarted","Data":"e4108e2c1f932b13e2fb7de8d846511af4941f9311cc2a0c449fbfdf1e5dc690"} Dec 10 10:52:53 crc kubenswrapper[4780]: I1210 10:52:53.439815 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-59f54ccf59-fzd22" Dec 10 10:52:53 crc kubenswrapper[4780]: I1210 10:52:53.486784 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-wmt8w"] Dec 10 10:52:53 crc kubenswrapper[4780]: I1210 10:52:53.488535 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-wmt8w" Dec 10 10:52:53 crc kubenswrapper[4780]: I1210 10:52:53.500651 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"certified-operators-dockercfg-4rs5g" Dec 10 10:52:53 crc kubenswrapper[4780]: I1210 10:52:53.504594 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-59f54ccf59-fzd22" podStartSLOduration=6.5045145170000005 podStartE2EDuration="6.504514517s" podCreationTimestamp="2025-12-10 10:52:47 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 10:52:53.50126972 +0000 UTC m=+478.354663173" watchObservedRunningTime="2025-12-10 10:52:53.504514517 +0000 UTC m=+478.357907960" Dec 10 10:52:53 crc kubenswrapper[4780]: I1210 10:52:53.517478 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-wmt8w"] Dec 10 10:52:53 crc kubenswrapper[4780]: I1210 10:52:53.629579 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7kshh\" (UniqueName: \"kubernetes.io/projected/df7d88d5-88fb-4edb-aae6-5623f2e6f6bc-kube-api-access-7kshh\") pod \"certified-operators-wmt8w\" (UID: \"df7d88d5-88fb-4edb-aae6-5623f2e6f6bc\") " pod="openshift-marketplace/certified-operators-wmt8w" Dec 10 10:52:53 crc kubenswrapper[4780]: I1210 10:52:53.629832 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/df7d88d5-88fb-4edb-aae6-5623f2e6f6bc-catalog-content\") pod \"certified-operators-wmt8w\" (UID: \"df7d88d5-88fb-4edb-aae6-5623f2e6f6bc\") " pod="openshift-marketplace/certified-operators-wmt8w" Dec 10 10:52:53 crc kubenswrapper[4780]: I1210 10:52:53.629885 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/df7d88d5-88fb-4edb-aae6-5623f2e6f6bc-utilities\") pod \"certified-operators-wmt8w\" (UID: \"df7d88d5-88fb-4edb-aae6-5623f2e6f6bc\") " pod="openshift-marketplace/certified-operators-wmt8w" Dec 10 10:52:53 crc kubenswrapper[4780]: I1210 10:52:53.734527 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/df7d88d5-88fb-4edb-aae6-5623f2e6f6bc-catalog-content\") pod \"certified-operators-wmt8w\" (UID: \"df7d88d5-88fb-4edb-aae6-5623f2e6f6bc\") " pod="openshift-marketplace/certified-operators-wmt8w" Dec 10 10:52:53 crc kubenswrapper[4780]: I1210 10:52:53.734656 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/df7d88d5-88fb-4edb-aae6-5623f2e6f6bc-utilities\") pod \"certified-operators-wmt8w\" (UID: \"df7d88d5-88fb-4edb-aae6-5623f2e6f6bc\") " pod="openshift-marketplace/certified-operators-wmt8w" Dec 10 10:52:53 crc kubenswrapper[4780]: I1210 10:52:53.734822 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7kshh\" (UniqueName: \"kubernetes.io/projected/df7d88d5-88fb-4edb-aae6-5623f2e6f6bc-kube-api-access-7kshh\") pod \"certified-operators-wmt8w\" (UID: \"df7d88d5-88fb-4edb-aae6-5623f2e6f6bc\") " pod="openshift-marketplace/certified-operators-wmt8w" Dec 10 10:52:53 crc kubenswrapper[4780]: I1210 
10:52:53.735857 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/df7d88d5-88fb-4edb-aae6-5623f2e6f6bc-utilities\") pod \"certified-operators-wmt8w\" (UID: \"df7d88d5-88fb-4edb-aae6-5623f2e6f6bc\") " pod="openshift-marketplace/certified-operators-wmt8w" Dec 10 10:52:53 crc kubenswrapper[4780]: I1210 10:52:53.736373 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/df7d88d5-88fb-4edb-aae6-5623f2e6f6bc-catalog-content\") pod \"certified-operators-wmt8w\" (UID: \"df7d88d5-88fb-4edb-aae6-5623f2e6f6bc\") " pod="openshift-marketplace/certified-operators-wmt8w" Dec 10 10:52:53 crc kubenswrapper[4780]: I1210 10:52:53.816116 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-vcdcr"] Dec 10 10:52:53 crc kubenswrapper[4780]: I1210 10:52:53.817951 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-vcdcr" Dec 10 10:52:53 crc kubenswrapper[4780]: I1210 10:52:53.822339 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"community-operators-dockercfg-dmngl" Dec 10 10:52:53 crc kubenswrapper[4780]: I1210 10:52:53.836776 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7kshh\" (UniqueName: \"kubernetes.io/projected/df7d88d5-88fb-4edb-aae6-5623f2e6f6bc-kube-api-access-7kshh\") pod \"certified-operators-wmt8w\" (UID: \"df7d88d5-88fb-4edb-aae6-5623f2e6f6bc\") " pod="openshift-marketplace/certified-operators-wmt8w" Dec 10 10:52:53 crc kubenswrapper[4780]: I1210 10:52:53.843451 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-vcdcr"] Dec 10 10:52:53 crc kubenswrapper[4780]: I1210 10:52:53.938061 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/de46d142-d66a-4f62-887e-c1a0ef0e8da5-utilities\") pod \"community-operators-vcdcr\" (UID: \"de46d142-d66a-4f62-887e-c1a0ef0e8da5\") " pod="openshift-marketplace/community-operators-vcdcr" Dec 10 10:52:53 crc kubenswrapper[4780]: I1210 10:52:53.938127 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/de46d142-d66a-4f62-887e-c1a0ef0e8da5-catalog-content\") pod \"community-operators-vcdcr\" (UID: \"de46d142-d66a-4f62-887e-c1a0ef0e8da5\") " pod="openshift-marketplace/community-operators-vcdcr" Dec 10 10:52:53 crc kubenswrapper[4780]: I1210 10:52:53.938151 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qm8lr\" (UniqueName: \"kubernetes.io/projected/de46d142-d66a-4f62-887e-c1a0ef0e8da5-kube-api-access-qm8lr\") pod \"community-operators-vcdcr\" (UID: \"de46d142-d66a-4f62-887e-c1a0ef0e8da5\") " pod="openshift-marketplace/community-operators-vcdcr" Dec 10 10:52:54 crc kubenswrapper[4780]: I1210 10:52:54.039403 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/de46d142-d66a-4f62-887e-c1a0ef0e8da5-utilities\") pod \"community-operators-vcdcr\" (UID: \"de46d142-d66a-4f62-887e-c1a0ef0e8da5\") " pod="openshift-marketplace/community-operators-vcdcr" Dec 10 10:52:54 crc kubenswrapper[4780]: I1210 10:52:54.039506 4780 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/de46d142-d66a-4f62-887e-c1a0ef0e8da5-catalog-content\") pod \"community-operators-vcdcr\" (UID: \"de46d142-d66a-4f62-887e-c1a0ef0e8da5\") " pod="openshift-marketplace/community-operators-vcdcr" Dec 10 10:52:54 crc kubenswrapper[4780]: I1210 10:52:54.039542 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qm8lr\" (UniqueName: \"kubernetes.io/projected/de46d142-d66a-4f62-887e-c1a0ef0e8da5-kube-api-access-qm8lr\") pod \"community-operators-vcdcr\" (UID: \"de46d142-d66a-4f62-887e-c1a0ef0e8da5\") " pod="openshift-marketplace/community-operators-vcdcr" Dec 10 10:52:54 crc kubenswrapper[4780]: I1210 10:52:54.040659 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/de46d142-d66a-4f62-887e-c1a0ef0e8da5-utilities\") pod \"community-operators-vcdcr\" (UID: \"de46d142-d66a-4f62-887e-c1a0ef0e8da5\") " pod="openshift-marketplace/community-operators-vcdcr" Dec 10 10:52:54 crc kubenswrapper[4780]: I1210 10:52:54.041162 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/de46d142-d66a-4f62-887e-c1a0ef0e8da5-catalog-content\") pod \"community-operators-vcdcr\" (UID: \"de46d142-d66a-4f62-887e-c1a0ef0e8da5\") " pod="openshift-marketplace/community-operators-vcdcr" Dec 10 10:52:54 crc kubenswrapper[4780]: I1210 10:52:54.066606 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qm8lr\" (UniqueName: \"kubernetes.io/projected/de46d142-d66a-4f62-887e-c1a0ef0e8da5-kube-api-access-qm8lr\") pod \"community-operators-vcdcr\" (UID: \"de46d142-d66a-4f62-887e-c1a0ef0e8da5\") " pod="openshift-marketplace/community-operators-vcdcr" Dec 10 10:52:54 crc kubenswrapper[4780]: I1210 10:52:54.122595 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-wmt8w" Dec 10 10:52:54 crc kubenswrapper[4780]: I1210 10:52:54.163611 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-vcdcr" Dec 10 10:52:54 crc kubenswrapper[4780]: I1210 10:52:54.449779 4780 generic.go:334] "Generic (PLEG): container finished" podID="2dc3d032-b524-4c67-9bc1-b8d8f3554b3b" containerID="e4108e2c1f932b13e2fb7de8d846511af4941f9311cc2a0c449fbfdf1e5dc690" exitCode=0 Dec 10 10:52:54 crc kubenswrapper[4780]: I1210 10:52:54.450318 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-crkcj" event={"ID":"2dc3d032-b524-4c67-9bc1-b8d8f3554b3b","Type":"ContainerDied","Data":"e4108e2c1f932b13e2fb7de8d846511af4941f9311cc2a0c449fbfdf1e5dc690"} Dec 10 10:52:54 crc kubenswrapper[4780]: I1210 10:52:54.460609 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-hsqfx" event={"ID":"0f530e21-48e6-453e-bb57-7d4ff179e1fc","Type":"ContainerStarted","Data":"ebdfa2567a0527d7630b6e2a20f8db0f1a2750effab0cec9945c6cbae2824046"} Dec 10 10:52:54 crc kubenswrapper[4780]: I1210 10:52:54.513682 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-hsqfx" podStartSLOduration=1.960881279 podStartE2EDuration="3.513647969s" podCreationTimestamp="2025-12-10 10:52:51 +0000 UTC" firstStartedPulling="2025-12-10 10:52:52.424077994 +0000 UTC m=+477.277471467" lastFinishedPulling="2025-12-10 10:52:53.976844714 +0000 UTC m=+478.830238157" observedRunningTime="2025-12-10 10:52:54.508783139 +0000 UTC m=+479.362176592" watchObservedRunningTime="2025-12-10 10:52:54.513647969 +0000 UTC m=+479.367041412" Dec 10 10:52:54 crc kubenswrapper[4780]: I1210 10:52:54.602972 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-wmt8w"] Dec 10 10:52:54 crc kubenswrapper[4780]: W1210 10:52:54.607202 4780 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poddf7d88d5_88fb_4edb_aae6_5623f2e6f6bc.slice/crio-ca7a209683fc259822445fe5abf48060bf4f206379da88df7f6ff422c0a9e417 WatchSource:0}: Error finding container ca7a209683fc259822445fe5abf48060bf4f206379da88df7f6ff422c0a9e417: Status 404 returned error can't find the container with id ca7a209683fc259822445fe5abf48060bf4f206379da88df7f6ff422c0a9e417 Dec 10 10:52:54 crc kubenswrapper[4780]: I1210 10:52:54.681488 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-vcdcr"] Dec 10 10:52:54 crc kubenswrapper[4780]: W1210 10:52:54.684602 4780 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podde46d142_d66a_4f62_887e_c1a0ef0e8da5.slice/crio-1bcd01189adb4033e9a1db0d51ba8e11eba39eac83764b7f0327759a39827069 WatchSource:0}: Error finding container 1bcd01189adb4033e9a1db0d51ba8e11eba39eac83764b7f0327759a39827069: Status 404 returned error can't find the container with id 1bcd01189adb4033e9a1db0d51ba8e11eba39eac83764b7f0327759a39827069 Dec 10 10:52:55 crc kubenswrapper[4780]: I1210 10:52:55.466273 4780 generic.go:334] "Generic (PLEG): container finished" podID="de46d142-d66a-4f62-887e-c1a0ef0e8da5" containerID="ff02efe19b4081ca61170e192ab0c7b0bd5433c7fbb5d9e36bd49ea890198067" exitCode=0 Dec 10 10:52:55 crc kubenswrapper[4780]: I1210 10:52:55.466327 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-vcdcr" 
event={"ID":"de46d142-d66a-4f62-887e-c1a0ef0e8da5","Type":"ContainerDied","Data":"ff02efe19b4081ca61170e192ab0c7b0bd5433c7fbb5d9e36bd49ea890198067"} Dec 10 10:52:55 crc kubenswrapper[4780]: I1210 10:52:55.466387 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-vcdcr" event={"ID":"de46d142-d66a-4f62-887e-c1a0ef0e8da5","Type":"ContainerStarted","Data":"1bcd01189adb4033e9a1db0d51ba8e11eba39eac83764b7f0327759a39827069"} Dec 10 10:52:55 crc kubenswrapper[4780]: I1210 10:52:55.470721 4780 generic.go:334] "Generic (PLEG): container finished" podID="df7d88d5-88fb-4edb-aae6-5623f2e6f6bc" containerID="14baf8fdf3169ad4d101361777d42a3e11eef5b68c5548e80538a2c910d3428e" exitCode=0 Dec 10 10:52:55 crc kubenswrapper[4780]: I1210 10:52:55.470818 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-wmt8w" event={"ID":"df7d88d5-88fb-4edb-aae6-5623f2e6f6bc","Type":"ContainerDied","Data":"14baf8fdf3169ad4d101361777d42a3e11eef5b68c5548e80538a2c910d3428e"} Dec 10 10:52:55 crc kubenswrapper[4780]: I1210 10:52:55.470850 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-wmt8w" event={"ID":"df7d88d5-88fb-4edb-aae6-5623f2e6f6bc","Type":"ContainerStarted","Data":"ca7a209683fc259822445fe5abf48060bf4f206379da88df7f6ff422c0a9e417"} Dec 10 10:52:55 crc kubenswrapper[4780]: I1210 10:52:55.476224 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-crkcj" event={"ID":"2dc3d032-b524-4c67-9bc1-b8d8f3554b3b","Type":"ContainerStarted","Data":"22a321f7dc9f6ebd49601178d44c76d6a2685274b7948e25210bc39725bd8e8b"} Dec 10 10:52:55 crc kubenswrapper[4780]: I1210 10:52:55.505731 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-crkcj" podStartSLOduration=1.905308089 podStartE2EDuration="4.505699787s" podCreationTimestamp="2025-12-10 10:52:51 +0000 UTC" firstStartedPulling="2025-12-10 10:52:52.42054369 +0000 UTC m=+477.273937133" lastFinishedPulling="2025-12-10 10:52:55.020935388 +0000 UTC m=+479.874328831" observedRunningTime="2025-12-10 10:52:55.500668393 +0000 UTC m=+480.354061856" watchObservedRunningTime="2025-12-10 10:52:55.505699787 +0000 UTC m=+480.359093230" Dec 10 10:52:55 crc kubenswrapper[4780]: I1210 10:52:55.954152 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-image-registry/image-registry-66df7c8f76-l9h98" Dec 10 10:52:56 crc kubenswrapper[4780]: I1210 10:52:56.034367 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-25gsf"] Dec 10 10:52:56 crc kubenswrapper[4780]: I1210 10:52:56.506010 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-wmt8w" event={"ID":"df7d88d5-88fb-4edb-aae6-5623f2e6f6bc","Type":"ContainerStarted","Data":"254853d6640a482670ff1e7b5568e92dab31de654e4298df88341b13c514c15d"} Dec 10 10:52:57 crc kubenswrapper[4780]: I1210 10:52:57.475523 4780 patch_prober.go:28] interesting pod/machine-config-daemon-xhdr5 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 10 10:52:57 crc kubenswrapper[4780]: I1210 10:52:57.475979 4780 prober.go:107] "Probe failed" probeType="Liveness" 
pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" podUID="6bf1dca1-b191-4796-b326-baac53e84045" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 10 10:52:57 crc kubenswrapper[4780]: I1210 10:52:57.476039 4780 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" Dec 10 10:52:57 crc kubenswrapper[4780]: I1210 10:52:57.476837 4780 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"6ebc39bea1992f54cd24fdfecca195ad14903b8ade84ac83330f2cc7cf317153"} pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Dec 10 10:52:57 crc kubenswrapper[4780]: I1210 10:52:57.477230 4780 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" podUID="6bf1dca1-b191-4796-b326-baac53e84045" containerName="machine-config-daemon" containerID="cri-o://6ebc39bea1992f54cd24fdfecca195ad14903b8ade84ac83330f2cc7cf317153" gracePeriod=600 Dec 10 10:52:57 crc kubenswrapper[4780]: I1210 10:52:57.515105 4780 generic.go:334] "Generic (PLEG): container finished" podID="de46d142-d66a-4f62-887e-c1a0ef0e8da5" containerID="c0df7a5fcbd338f46870ed3a37bc6018eef458265689033ea73474d5e4edfbeb" exitCode=0 Dec 10 10:52:57 crc kubenswrapper[4780]: I1210 10:52:57.515198 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-vcdcr" event={"ID":"de46d142-d66a-4f62-887e-c1a0ef0e8da5","Type":"ContainerDied","Data":"c0df7a5fcbd338f46870ed3a37bc6018eef458265689033ea73474d5e4edfbeb"} Dec 10 10:52:57 crc kubenswrapper[4780]: I1210 10:52:57.521205 4780 generic.go:334] "Generic (PLEG): container finished" podID="df7d88d5-88fb-4edb-aae6-5623f2e6f6bc" containerID="254853d6640a482670ff1e7b5568e92dab31de654e4298df88341b13c514c15d" exitCode=0 Dec 10 10:52:57 crc kubenswrapper[4780]: I1210 10:52:57.521280 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-wmt8w" event={"ID":"df7d88d5-88fb-4edb-aae6-5623f2e6f6bc","Type":"ContainerDied","Data":"254853d6640a482670ff1e7b5568e92dab31de654e4298df88341b13c514c15d"} Dec 10 10:52:58 crc kubenswrapper[4780]: I1210 10:52:58.531384 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-wmt8w" event={"ID":"df7d88d5-88fb-4edb-aae6-5623f2e6f6bc","Type":"ContainerStarted","Data":"694893effdd9d9f8acbc980e4eaa74344498e62666a9cf69958fc2d879117967"} Dec 10 10:52:58 crc kubenswrapper[4780]: I1210 10:52:58.535336 4780 generic.go:334] "Generic (PLEG): container finished" podID="6bf1dca1-b191-4796-b326-baac53e84045" containerID="6ebc39bea1992f54cd24fdfecca195ad14903b8ade84ac83330f2cc7cf317153" exitCode=0 Dec 10 10:52:58 crc kubenswrapper[4780]: I1210 10:52:58.535397 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" event={"ID":"6bf1dca1-b191-4796-b326-baac53e84045","Type":"ContainerDied","Data":"6ebc39bea1992f54cd24fdfecca195ad14903b8ade84ac83330f2cc7cf317153"} Dec 10 10:52:58 crc kubenswrapper[4780]: I1210 10:52:58.535430 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" event={"ID":"6bf1dca1-b191-4796-b326-baac53e84045","Type":"ContainerStarted","Data":"36d00436bbc9ae1da1897b8b9f2c3475af18239ee7063fe90cc695128e282bd3"} Dec 10 10:52:58 crc kubenswrapper[4780]: I1210 10:52:58.535453 4780 scope.go:117] "RemoveContainer" containerID="57ec3f3a4da992b4288db7d6633da18e479fdc56b10c503a2a44368671882f0d" Dec 10 10:52:58 crc kubenswrapper[4780]: I1210 10:52:58.562181 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-wmt8w" podStartSLOduration=3.001375398 podStartE2EDuration="5.56215494s" podCreationTimestamp="2025-12-10 10:52:53 +0000 UTC" firstStartedPulling="2025-12-10 10:52:55.472225166 +0000 UTC m=+480.325618609" lastFinishedPulling="2025-12-10 10:52:58.033004708 +0000 UTC m=+482.886398151" observedRunningTime="2025-12-10 10:52:58.560077044 +0000 UTC m=+483.413470497" watchObservedRunningTime="2025-12-10 10:52:58.56215494 +0000 UTC m=+483.415548383" Dec 10 10:53:00 crc kubenswrapper[4780]: I1210 10:53:00.554021 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-vcdcr" event={"ID":"de46d142-d66a-4f62-887e-c1a0ef0e8da5","Type":"ContainerStarted","Data":"774c1e8282384094683aeab08d788fc2078cf3cd9783368b177801d5c3749d7e"} Dec 10 10:53:00 crc kubenswrapper[4780]: I1210 10:53:00.576143 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-vcdcr" podStartSLOduration=4.677304546 podStartE2EDuration="7.576112861s" podCreationTimestamp="2025-12-10 10:52:53 +0000 UTC" firstStartedPulling="2025-12-10 10:52:55.468454395 +0000 UTC m=+480.321847828" lastFinishedPulling="2025-12-10 10:52:58.3672627 +0000 UTC m=+483.220656143" observedRunningTime="2025-12-10 10:53:00.575250558 +0000 UTC m=+485.428644021" watchObservedRunningTime="2025-12-10 10:53:00.576112861 +0000 UTC m=+485.429506304" Dec 10 10:53:01 crc kubenswrapper[4780]: I1210 10:53:01.418099 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-hsqfx" Dec 10 10:53:01 crc kubenswrapper[4780]: I1210 10:53:01.418515 4780 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-hsqfx" Dec 10 10:53:01 crc kubenswrapper[4780]: I1210 10:53:01.477718 4780 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-hsqfx" Dec 10 10:53:01 crc kubenswrapper[4780]: I1210 10:53:01.607574 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-crkcj" Dec 10 10:53:01 crc kubenswrapper[4780]: I1210 10:53:01.607636 4780 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-crkcj" Dec 10 10:53:01 crc kubenswrapper[4780]: I1210 10:53:01.608780 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-hsqfx" Dec 10 10:53:01 crc kubenswrapper[4780]: I1210 10:53:01.659526 4780 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-crkcj" Dec 10 10:53:02 crc kubenswrapper[4780]: I1210 10:53:02.623689 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-crkcj" Dec 10 10:53:04 crc kubenswrapper[4780]: I1210 10:53:04.123742 4780 
kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-wmt8w" Dec 10 10:53:04 crc kubenswrapper[4780]: I1210 10:53:04.124363 4780 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-wmt8w" Dec 10 10:53:04 crc kubenswrapper[4780]: I1210 10:53:04.164627 4780 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-vcdcr" Dec 10 10:53:04 crc kubenswrapper[4780]: I1210 10:53:04.164798 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-vcdcr" Dec 10 10:53:04 crc kubenswrapper[4780]: I1210 10:53:04.181970 4780 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-wmt8w" Dec 10 10:53:04 crc kubenswrapper[4780]: I1210 10:53:04.217850 4780 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-vcdcr" Dec 10 10:53:04 crc kubenswrapper[4780]: I1210 10:53:04.635702 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-wmt8w" Dec 10 10:53:04 crc kubenswrapper[4780]: I1210 10:53:04.640948 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-vcdcr" Dec 10 10:53:20 crc kubenswrapper[4780]: I1210 10:53:20.365184 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-monitoring/cluster-monitoring-operator-6d5b84845-8nk9m"] Dec 10 10:53:20 crc kubenswrapper[4780]: I1210 10:53:20.419812 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-monitoring/cluster-monitoring-operator-6d5b84845-8nk9m" Dec 10 10:53:20 crc kubenswrapper[4780]: I1210 10:53:20.423364 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"cluster-monitoring-operator-tls" Dec 10 10:53:20 crc kubenswrapper[4780]: I1210 10:53:20.424911 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-monitoring"/"openshift-service-ca.crt" Dec 10 10:53:20 crc kubenswrapper[4780]: I1210 10:53:20.425645 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-monitoring"/"telemetry-config" Dec 10 10:53:20 crc kubenswrapper[4780]: I1210 10:53:20.428807 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"cluster-monitoring-operator-dockercfg-wwt9l" Dec 10 10:53:20 crc kubenswrapper[4780]: I1210 10:53:20.434489 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-monitoring/cluster-monitoring-operator-6d5b84845-8nk9m"] Dec 10 10:53:20 crc kubenswrapper[4780]: I1210 10:53:20.434655 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-monitoring"/"kube-root-ca.crt" Dec 10 10:53:20 crc kubenswrapper[4780]: I1210 10:53:20.463828 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"telemetry-config\" (UniqueName: \"kubernetes.io/configmap/a429b657-da8c-4e9c-bd76-fb8e06d71649-telemetry-config\") pod \"cluster-monitoring-operator-6d5b84845-8nk9m\" (UID: \"a429b657-da8c-4e9c-bd76-fb8e06d71649\") " pod="openshift-monitoring/cluster-monitoring-operator-6d5b84845-8nk9m" Dec 10 10:53:20 crc kubenswrapper[4780]: I1210 10:53:20.463907 4780 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5qzl6\" (UniqueName: \"kubernetes.io/projected/a429b657-da8c-4e9c-bd76-fb8e06d71649-kube-api-access-5qzl6\") pod \"cluster-monitoring-operator-6d5b84845-8nk9m\" (UID: \"a429b657-da8c-4e9c-bd76-fb8e06d71649\") " pod="openshift-monitoring/cluster-monitoring-operator-6d5b84845-8nk9m" Dec 10 10:53:20 crc kubenswrapper[4780]: I1210 10:53:20.464335 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cluster-monitoring-operator-tls\" (UniqueName: \"kubernetes.io/secret/a429b657-da8c-4e9c-bd76-fb8e06d71649-cluster-monitoring-operator-tls\") pod \"cluster-monitoring-operator-6d5b84845-8nk9m\" (UID: \"a429b657-da8c-4e9c-bd76-fb8e06d71649\") " pod="openshift-monitoring/cluster-monitoring-operator-6d5b84845-8nk9m" Dec 10 10:53:20 crc kubenswrapper[4780]: I1210 10:53:20.566189 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cluster-monitoring-operator-tls\" (UniqueName: \"kubernetes.io/secret/a429b657-da8c-4e9c-bd76-fb8e06d71649-cluster-monitoring-operator-tls\") pod \"cluster-monitoring-operator-6d5b84845-8nk9m\" (UID: \"a429b657-da8c-4e9c-bd76-fb8e06d71649\") " pod="openshift-monitoring/cluster-monitoring-operator-6d5b84845-8nk9m" Dec 10 10:53:20 crc kubenswrapper[4780]: I1210 10:53:20.566268 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"telemetry-config\" (UniqueName: \"kubernetes.io/configmap/a429b657-da8c-4e9c-bd76-fb8e06d71649-telemetry-config\") pod \"cluster-monitoring-operator-6d5b84845-8nk9m\" (UID: \"a429b657-da8c-4e9c-bd76-fb8e06d71649\") " pod="openshift-monitoring/cluster-monitoring-operator-6d5b84845-8nk9m" Dec 10 10:53:20 crc kubenswrapper[4780]: I1210 10:53:20.566302 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5qzl6\" (UniqueName: \"kubernetes.io/projected/a429b657-da8c-4e9c-bd76-fb8e06d71649-kube-api-access-5qzl6\") pod \"cluster-monitoring-operator-6d5b84845-8nk9m\" (UID: \"a429b657-da8c-4e9c-bd76-fb8e06d71649\") " pod="openshift-monitoring/cluster-monitoring-operator-6d5b84845-8nk9m" Dec 10 10:53:20 crc kubenswrapper[4780]: I1210 10:53:20.567796 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"telemetry-config\" (UniqueName: \"kubernetes.io/configmap/a429b657-da8c-4e9c-bd76-fb8e06d71649-telemetry-config\") pod \"cluster-monitoring-operator-6d5b84845-8nk9m\" (UID: \"a429b657-da8c-4e9c-bd76-fb8e06d71649\") " pod="openshift-monitoring/cluster-monitoring-operator-6d5b84845-8nk9m" Dec 10 10:53:20 crc kubenswrapper[4780]: I1210 10:53:20.574359 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cluster-monitoring-operator-tls\" (UniqueName: \"kubernetes.io/secret/a429b657-da8c-4e9c-bd76-fb8e06d71649-cluster-monitoring-operator-tls\") pod \"cluster-monitoring-operator-6d5b84845-8nk9m\" (UID: \"a429b657-da8c-4e9c-bd76-fb8e06d71649\") " pod="openshift-monitoring/cluster-monitoring-operator-6d5b84845-8nk9m" Dec 10 10:53:20 crc kubenswrapper[4780]: I1210 10:53:20.583774 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5qzl6\" (UniqueName: \"kubernetes.io/projected/a429b657-da8c-4e9c-bd76-fb8e06d71649-kube-api-access-5qzl6\") pod \"cluster-monitoring-operator-6d5b84845-8nk9m\" (UID: \"a429b657-da8c-4e9c-bd76-fb8e06d71649\") " pod="openshift-monitoring/cluster-monitoring-operator-6d5b84845-8nk9m" Dec 10 10:53:20 crc kubenswrapper[4780]: I1210 
10:53:20.746645 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-monitoring/cluster-monitoring-operator-6d5b84845-8nk9m" Dec 10 10:53:21 crc kubenswrapper[4780]: I1210 10:53:21.077060 4780 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-image-registry/image-registry-697d97f7c8-25gsf" podUID="b7abac51-adc5-42fa-9084-033e4e7e7acb" containerName="registry" containerID="cri-o://a94d644d3444419d081379b16721f67e94d462caab5fc48fc02aff3a2f18b54d" gracePeriod=30 Dec 10 10:53:21 crc kubenswrapper[4780]: I1210 10:53:21.186442 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-monitoring/cluster-monitoring-operator-6d5b84845-8nk9m"] Dec 10 10:53:21 crc kubenswrapper[4780]: I1210 10:53:21.291060 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/cluster-monitoring-operator-6d5b84845-8nk9m" event={"ID":"a429b657-da8c-4e9c-bd76-fb8e06d71649","Type":"ContainerStarted","Data":"68ed0d87cdfc4d93851eeffb6269aeca6daefad74ba7c5913b08b9756fad2976"} Dec 10 10:53:21 crc kubenswrapper[4780]: I1210 10:53:21.292816 4780 generic.go:334] "Generic (PLEG): container finished" podID="b7abac51-adc5-42fa-9084-033e4e7e7acb" containerID="a94d644d3444419d081379b16721f67e94d462caab5fc48fc02aff3a2f18b54d" exitCode=0 Dec 10 10:53:21 crc kubenswrapper[4780]: I1210 10:53:21.292858 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-25gsf" event={"ID":"b7abac51-adc5-42fa-9084-033e4e7e7acb","Type":"ContainerDied","Data":"a94d644d3444419d081379b16721f67e94d462caab5fc48fc02aff3a2f18b54d"} Dec 10 10:53:21 crc kubenswrapper[4780]: I1210 10:53:21.528478 4780 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-25gsf" Dec 10 10:53:21 crc kubenswrapper[4780]: I1210 10:53:21.691624 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/b7abac51-adc5-42fa-9084-033e4e7e7acb-ca-trust-extracted\") pod \"b7abac51-adc5-42fa-9084-033e4e7e7acb\" (UID: \"b7abac51-adc5-42fa-9084-033e4e7e7acb\") " Dec 10 10:53:21 crc kubenswrapper[4780]: I1210 10:53:21.691754 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/b7abac51-adc5-42fa-9084-033e4e7e7acb-installation-pull-secrets\") pod \"b7abac51-adc5-42fa-9084-033e4e7e7acb\" (UID: \"b7abac51-adc5-42fa-9084-033e4e7e7acb\") " Dec 10 10:53:21 crc kubenswrapper[4780]: I1210 10:53:21.691789 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/b7abac51-adc5-42fa-9084-033e4e7e7acb-registry-certificates\") pod \"b7abac51-adc5-42fa-9084-033e4e7e7acb\" (UID: \"b7abac51-adc5-42fa-9084-033e4e7e7acb\") " Dec 10 10:53:21 crc kubenswrapper[4780]: I1210 10:53:21.691868 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-smgl2\" (UniqueName: \"kubernetes.io/projected/b7abac51-adc5-42fa-9084-033e4e7e7acb-kube-api-access-smgl2\") pod \"b7abac51-adc5-42fa-9084-033e4e7e7acb\" (UID: \"b7abac51-adc5-42fa-9084-033e4e7e7acb\") " Dec 10 10:53:21 crc kubenswrapper[4780]: I1210 10:53:21.691894 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: 
\"kubernetes.io/projected/b7abac51-adc5-42fa-9084-033e4e7e7acb-bound-sa-token\") pod \"b7abac51-adc5-42fa-9084-033e4e7e7acb\" (UID: \"b7abac51-adc5-42fa-9084-033e4e7e7acb\") " Dec 10 10:53:21 crc kubenswrapper[4780]: I1210 10:53:21.691948 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/b7abac51-adc5-42fa-9084-033e4e7e7acb-registry-tls\") pod \"b7abac51-adc5-42fa-9084-033e4e7e7acb\" (UID: \"b7abac51-adc5-42fa-9084-033e4e7e7acb\") " Dec 10 10:53:21 crc kubenswrapper[4780]: I1210 10:53:21.691993 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b7abac51-adc5-42fa-9084-033e4e7e7acb-trusted-ca\") pod \"b7abac51-adc5-42fa-9084-033e4e7e7acb\" (UID: \"b7abac51-adc5-42fa-9084-033e4e7e7acb\") " Dec 10 10:53:21 crc kubenswrapper[4780]: I1210 10:53:21.692308 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-storage\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"b7abac51-adc5-42fa-9084-033e4e7e7acb\" (UID: \"b7abac51-adc5-42fa-9084-033e4e7e7acb\") " Dec 10 10:53:21 crc kubenswrapper[4780]: I1210 10:53:21.693334 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b7abac51-adc5-42fa-9084-033e4e7e7acb-registry-certificates" (OuterVolumeSpecName: "registry-certificates") pod "b7abac51-adc5-42fa-9084-033e4e7e7acb" (UID: "b7abac51-adc5-42fa-9084-033e4e7e7acb"). InnerVolumeSpecName "registry-certificates". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 10:53:21 crc kubenswrapper[4780]: I1210 10:53:21.693800 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b7abac51-adc5-42fa-9084-033e4e7e7acb-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "b7abac51-adc5-42fa-9084-033e4e7e7acb" (UID: "b7abac51-adc5-42fa-9084-033e4e7e7acb"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 10:53:21 crc kubenswrapper[4780]: I1210 10:53:21.701303 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b7abac51-adc5-42fa-9084-033e4e7e7acb-installation-pull-secrets" (OuterVolumeSpecName: "installation-pull-secrets") pod "b7abac51-adc5-42fa-9084-033e4e7e7acb" (UID: "b7abac51-adc5-42fa-9084-033e4e7e7acb"). InnerVolumeSpecName "installation-pull-secrets". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 10:53:21 crc kubenswrapper[4780]: I1210 10:53:21.703554 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b7abac51-adc5-42fa-9084-033e4e7e7acb-kube-api-access-smgl2" (OuterVolumeSpecName: "kube-api-access-smgl2") pod "b7abac51-adc5-42fa-9084-033e4e7e7acb" (UID: "b7abac51-adc5-42fa-9084-033e4e7e7acb"). InnerVolumeSpecName "kube-api-access-smgl2". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 10:53:21 crc kubenswrapper[4780]: I1210 10:53:21.705440 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b7abac51-adc5-42fa-9084-033e4e7e7acb-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "b7abac51-adc5-42fa-9084-033e4e7e7acb" (UID: "b7abac51-adc5-42fa-9084-033e4e7e7acb"). InnerVolumeSpecName "bound-sa-token". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 10:53:21 crc kubenswrapper[4780]: I1210 10:53:21.705850 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b7abac51-adc5-42fa-9084-033e4e7e7acb-registry-tls" (OuterVolumeSpecName: "registry-tls") pod "b7abac51-adc5-42fa-9084-033e4e7e7acb" (UID: "b7abac51-adc5-42fa-9084-033e4e7e7acb"). InnerVolumeSpecName "registry-tls". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 10:53:21 crc kubenswrapper[4780]: I1210 10:53:21.710521 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (OuterVolumeSpecName: "registry-storage") pod "b7abac51-adc5-42fa-9084-033e4e7e7acb" (UID: "b7abac51-adc5-42fa-9084-033e4e7e7acb"). InnerVolumeSpecName "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8". PluginName "kubernetes.io/csi", VolumeGidValue "" Dec 10 10:53:21 crc kubenswrapper[4780]: I1210 10:53:21.715183 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b7abac51-adc5-42fa-9084-033e4e7e7acb-ca-trust-extracted" (OuterVolumeSpecName: "ca-trust-extracted") pod "b7abac51-adc5-42fa-9084-033e4e7e7acb" (UID: "b7abac51-adc5-42fa-9084-033e4e7e7acb"). InnerVolumeSpecName "ca-trust-extracted". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 10:53:21 crc kubenswrapper[4780]: I1210 10:53:21.794440 4780 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/b7abac51-adc5-42fa-9084-033e4e7e7acb-bound-sa-token\") on node \"crc\" DevicePath \"\"" Dec 10 10:53:21 crc kubenswrapper[4780]: I1210 10:53:21.794707 4780 reconciler_common.go:293] "Volume detached for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/b7abac51-adc5-42fa-9084-033e4e7e7acb-registry-tls\") on node \"crc\" DevicePath \"\"" Dec 10 10:53:21 crc kubenswrapper[4780]: I1210 10:53:21.794717 4780 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b7abac51-adc5-42fa-9084-033e4e7e7acb-trusted-ca\") on node \"crc\" DevicePath \"\"" Dec 10 10:53:21 crc kubenswrapper[4780]: I1210 10:53:21.794726 4780 reconciler_common.go:293] "Volume detached for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/b7abac51-adc5-42fa-9084-033e4e7e7acb-ca-trust-extracted\") on node \"crc\" DevicePath \"\"" Dec 10 10:53:21 crc kubenswrapper[4780]: I1210 10:53:21.794736 4780 reconciler_common.go:293] "Volume detached for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/b7abac51-adc5-42fa-9084-033e4e7e7acb-installation-pull-secrets\") on node \"crc\" DevicePath \"\"" Dec 10 10:53:21 crc kubenswrapper[4780]: I1210 10:53:21.794748 4780 reconciler_common.go:293] "Volume detached for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/b7abac51-adc5-42fa-9084-033e4e7e7acb-registry-certificates\") on node \"crc\" DevicePath \"\"" Dec 10 10:53:21 crc kubenswrapper[4780]: I1210 10:53:21.794757 4780 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-smgl2\" (UniqueName: \"kubernetes.io/projected/b7abac51-adc5-42fa-9084-033e4e7e7acb-kube-api-access-smgl2\") on node \"crc\" DevicePath \"\"" Dec 10 10:53:22 crc kubenswrapper[4780]: I1210 10:53:22.301697 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-25gsf" 
event={"ID":"b7abac51-adc5-42fa-9084-033e4e7e7acb","Type":"ContainerDied","Data":"8741fac9d147b7aa199416b76d1e641d7a6af31295564264120a8d9720d1d0b3"} Dec 10 10:53:22 crc kubenswrapper[4780]: I1210 10:53:22.301802 4780 scope.go:117] "RemoveContainer" containerID="a94d644d3444419d081379b16721f67e94d462caab5fc48fc02aff3a2f18b54d" Dec 10 10:53:22 crc kubenswrapper[4780]: I1210 10:53:22.301816 4780 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-25gsf" Dec 10 10:53:22 crc kubenswrapper[4780]: I1210 10:53:22.329965 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-25gsf"] Dec 10 10:53:22 crc kubenswrapper[4780]: I1210 10:53:22.336764 4780 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-25gsf"] Dec 10 10:53:23 crc kubenswrapper[4780]: I1210 10:53:23.966284 4780 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b7abac51-adc5-42fa-9084-033e4e7e7acb" path="/var/lib/kubelet/pods/b7abac51-adc5-42fa-9084-033e4e7e7acb/volumes" Dec 10 10:53:24 crc kubenswrapper[4780]: I1210 10:53:24.471363 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/cluster-monitoring-operator-6d5b84845-8nk9m" event={"ID":"a429b657-da8c-4e9c-bd76-fb8e06d71649","Type":"ContainerStarted","Data":"9b77e49d333d694307cbad0a90cfe45c4c0f33be493f79240ce410363df76ad1"} Dec 10 10:53:24 crc kubenswrapper[4780]: I1210 10:53:24.514808 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-monitoring/cluster-monitoring-operator-6d5b84845-8nk9m" podStartSLOduration=2.087576656 podStartE2EDuration="4.514745151s" podCreationTimestamp="2025-12-10 10:53:20 +0000 UTC" firstStartedPulling="2025-12-10 10:53:21.202990979 +0000 UTC m=+506.056384422" lastFinishedPulling="2025-12-10 10:53:23.630159474 +0000 UTC m=+508.483552917" observedRunningTime="2025-12-10 10:53:24.51096078 +0000 UTC m=+509.364354233" watchObservedRunningTime="2025-12-10 10:53:24.514745151 +0000 UTC m=+509.368138594" Dec 10 10:53:24 crc kubenswrapper[4780]: I1210 10:53:24.556671 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-monitoring/prometheus-operator-admission-webhook-f54c54754-fnznn"] Dec 10 10:53:24 crc kubenswrapper[4780]: E1210 10:53:24.557186 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b7abac51-adc5-42fa-9084-033e4e7e7acb" containerName="registry" Dec 10 10:53:24 crc kubenswrapper[4780]: I1210 10:53:24.557265 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="b7abac51-adc5-42fa-9084-033e4e7e7acb" containerName="registry" Dec 10 10:53:24 crc kubenswrapper[4780]: I1210 10:53:24.557501 4780 memory_manager.go:354] "RemoveStaleState removing state" podUID="b7abac51-adc5-42fa-9084-033e4e7e7acb" containerName="registry" Dec 10 10:53:24 crc kubenswrapper[4780]: I1210 10:53:24.558287 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-monitoring/prometheus-operator-admission-webhook-f54c54754-fnznn" Dec 10 10:53:24 crc kubenswrapper[4780]: I1210 10:53:24.560840 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"prometheus-operator-admission-webhook-dockercfg-xdl7p" Dec 10 10:53:24 crc kubenswrapper[4780]: I1210 10:53:24.562131 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"prometheus-operator-admission-webhook-tls" Dec 10 10:53:24 crc kubenswrapper[4780]: I1210 10:53:24.576782 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-monitoring/prometheus-operator-admission-webhook-f54c54754-fnznn"] Dec 10 10:53:24 crc kubenswrapper[4780]: I1210 10:53:24.587252 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tls-certificates\" (UniqueName: \"kubernetes.io/secret/2abfea71-0ea5-4ca2-9dc9-7a8e7a1c4c73-tls-certificates\") pod \"prometheus-operator-admission-webhook-f54c54754-fnznn\" (UID: \"2abfea71-0ea5-4ca2-9dc9-7a8e7a1c4c73\") " pod="openshift-monitoring/prometheus-operator-admission-webhook-f54c54754-fnznn" Dec 10 10:53:24 crc kubenswrapper[4780]: I1210 10:53:24.689148 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-certificates\" (UniqueName: \"kubernetes.io/secret/2abfea71-0ea5-4ca2-9dc9-7a8e7a1c4c73-tls-certificates\") pod \"prometheus-operator-admission-webhook-f54c54754-fnznn\" (UID: \"2abfea71-0ea5-4ca2-9dc9-7a8e7a1c4c73\") " pod="openshift-monitoring/prometheus-operator-admission-webhook-f54c54754-fnznn" Dec 10 10:53:24 crc kubenswrapper[4780]: E1210 10:53:24.689436 4780 secret.go:188] Couldn't get secret openshift-monitoring/prometheus-operator-admission-webhook-tls: secret "prometheus-operator-admission-webhook-tls" not found Dec 10 10:53:24 crc kubenswrapper[4780]: E1210 10:53:24.689657 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/2abfea71-0ea5-4ca2-9dc9-7a8e7a1c4c73-tls-certificates podName:2abfea71-0ea5-4ca2-9dc9-7a8e7a1c4c73 nodeName:}" failed. No retries permitted until 2025-12-10 10:53:25.189584457 +0000 UTC m=+510.042977900 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "tls-certificates" (UniqueName: "kubernetes.io/secret/2abfea71-0ea5-4ca2-9dc9-7a8e7a1c4c73-tls-certificates") pod "prometheus-operator-admission-webhook-f54c54754-fnznn" (UID: "2abfea71-0ea5-4ca2-9dc9-7a8e7a1c4c73") : secret "prometheus-operator-admission-webhook-tls" not found Dec 10 10:53:25 crc kubenswrapper[4780]: I1210 10:53:25.197727 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-certificates\" (UniqueName: \"kubernetes.io/secret/2abfea71-0ea5-4ca2-9dc9-7a8e7a1c4c73-tls-certificates\") pod \"prometheus-operator-admission-webhook-f54c54754-fnznn\" (UID: \"2abfea71-0ea5-4ca2-9dc9-7a8e7a1c4c73\") " pod="openshift-monitoring/prometheus-operator-admission-webhook-f54c54754-fnznn" Dec 10 10:53:25 crc kubenswrapper[4780]: I1210 10:53:25.206023 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tls-certificates\" (UniqueName: \"kubernetes.io/secret/2abfea71-0ea5-4ca2-9dc9-7a8e7a1c4c73-tls-certificates\") pod \"prometheus-operator-admission-webhook-f54c54754-fnznn\" (UID: \"2abfea71-0ea5-4ca2-9dc9-7a8e7a1c4c73\") " pod="openshift-monitoring/prometheus-operator-admission-webhook-f54c54754-fnznn" Dec 10 10:53:25 crc kubenswrapper[4780]: I1210 10:53:25.661150 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-monitoring/prometheus-operator-admission-webhook-f54c54754-fnznn" Dec 10 10:53:26 crc kubenswrapper[4780]: I1210 10:53:26.101714 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-monitoring/prometheus-operator-admission-webhook-f54c54754-fnznn"] Dec 10 10:53:26 crc kubenswrapper[4780]: W1210 10:53:26.109995 4780 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod2abfea71_0ea5_4ca2_9dc9_7a8e7a1c4c73.slice/crio-23a700ac17bf969f21e783f4076157af0ee4674e3b11875708762ecbe4d0a20f WatchSource:0}: Error finding container 23a700ac17bf969f21e783f4076157af0ee4674e3b11875708762ecbe4d0a20f: Status 404 returned error can't find the container with id 23a700ac17bf969f21e783f4076157af0ee4674e3b11875708762ecbe4d0a20f Dec 10 10:53:26 crc kubenswrapper[4780]: I1210 10:53:26.674646 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/prometheus-operator-admission-webhook-f54c54754-fnznn" event={"ID":"2abfea71-0ea5-4ca2-9dc9-7a8e7a1c4c73","Type":"ContainerStarted","Data":"23a700ac17bf969f21e783f4076157af0ee4674e3b11875708762ecbe4d0a20f"} Dec 10 10:53:29 crc kubenswrapper[4780]: I1210 10:53:29.705818 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/prometheus-operator-admission-webhook-f54c54754-fnznn" event={"ID":"2abfea71-0ea5-4ca2-9dc9-7a8e7a1c4c73","Type":"ContainerStarted","Data":"f73f456e2e42532c9f8f6899f00a45a36772a050122174470cce5a5b3ff70c2c"} Dec 10 10:53:29 crc kubenswrapper[4780]: I1210 10:53:29.707692 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-monitoring/prometheus-operator-admission-webhook-f54c54754-fnznn" Dec 10 10:53:29 crc kubenswrapper[4780]: I1210 10:53:29.714569 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-monitoring/prometheus-operator-admission-webhook-f54c54754-fnznn" Dec 10 10:53:29 crc kubenswrapper[4780]: I1210 10:53:29.741092 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-monitoring/prometheus-operator-admission-webhook-f54c54754-fnznn" 
podStartSLOduration=3.40136853 podStartE2EDuration="5.741060336s" podCreationTimestamp="2025-12-10 10:53:24 +0000 UTC" firstStartedPulling="2025-12-10 10:53:26.342112243 +0000 UTC m=+511.195505686" lastFinishedPulling="2025-12-10 10:53:28.681804049 +0000 UTC m=+513.535197492" observedRunningTime="2025-12-10 10:53:29.733269719 +0000 UTC m=+514.586663192" watchObservedRunningTime="2025-12-10 10:53:29.741060336 +0000 UTC m=+514.594453779" Dec 10 10:53:30 crc kubenswrapper[4780]: I1210 10:53:30.597394 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-monitoring/prometheus-operator-db54df47d-2xl5d"] Dec 10 10:53:30 crc kubenswrapper[4780]: I1210 10:53:30.598445 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-monitoring/prometheus-operator-db54df47d-2xl5d" Dec 10 10:53:30 crc kubenswrapper[4780]: I1210 10:53:30.604010 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"prometheus-operator-kube-rbac-proxy-config" Dec 10 10:53:30 crc kubenswrapper[4780]: I1210 10:53:30.604577 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"prometheus-operator-tls" Dec 10 10:53:30 crc kubenswrapper[4780]: I1210 10:53:30.604612 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"prometheus-operator-dockercfg-f5xxm" Dec 10 10:53:30 crc kubenswrapper[4780]: I1210 10:53:30.604678 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-monitoring"/"metrics-client-ca" Dec 10 10:53:30 crc kubenswrapper[4780]: I1210 10:53:30.611381 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-monitoring/prometheus-operator-db54df47d-2xl5d"] Dec 10 10:53:30 crc kubenswrapper[4780]: I1210 10:53:30.620231 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"prometheus-operator-tls\" (UniqueName: \"kubernetes.io/secret/cca099c8-c01e-4c66-83da-53ddb2bd4e3f-prometheus-operator-tls\") pod \"prometheus-operator-db54df47d-2xl5d\" (UID: \"cca099c8-c01e-4c66-83da-53ddb2bd4e3f\") " pod="openshift-monitoring/prometheus-operator-db54df47d-2xl5d" Dec 10 10:53:30 crc kubenswrapper[4780]: I1210 10:53:30.620298 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"prometheus-operator-kube-rbac-proxy-config\" (UniqueName: \"kubernetes.io/secret/cca099c8-c01e-4c66-83da-53ddb2bd4e3f-prometheus-operator-kube-rbac-proxy-config\") pod \"prometheus-operator-db54df47d-2xl5d\" (UID: \"cca099c8-c01e-4c66-83da-53ddb2bd4e3f\") " pod="openshift-monitoring/prometheus-operator-db54df47d-2xl5d" Dec 10 10:53:30 crc kubenswrapper[4780]: I1210 10:53:30.620360 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-client-ca\" (UniqueName: \"kubernetes.io/configmap/cca099c8-c01e-4c66-83da-53ddb2bd4e3f-metrics-client-ca\") pod \"prometheus-operator-db54df47d-2xl5d\" (UID: \"cca099c8-c01e-4c66-83da-53ddb2bd4e3f\") " pod="openshift-monitoring/prometheus-operator-db54df47d-2xl5d" Dec 10 10:53:30 crc kubenswrapper[4780]: I1210 10:53:30.620671 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hk57p\" (UniqueName: \"kubernetes.io/projected/cca099c8-c01e-4c66-83da-53ddb2bd4e3f-kube-api-access-hk57p\") pod \"prometheus-operator-db54df47d-2xl5d\" (UID: \"cca099c8-c01e-4c66-83da-53ddb2bd4e3f\") " 
pod="openshift-monitoring/prometheus-operator-db54df47d-2xl5d" Dec 10 10:53:30 crc kubenswrapper[4780]: I1210 10:53:30.725471 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"prometheus-operator-tls\" (UniqueName: \"kubernetes.io/secret/cca099c8-c01e-4c66-83da-53ddb2bd4e3f-prometheus-operator-tls\") pod \"prometheus-operator-db54df47d-2xl5d\" (UID: \"cca099c8-c01e-4c66-83da-53ddb2bd4e3f\") " pod="openshift-monitoring/prometheus-operator-db54df47d-2xl5d" Dec 10 10:53:30 crc kubenswrapper[4780]: I1210 10:53:30.726858 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"prometheus-operator-kube-rbac-proxy-config\" (UniqueName: \"kubernetes.io/secret/cca099c8-c01e-4c66-83da-53ddb2bd4e3f-prometheus-operator-kube-rbac-proxy-config\") pod \"prometheus-operator-db54df47d-2xl5d\" (UID: \"cca099c8-c01e-4c66-83da-53ddb2bd4e3f\") " pod="openshift-monitoring/prometheus-operator-db54df47d-2xl5d" Dec 10 10:53:30 crc kubenswrapper[4780]: I1210 10:53:30.727125 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-client-ca\" (UniqueName: \"kubernetes.io/configmap/cca099c8-c01e-4c66-83da-53ddb2bd4e3f-metrics-client-ca\") pod \"prometheus-operator-db54df47d-2xl5d\" (UID: \"cca099c8-c01e-4c66-83da-53ddb2bd4e3f\") " pod="openshift-monitoring/prometheus-operator-db54df47d-2xl5d" Dec 10 10:53:30 crc kubenswrapper[4780]: I1210 10:53:30.727494 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hk57p\" (UniqueName: \"kubernetes.io/projected/cca099c8-c01e-4c66-83da-53ddb2bd4e3f-kube-api-access-hk57p\") pod \"prometheus-operator-db54df47d-2xl5d\" (UID: \"cca099c8-c01e-4c66-83da-53ddb2bd4e3f\") " pod="openshift-monitoring/prometheus-operator-db54df47d-2xl5d" Dec 10 10:53:30 crc kubenswrapper[4780]: I1210 10:53:30.728350 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-client-ca\" (UniqueName: \"kubernetes.io/configmap/cca099c8-c01e-4c66-83da-53ddb2bd4e3f-metrics-client-ca\") pod \"prometheus-operator-db54df47d-2xl5d\" (UID: \"cca099c8-c01e-4c66-83da-53ddb2bd4e3f\") " pod="openshift-monitoring/prometheus-operator-db54df47d-2xl5d" Dec 10 10:53:30 crc kubenswrapper[4780]: I1210 10:53:30.735539 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"prometheus-operator-kube-rbac-proxy-config\" (UniqueName: \"kubernetes.io/secret/cca099c8-c01e-4c66-83da-53ddb2bd4e3f-prometheus-operator-kube-rbac-proxy-config\") pod \"prometheus-operator-db54df47d-2xl5d\" (UID: \"cca099c8-c01e-4c66-83da-53ddb2bd4e3f\") " pod="openshift-monitoring/prometheus-operator-db54df47d-2xl5d" Dec 10 10:53:30 crc kubenswrapper[4780]: I1210 10:53:30.735573 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"prometheus-operator-tls\" (UniqueName: \"kubernetes.io/secret/cca099c8-c01e-4c66-83da-53ddb2bd4e3f-prometheus-operator-tls\") pod \"prometheus-operator-db54df47d-2xl5d\" (UID: \"cca099c8-c01e-4c66-83da-53ddb2bd4e3f\") " pod="openshift-monitoring/prometheus-operator-db54df47d-2xl5d" Dec 10 10:53:30 crc kubenswrapper[4780]: I1210 10:53:30.754021 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hk57p\" (UniqueName: \"kubernetes.io/projected/cca099c8-c01e-4c66-83da-53ddb2bd4e3f-kube-api-access-hk57p\") pod \"prometheus-operator-db54df47d-2xl5d\" (UID: \"cca099c8-c01e-4c66-83da-53ddb2bd4e3f\") " pod="openshift-monitoring/prometheus-operator-db54df47d-2xl5d" Dec 10 10:53:30 crc 
kubenswrapper[4780]: I1210 10:53:30.919271 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-monitoring/prometheus-operator-db54df47d-2xl5d" Dec 10 10:53:31 crc kubenswrapper[4780]: I1210 10:53:31.411743 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-monitoring/prometheus-operator-db54df47d-2xl5d"] Dec 10 10:53:31 crc kubenswrapper[4780]: W1210 10:53:31.423642 4780 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podcca099c8_c01e_4c66_83da_53ddb2bd4e3f.slice/crio-23970ebfe817933ff6dff58ee20e01714968898820a2d31b68faf6d74bbab273 WatchSource:0}: Error finding container 23970ebfe817933ff6dff58ee20e01714968898820a2d31b68faf6d74bbab273: Status 404 returned error can't find the container with id 23970ebfe817933ff6dff58ee20e01714968898820a2d31b68faf6d74bbab273 Dec 10 10:53:31 crc kubenswrapper[4780]: I1210 10:53:31.426874 4780 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Dec 10 10:53:31 crc kubenswrapper[4780]: I1210 10:53:31.718758 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/prometheus-operator-db54df47d-2xl5d" event={"ID":"cca099c8-c01e-4c66-83da-53ddb2bd4e3f","Type":"ContainerStarted","Data":"23970ebfe817933ff6dff58ee20e01714968898820a2d31b68faf6d74bbab273"} Dec 10 10:53:36 crc kubenswrapper[4780]: I1210 10:53:36.757367 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/prometheus-operator-db54df47d-2xl5d" event={"ID":"cca099c8-c01e-4c66-83da-53ddb2bd4e3f","Type":"ContainerStarted","Data":"968fe3a2fa68f8f0f61f3896c883152381f9bcd80e23d6706fdbf1c8044ea9eb"} Dec 10 10:53:37 crc kubenswrapper[4780]: I1210 10:53:37.765611 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/prometheus-operator-db54df47d-2xl5d" event={"ID":"cca099c8-c01e-4c66-83da-53ddb2bd4e3f","Type":"ContainerStarted","Data":"3a8034336071789f609d6496915dab949b6925d2d550480400279fffa1c1f721"} Dec 10 10:53:37 crc kubenswrapper[4780]: I1210 10:53:37.790488 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-monitoring/prometheus-operator-db54df47d-2xl5d" podStartSLOduration=2.649772369 podStartE2EDuration="7.790440737s" podCreationTimestamp="2025-12-10 10:53:30 +0000 UTC" firstStartedPulling="2025-12-10 10:53:31.426431718 +0000 UTC m=+516.279825161" lastFinishedPulling="2025-12-10 10:53:36.567100086 +0000 UTC m=+521.420493529" observedRunningTime="2025-12-10 10:53:37.782824935 +0000 UTC m=+522.636218388" watchObservedRunningTime="2025-12-10 10:53:37.790440737 +0000 UTC m=+522.643834180" Dec 10 10:53:39 crc kubenswrapper[4780]: I1210 10:53:39.965851 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-monitoring/openshift-state-metrics-566fddb674-4tzw7"] Dec 10 10:53:39 crc kubenswrapper[4780]: I1210 10:53:39.967465 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-monitoring/openshift-state-metrics-566fddb674-4tzw7" Dec 10 10:53:39 crc kubenswrapper[4780]: I1210 10:53:39.969009 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-monitoring/node-exporter-hzkk9"] Dec 10 10:53:39 crc kubenswrapper[4780]: I1210 10:53:39.969841 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"openshift-state-metrics-kube-rbac-proxy-config" Dec 10 10:53:39 crc kubenswrapper[4780]: I1210 10:53:39.970095 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"openshift-state-metrics-tls" Dec 10 10:53:39 crc kubenswrapper[4780]: I1210 10:53:39.970313 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"openshift-state-metrics-dockercfg-5qvqm" Dec 10 10:53:39 crc kubenswrapper[4780]: I1210 10:53:39.970384 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-monitoring/node-exporter-hzkk9" Dec 10 10:53:39 crc kubenswrapper[4780]: I1210 10:53:39.971963 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"node-exporter-kube-rbac-proxy-config" Dec 10 10:53:39 crc kubenswrapper[4780]: I1210 10:53:39.972155 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"node-exporter-tls" Dec 10 10:53:39 crc kubenswrapper[4780]: I1210 10:53:39.972308 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"node-exporter-dockercfg-hrggd" Dec 10 10:53:39 crc kubenswrapper[4780]: I1210 10:53:39.991269 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-monitoring/openshift-state-metrics-566fddb674-4tzw7"] Dec 10 10:53:40 crc kubenswrapper[4780]: I1210 10:53:40.049904 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-monitoring/kube-state-metrics-777cb5bd5d-c6vcz"] Dec 10 10:53:40 crc kubenswrapper[4780]: I1210 10:53:40.051389 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-c6vcz" Dec 10 10:53:40 crc kubenswrapper[4780]: I1210 10:53:40.053187 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"kube-state-metrics-tls" Dec 10 10:53:40 crc kubenswrapper[4780]: I1210 10:53:40.055062 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"kube-state-metrics-kube-rbac-proxy-config" Dec 10 10:53:40 crc kubenswrapper[4780]: I1210 10:53:40.057716 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"kube-state-metrics-dockercfg-86fqr" Dec 10 10:53:40 crc kubenswrapper[4780]: I1210 10:53:40.058278 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-monitoring"/"kube-state-metrics-custom-resource-state-configmap" Dec 10 10:53:40 crc kubenswrapper[4780]: I1210 10:53:40.062007 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-exporter-wtmp\" (UniqueName: \"kubernetes.io/host-path/77c7f18f-58ec-48de-9227-a0e210ae07c7-node-exporter-wtmp\") pod \"node-exporter-hzkk9\" (UID: \"77c7f18f-58ec-48de-9227-a0e210ae07c7\") " pod="openshift-monitoring/node-exporter-hzkk9" Dec 10 10:53:40 crc kubenswrapper[4780]: I1210 10:53:40.062049 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-exporter-tls\" (UniqueName: \"kubernetes.io/secret/77c7f18f-58ec-48de-9227-a0e210ae07c7-node-exporter-tls\") pod \"node-exporter-hzkk9\" (UID: \"77c7f18f-58ec-48de-9227-a0e210ae07c7\") " pod="openshift-monitoring/node-exporter-hzkk9" Dec 10 10:53:40 crc kubenswrapper[4780]: I1210 10:53:40.062080 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openshift-state-metrics-kube-rbac-proxy-config\" (UniqueName: \"kubernetes.io/secret/a807df46-18bc-439b-ac2d-d3ef96682a73-openshift-state-metrics-kube-rbac-proxy-config\") pod \"openshift-state-metrics-566fddb674-4tzw7\" (UID: \"a807df46-18bc-439b-ac2d-d3ef96682a73\") " pod="openshift-monitoring/openshift-state-metrics-566fddb674-4tzw7" Dec 10 10:53:40 crc kubenswrapper[4780]: I1210 10:53:40.062107 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"root\" (UniqueName: \"kubernetes.io/host-path/77c7f18f-58ec-48de-9227-a0e210ae07c7-root\") pod \"node-exporter-hzkk9\" (UID: \"77c7f18f-58ec-48de-9227-a0e210ae07c7\") " pod="openshift-monitoring/node-exporter-hzkk9" Dec 10 10:53:40 crc kubenswrapper[4780]: I1210 10:53:40.062130 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-exporter-textfile\" (UniqueName: \"kubernetes.io/empty-dir/77c7f18f-58ec-48de-9227-a0e210ae07c7-node-exporter-textfile\") pod \"node-exporter-hzkk9\" (UID: \"77c7f18f-58ec-48de-9227-a0e210ae07c7\") " pod="openshift-monitoring/node-exporter-hzkk9" Dec 10 10:53:40 crc kubenswrapper[4780]: I1210 10:53:40.062165 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-czvhp\" (UniqueName: \"kubernetes.io/projected/a807df46-18bc-439b-ac2d-d3ef96682a73-kube-api-access-czvhp\") pod \"openshift-state-metrics-566fddb674-4tzw7\" (UID: \"a807df46-18bc-439b-ac2d-d3ef96682a73\") " pod="openshift-monitoring/openshift-state-metrics-566fddb674-4tzw7" Dec 10 10:53:40 crc kubenswrapper[4780]: I1210 10:53:40.062192 4780 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-state-metrics-tls\" (UniqueName: \"kubernetes.io/secret/dcdd80bc-0b73-4393-abca-5cf977bbb84b-kube-state-metrics-tls\") pod \"kube-state-metrics-777cb5bd5d-c6vcz\" (UID: \"dcdd80bc-0b73-4393-abca-5cf977bbb84b\") " pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-c6vcz" Dec 10 10:53:40 crc kubenswrapper[4780]: I1210 10:53:40.062213 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g8wmn\" (UniqueName: \"kubernetes.io/projected/dcdd80bc-0b73-4393-abca-5cf977bbb84b-kube-api-access-g8wmn\") pod \"kube-state-metrics-777cb5bd5d-c6vcz\" (UID: \"dcdd80bc-0b73-4393-abca-5cf977bbb84b\") " pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-c6vcz" Dec 10 10:53:40 crc kubenswrapper[4780]: I1210 10:53:40.062242 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-state-metrics-kube-rbac-proxy-config\" (UniqueName: \"kubernetes.io/secret/dcdd80bc-0b73-4393-abca-5cf977bbb84b-kube-state-metrics-kube-rbac-proxy-config\") pod \"kube-state-metrics-777cb5bd5d-c6vcz\" (UID: \"dcdd80bc-0b73-4393-abca-5cf977bbb84b\") " pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-c6vcz" Dec 10 10:53:40 crc kubenswrapper[4780]: I1210 10:53:40.062280 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-client-ca\" (UniqueName: \"kubernetes.io/configmap/dcdd80bc-0b73-4393-abca-5cf977bbb84b-metrics-client-ca\") pod \"kube-state-metrics-777cb5bd5d-c6vcz\" (UID: \"dcdd80bc-0b73-4393-abca-5cf977bbb84b\") " pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-c6vcz" Dec 10 10:53:40 crc kubenswrapper[4780]: I1210 10:53:40.062311 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-state-metrics-custom-resource-state-configmap\" (UniqueName: \"kubernetes.io/configmap/dcdd80bc-0b73-4393-abca-5cf977bbb84b-kube-state-metrics-custom-resource-state-configmap\") pod \"kube-state-metrics-777cb5bd5d-c6vcz\" (UID: \"dcdd80bc-0b73-4393-abca-5cf977bbb84b\") " pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-c6vcz" Dec 10 10:53:40 crc kubenswrapper[4780]: I1210 10:53:40.062362 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4bp8f\" (UniqueName: \"kubernetes.io/projected/77c7f18f-58ec-48de-9227-a0e210ae07c7-kube-api-access-4bp8f\") pod \"node-exporter-hzkk9\" (UID: \"77c7f18f-58ec-48de-9227-a0e210ae07c7\") " pod="openshift-monitoring/node-exporter-hzkk9" Dec 10 10:53:40 crc kubenswrapper[4780]: I1210 10:53:40.062379 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"volume-directive-shadow\" (UniqueName: \"kubernetes.io/empty-dir/dcdd80bc-0b73-4393-abca-5cf977bbb84b-volume-directive-shadow\") pod \"kube-state-metrics-777cb5bd5d-c6vcz\" (UID: \"dcdd80bc-0b73-4393-abca-5cf977bbb84b\") " pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-c6vcz" Dec 10 10:53:40 crc kubenswrapper[4780]: I1210 10:53:40.062417 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-client-ca\" (UniqueName: \"kubernetes.io/configmap/77c7f18f-58ec-48de-9227-a0e210ae07c7-metrics-client-ca\") pod \"node-exporter-hzkk9\" (UID: \"77c7f18f-58ec-48de-9227-a0e210ae07c7\") " pod="openshift-monitoring/node-exporter-hzkk9" Dec 10 10:53:40 crc kubenswrapper[4780]: I1210 
10:53:40.062433 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openshift-state-metrics-tls\" (UniqueName: \"kubernetes.io/secret/a807df46-18bc-439b-ac2d-d3ef96682a73-openshift-state-metrics-tls\") pod \"openshift-state-metrics-566fddb674-4tzw7\" (UID: \"a807df46-18bc-439b-ac2d-d3ef96682a73\") " pod="openshift-monitoring/openshift-state-metrics-566fddb674-4tzw7" Dec 10 10:53:40 crc kubenswrapper[4780]: I1210 10:53:40.062583 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-exporter-kube-rbac-proxy-config\" (UniqueName: \"kubernetes.io/secret/77c7f18f-58ec-48de-9227-a0e210ae07c7-node-exporter-kube-rbac-proxy-config\") pod \"node-exporter-hzkk9\" (UID: \"77c7f18f-58ec-48de-9227-a0e210ae07c7\") " pod="openshift-monitoring/node-exporter-hzkk9" Dec 10 10:53:40 crc kubenswrapper[4780]: I1210 10:53:40.062614 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-client-ca\" (UniqueName: \"kubernetes.io/configmap/a807df46-18bc-439b-ac2d-d3ef96682a73-metrics-client-ca\") pod \"openshift-state-metrics-566fddb674-4tzw7\" (UID: \"a807df46-18bc-439b-ac2d-d3ef96682a73\") " pod="openshift-monitoring/openshift-state-metrics-566fddb674-4tzw7" Dec 10 10:53:40 crc kubenswrapper[4780]: I1210 10:53:40.062629 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/77c7f18f-58ec-48de-9227-a0e210ae07c7-sys\") pod \"node-exporter-hzkk9\" (UID: \"77c7f18f-58ec-48de-9227-a0e210ae07c7\") " pod="openshift-monitoring/node-exporter-hzkk9" Dec 10 10:53:40 crc kubenswrapper[4780]: I1210 10:53:40.096750 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-monitoring/kube-state-metrics-777cb5bd5d-c6vcz"] Dec 10 10:53:40 crc kubenswrapper[4780]: I1210 10:53:40.163910 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-client-ca\" (UniqueName: \"kubernetes.io/configmap/77c7f18f-58ec-48de-9227-a0e210ae07c7-metrics-client-ca\") pod \"node-exporter-hzkk9\" (UID: \"77c7f18f-58ec-48de-9227-a0e210ae07c7\") " pod="openshift-monitoring/node-exporter-hzkk9" Dec 10 10:53:40 crc kubenswrapper[4780]: I1210 10:53:40.163981 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openshift-state-metrics-tls\" (UniqueName: \"kubernetes.io/secret/a807df46-18bc-439b-ac2d-d3ef96682a73-openshift-state-metrics-tls\") pod \"openshift-state-metrics-566fddb674-4tzw7\" (UID: \"a807df46-18bc-439b-ac2d-d3ef96682a73\") " pod="openshift-monitoring/openshift-state-metrics-566fddb674-4tzw7" Dec 10 10:53:40 crc kubenswrapper[4780]: I1210 10:53:40.164036 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-exporter-kube-rbac-proxy-config\" (UniqueName: \"kubernetes.io/secret/77c7f18f-58ec-48de-9227-a0e210ae07c7-node-exporter-kube-rbac-proxy-config\") pod \"node-exporter-hzkk9\" (UID: \"77c7f18f-58ec-48de-9227-a0e210ae07c7\") " pod="openshift-monitoring/node-exporter-hzkk9" Dec 10 10:53:40 crc kubenswrapper[4780]: I1210 10:53:40.164064 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/77c7f18f-58ec-48de-9227-a0e210ae07c7-sys\") pod \"node-exporter-hzkk9\" (UID: \"77c7f18f-58ec-48de-9227-a0e210ae07c7\") " pod="openshift-monitoring/node-exporter-hzkk9" Dec 10 10:53:40 crc kubenswrapper[4780]: I1210 10:53:40.164117 
4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-client-ca\" (UniqueName: \"kubernetes.io/configmap/a807df46-18bc-439b-ac2d-d3ef96682a73-metrics-client-ca\") pod \"openshift-state-metrics-566fddb674-4tzw7\" (UID: \"a807df46-18bc-439b-ac2d-d3ef96682a73\") " pod="openshift-monitoring/openshift-state-metrics-566fddb674-4tzw7" Dec 10 10:53:40 crc kubenswrapper[4780]: I1210 10:53:40.164136 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-exporter-wtmp\" (UniqueName: \"kubernetes.io/host-path/77c7f18f-58ec-48de-9227-a0e210ae07c7-node-exporter-wtmp\") pod \"node-exporter-hzkk9\" (UID: \"77c7f18f-58ec-48de-9227-a0e210ae07c7\") " pod="openshift-monitoring/node-exporter-hzkk9" Dec 10 10:53:40 crc kubenswrapper[4780]: I1210 10:53:40.164155 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-exporter-tls\" (UniqueName: \"kubernetes.io/secret/77c7f18f-58ec-48de-9227-a0e210ae07c7-node-exporter-tls\") pod \"node-exporter-hzkk9\" (UID: \"77c7f18f-58ec-48de-9227-a0e210ae07c7\") " pod="openshift-monitoring/node-exporter-hzkk9" Dec 10 10:53:40 crc kubenswrapper[4780]: I1210 10:53:40.164182 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openshift-state-metrics-kube-rbac-proxy-config\" (UniqueName: \"kubernetes.io/secret/a807df46-18bc-439b-ac2d-d3ef96682a73-openshift-state-metrics-kube-rbac-proxy-config\") pod \"openshift-state-metrics-566fddb674-4tzw7\" (UID: \"a807df46-18bc-439b-ac2d-d3ef96682a73\") " pod="openshift-monitoring/openshift-state-metrics-566fddb674-4tzw7" Dec 10 10:53:40 crc kubenswrapper[4780]: I1210 10:53:40.164242 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"root\" (UniqueName: \"kubernetes.io/host-path/77c7f18f-58ec-48de-9227-a0e210ae07c7-root\") pod \"node-exporter-hzkk9\" (UID: \"77c7f18f-58ec-48de-9227-a0e210ae07c7\") " pod="openshift-monitoring/node-exporter-hzkk9" Dec 10 10:53:40 crc kubenswrapper[4780]: I1210 10:53:40.164263 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-exporter-textfile\" (UniqueName: \"kubernetes.io/empty-dir/77c7f18f-58ec-48de-9227-a0e210ae07c7-node-exporter-textfile\") pod \"node-exporter-hzkk9\" (UID: \"77c7f18f-58ec-48de-9227-a0e210ae07c7\") " pod="openshift-monitoring/node-exporter-hzkk9" Dec 10 10:53:40 crc kubenswrapper[4780]: I1210 10:53:40.164286 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-czvhp\" (UniqueName: \"kubernetes.io/projected/a807df46-18bc-439b-ac2d-d3ef96682a73-kube-api-access-czvhp\") pod \"openshift-state-metrics-566fddb674-4tzw7\" (UID: \"a807df46-18bc-439b-ac2d-d3ef96682a73\") " pod="openshift-monitoring/openshift-state-metrics-566fddb674-4tzw7" Dec 10 10:53:40 crc kubenswrapper[4780]: I1210 10:53:40.164317 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-state-metrics-tls\" (UniqueName: \"kubernetes.io/secret/dcdd80bc-0b73-4393-abca-5cf977bbb84b-kube-state-metrics-tls\") pod \"kube-state-metrics-777cb5bd5d-c6vcz\" (UID: \"dcdd80bc-0b73-4393-abca-5cf977bbb84b\") " pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-c6vcz" Dec 10 10:53:40 crc kubenswrapper[4780]: I1210 10:53:40.164338 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g8wmn\" (UniqueName: \"kubernetes.io/projected/dcdd80bc-0b73-4393-abca-5cf977bbb84b-kube-api-access-g8wmn\") pod 
\"kube-state-metrics-777cb5bd5d-c6vcz\" (UID: \"dcdd80bc-0b73-4393-abca-5cf977bbb84b\") " pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-c6vcz" Dec 10 10:53:40 crc kubenswrapper[4780]: I1210 10:53:40.164395 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-state-metrics-kube-rbac-proxy-config\" (UniqueName: \"kubernetes.io/secret/dcdd80bc-0b73-4393-abca-5cf977bbb84b-kube-state-metrics-kube-rbac-proxy-config\") pod \"kube-state-metrics-777cb5bd5d-c6vcz\" (UID: \"dcdd80bc-0b73-4393-abca-5cf977bbb84b\") " pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-c6vcz" Dec 10 10:53:40 crc kubenswrapper[4780]: I1210 10:53:40.164417 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-client-ca\" (UniqueName: \"kubernetes.io/configmap/dcdd80bc-0b73-4393-abca-5cf977bbb84b-metrics-client-ca\") pod \"kube-state-metrics-777cb5bd5d-c6vcz\" (UID: \"dcdd80bc-0b73-4393-abca-5cf977bbb84b\") " pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-c6vcz" Dec 10 10:53:40 crc kubenswrapper[4780]: I1210 10:53:40.164461 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-state-metrics-custom-resource-state-configmap\" (UniqueName: \"kubernetes.io/configmap/dcdd80bc-0b73-4393-abca-5cf977bbb84b-kube-state-metrics-custom-resource-state-configmap\") pod \"kube-state-metrics-777cb5bd5d-c6vcz\" (UID: \"dcdd80bc-0b73-4393-abca-5cf977bbb84b\") " pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-c6vcz" Dec 10 10:53:40 crc kubenswrapper[4780]: I1210 10:53:40.164498 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4bp8f\" (UniqueName: \"kubernetes.io/projected/77c7f18f-58ec-48de-9227-a0e210ae07c7-kube-api-access-4bp8f\") pod \"node-exporter-hzkk9\" (UID: \"77c7f18f-58ec-48de-9227-a0e210ae07c7\") " pod="openshift-monitoring/node-exporter-hzkk9" Dec 10 10:53:40 crc kubenswrapper[4780]: I1210 10:53:40.164542 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"volume-directive-shadow\" (UniqueName: \"kubernetes.io/empty-dir/dcdd80bc-0b73-4393-abca-5cf977bbb84b-volume-directive-shadow\") pod \"kube-state-metrics-777cb5bd5d-c6vcz\" (UID: \"dcdd80bc-0b73-4393-abca-5cf977bbb84b\") " pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-c6vcz" Dec 10 10:53:40 crc kubenswrapper[4780]: I1210 10:53:40.164851 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-client-ca\" (UniqueName: \"kubernetes.io/configmap/77c7f18f-58ec-48de-9227-a0e210ae07c7-metrics-client-ca\") pod \"node-exporter-hzkk9\" (UID: \"77c7f18f-58ec-48de-9227-a0e210ae07c7\") " pod="openshift-monitoring/node-exporter-hzkk9" Dec 10 10:53:40 crc kubenswrapper[4780]: E1210 10:53:40.165523 4780 secret.go:188] Couldn't get secret openshift-monitoring/kube-state-metrics-tls: secret "kube-state-metrics-tls" not found Dec 10 10:53:40 crc kubenswrapper[4780]: I1210 10:53:40.165682 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-exporter-textfile\" (UniqueName: \"kubernetes.io/empty-dir/77c7f18f-58ec-48de-9227-a0e210ae07c7-node-exporter-textfile\") pod \"node-exporter-hzkk9\" (UID: \"77c7f18f-58ec-48de-9227-a0e210ae07c7\") " pod="openshift-monitoring/node-exporter-hzkk9" Dec 10 10:53:40 crc kubenswrapper[4780]: I1210 10:53:40.165703 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"volume-directive-shadow\" (UniqueName: 
\"kubernetes.io/empty-dir/dcdd80bc-0b73-4393-abca-5cf977bbb84b-volume-directive-shadow\") pod \"kube-state-metrics-777cb5bd5d-c6vcz\" (UID: \"dcdd80bc-0b73-4393-abca-5cf977bbb84b\") " pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-c6vcz" Dec 10 10:53:40 crc kubenswrapper[4780]: E1210 10:53:40.165718 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/dcdd80bc-0b73-4393-abca-5cf977bbb84b-kube-state-metrics-tls podName:dcdd80bc-0b73-4393-abca-5cf977bbb84b nodeName:}" failed. No retries permitted until 2025-12-10 10:53:40.665687097 +0000 UTC m=+525.519080540 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "kube-state-metrics-tls" (UniqueName: "kubernetes.io/secret/dcdd80bc-0b73-4393-abca-5cf977bbb84b-kube-state-metrics-tls") pod "kube-state-metrics-777cb5bd5d-c6vcz" (UID: "dcdd80bc-0b73-4393-abca-5cf977bbb84b") : secret "kube-state-metrics-tls" not found Dec 10 10:53:40 crc kubenswrapper[4780]: E1210 10:53:40.166307 4780 secret.go:188] Couldn't get secret openshift-monitoring/node-exporter-tls: secret "node-exporter-tls" not found Dec 10 10:53:40 crc kubenswrapper[4780]: E1210 10:53:40.166396 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/77c7f18f-58ec-48de-9227-a0e210ae07c7-node-exporter-tls podName:77c7f18f-58ec-48de-9227-a0e210ae07c7 nodeName:}" failed. No retries permitted until 2025-12-10 10:53:40.666358095 +0000 UTC m=+525.519751538 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "node-exporter-tls" (UniqueName: "kubernetes.io/secret/77c7f18f-58ec-48de-9227-a0e210ae07c7-node-exporter-tls") pod "node-exporter-hzkk9" (UID: "77c7f18f-58ec-48de-9227-a0e210ae07c7") : secret "node-exporter-tls" not found Dec 10 10:53:40 crc kubenswrapper[4780]: I1210 10:53:40.166118 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/77c7f18f-58ec-48de-9227-a0e210ae07c7-sys\") pod \"node-exporter-hzkk9\" (UID: \"77c7f18f-58ec-48de-9227-a0e210ae07c7\") " pod="openshift-monitoring/node-exporter-hzkk9" Dec 10 10:53:40 crc kubenswrapper[4780]: I1210 10:53:40.166086 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-exporter-wtmp\" (UniqueName: \"kubernetes.io/host-path/77c7f18f-58ec-48de-9227-a0e210ae07c7-node-exporter-wtmp\") pod \"node-exporter-hzkk9\" (UID: \"77c7f18f-58ec-48de-9227-a0e210ae07c7\") " pod="openshift-monitoring/node-exporter-hzkk9" Dec 10 10:53:40 crc kubenswrapper[4780]: I1210 10:53:40.167162 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"root\" (UniqueName: \"kubernetes.io/host-path/77c7f18f-58ec-48de-9227-a0e210ae07c7-root\") pod \"node-exporter-hzkk9\" (UID: \"77c7f18f-58ec-48de-9227-a0e210ae07c7\") " pod="openshift-monitoring/node-exporter-hzkk9" Dec 10 10:53:40 crc kubenswrapper[4780]: I1210 10:53:40.167219 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-state-metrics-custom-resource-state-configmap\" (UniqueName: \"kubernetes.io/configmap/dcdd80bc-0b73-4393-abca-5cf977bbb84b-kube-state-metrics-custom-resource-state-configmap\") pod \"kube-state-metrics-777cb5bd5d-c6vcz\" (UID: \"dcdd80bc-0b73-4393-abca-5cf977bbb84b\") " pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-c6vcz" Dec 10 10:53:40 crc kubenswrapper[4780]: I1210 10:53:40.168285 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-client-ca\" (UniqueName: 
\"kubernetes.io/configmap/a807df46-18bc-439b-ac2d-d3ef96682a73-metrics-client-ca\") pod \"openshift-state-metrics-566fddb674-4tzw7\" (UID: \"a807df46-18bc-439b-ac2d-d3ef96682a73\") " pod="openshift-monitoring/openshift-state-metrics-566fddb674-4tzw7" Dec 10 10:53:40 crc kubenswrapper[4780]: I1210 10:53:40.169320 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-client-ca\" (UniqueName: \"kubernetes.io/configmap/dcdd80bc-0b73-4393-abca-5cf977bbb84b-metrics-client-ca\") pod \"kube-state-metrics-777cb5bd5d-c6vcz\" (UID: \"dcdd80bc-0b73-4393-abca-5cf977bbb84b\") " pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-c6vcz" Dec 10 10:53:40 crc kubenswrapper[4780]: I1210 10:53:40.174375 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-exporter-kube-rbac-proxy-config\" (UniqueName: \"kubernetes.io/secret/77c7f18f-58ec-48de-9227-a0e210ae07c7-node-exporter-kube-rbac-proxy-config\") pod \"node-exporter-hzkk9\" (UID: \"77c7f18f-58ec-48de-9227-a0e210ae07c7\") " pod="openshift-monitoring/node-exporter-hzkk9" Dec 10 10:53:40 crc kubenswrapper[4780]: I1210 10:53:40.178263 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openshift-state-metrics-kube-rbac-proxy-config\" (UniqueName: \"kubernetes.io/secret/a807df46-18bc-439b-ac2d-d3ef96682a73-openshift-state-metrics-kube-rbac-proxy-config\") pod \"openshift-state-metrics-566fddb674-4tzw7\" (UID: \"a807df46-18bc-439b-ac2d-d3ef96682a73\") " pod="openshift-monitoring/openshift-state-metrics-566fddb674-4tzw7" Dec 10 10:53:40 crc kubenswrapper[4780]: I1210 10:53:40.189135 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-czvhp\" (UniqueName: \"kubernetes.io/projected/a807df46-18bc-439b-ac2d-d3ef96682a73-kube-api-access-czvhp\") pod \"openshift-state-metrics-566fddb674-4tzw7\" (UID: \"a807df46-18bc-439b-ac2d-d3ef96682a73\") " pod="openshift-monitoring/openshift-state-metrics-566fddb674-4tzw7" Dec 10 10:53:40 crc kubenswrapper[4780]: I1210 10:53:40.189776 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openshift-state-metrics-tls\" (UniqueName: \"kubernetes.io/secret/a807df46-18bc-439b-ac2d-d3ef96682a73-openshift-state-metrics-tls\") pod \"openshift-state-metrics-566fddb674-4tzw7\" (UID: \"a807df46-18bc-439b-ac2d-d3ef96682a73\") " pod="openshift-monitoring/openshift-state-metrics-566fddb674-4tzw7" Dec 10 10:53:40 crc kubenswrapper[4780]: I1210 10:53:40.189811 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-state-metrics-kube-rbac-proxy-config\" (UniqueName: \"kubernetes.io/secret/dcdd80bc-0b73-4393-abca-5cf977bbb84b-kube-state-metrics-kube-rbac-proxy-config\") pod \"kube-state-metrics-777cb5bd5d-c6vcz\" (UID: \"dcdd80bc-0b73-4393-abca-5cf977bbb84b\") " pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-c6vcz" Dec 10 10:53:40 crc kubenswrapper[4780]: I1210 10:53:40.191316 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4bp8f\" (UniqueName: \"kubernetes.io/projected/77c7f18f-58ec-48de-9227-a0e210ae07c7-kube-api-access-4bp8f\") pod \"node-exporter-hzkk9\" (UID: \"77c7f18f-58ec-48de-9227-a0e210ae07c7\") " pod="openshift-monitoring/node-exporter-hzkk9" Dec 10 10:53:40 crc kubenswrapper[4780]: I1210 10:53:40.202615 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g8wmn\" (UniqueName: \"kubernetes.io/projected/dcdd80bc-0b73-4393-abca-5cf977bbb84b-kube-api-access-g8wmn\") pod 
\"kube-state-metrics-777cb5bd5d-c6vcz\" (UID: \"dcdd80bc-0b73-4393-abca-5cf977bbb84b\") " pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-c6vcz" Dec 10 10:53:40 crc kubenswrapper[4780]: I1210 10:53:40.342176 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-monitoring/openshift-state-metrics-566fddb674-4tzw7" Dec 10 10:53:40 crc kubenswrapper[4780]: I1210 10:53:40.671856 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-exporter-tls\" (UniqueName: \"kubernetes.io/secret/77c7f18f-58ec-48de-9227-a0e210ae07c7-node-exporter-tls\") pod \"node-exporter-hzkk9\" (UID: \"77c7f18f-58ec-48de-9227-a0e210ae07c7\") " pod="openshift-monitoring/node-exporter-hzkk9" Dec 10 10:53:40 crc kubenswrapper[4780]: I1210 10:53:40.672309 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-state-metrics-tls\" (UniqueName: \"kubernetes.io/secret/dcdd80bc-0b73-4393-abca-5cf977bbb84b-kube-state-metrics-tls\") pod \"kube-state-metrics-777cb5bd5d-c6vcz\" (UID: \"dcdd80bc-0b73-4393-abca-5cf977bbb84b\") " pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-c6vcz" Dec 10 10:53:40 crc kubenswrapper[4780]: I1210 10:53:40.676781 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-exporter-tls\" (UniqueName: \"kubernetes.io/secret/77c7f18f-58ec-48de-9227-a0e210ae07c7-node-exporter-tls\") pod \"node-exporter-hzkk9\" (UID: \"77c7f18f-58ec-48de-9227-a0e210ae07c7\") " pod="openshift-monitoring/node-exporter-hzkk9" Dec 10 10:53:40 crc kubenswrapper[4780]: I1210 10:53:40.678187 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-state-metrics-tls\" (UniqueName: \"kubernetes.io/secret/dcdd80bc-0b73-4393-abca-5cf977bbb84b-kube-state-metrics-tls\") pod \"kube-state-metrics-777cb5bd5d-c6vcz\" (UID: \"dcdd80bc-0b73-4393-abca-5cf977bbb84b\") " pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-c6vcz" Dec 10 10:53:40 crc kubenswrapper[4780]: I1210 10:53:40.810190 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-monitoring/openshift-state-metrics-566fddb674-4tzw7"] Dec 10 10:53:40 crc kubenswrapper[4780]: W1210 10:53:40.816480 4780 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poda807df46_18bc_439b_ac2d_d3ef96682a73.slice/crio-72575f2e93599b47dedf1387f45abad11418e77844d0c5e47d0b15fd0924314a WatchSource:0}: Error finding container 72575f2e93599b47dedf1387f45abad11418e77844d0c5e47d0b15fd0924314a: Status 404 returned error can't find the container with id 72575f2e93599b47dedf1387f45abad11418e77844d0c5e47d0b15fd0924314a Dec 10 10:53:40 crc kubenswrapper[4780]: I1210 10:53:40.951563 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-monitoring/node-exporter-hzkk9" Dec 10 10:53:40 crc kubenswrapper[4780]: I1210 10:53:40.971872 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-c6vcz" Dec 10 10:53:41 crc kubenswrapper[4780]: I1210 10:53:41.147996 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-monitoring/alertmanager-main-0"] Dec 10 10:53:41 crc kubenswrapper[4780]: I1210 10:53:41.150286 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-monitoring/alertmanager-main-0" Dec 10 10:53:41 crc kubenswrapper[4780]: I1210 10:53:41.152459 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"alertmanager-main-tls" Dec 10 10:53:41 crc kubenswrapper[4780]: I1210 10:53:41.152484 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"alertmanager-kube-rbac-proxy-metric" Dec 10 10:53:41 crc kubenswrapper[4780]: I1210 10:53:41.152495 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"alertmanager-main-tls-assets-0" Dec 10 10:53:41 crc kubenswrapper[4780]: I1210 10:53:41.152495 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"alertmanager-kube-rbac-proxy" Dec 10 10:53:41 crc kubenswrapper[4780]: I1210 10:53:41.154530 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"alertmanager-main-generated" Dec 10 10:53:41 crc kubenswrapper[4780]: I1210 10:53:41.154550 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"alertmanager-main-web-config" Dec 10 10:53:41 crc kubenswrapper[4780]: I1210 10:53:41.154591 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"alertmanager-main-dockercfg-kcwsx" Dec 10 10:53:41 crc kubenswrapper[4780]: I1210 10:53:41.154680 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"alertmanager-kube-rbac-proxy-web" Dec 10 10:53:41 crc kubenswrapper[4780]: I1210 10:53:41.162489 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-monitoring"/"alertmanager-trusted-ca-bundle" Dec 10 10:53:41 crc kubenswrapper[4780]: I1210 10:53:41.176502 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-monitoring/alertmanager-main-0"] Dec 10 10:53:41 crc kubenswrapper[4780]: I1210 10:53:41.285080 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/b41fe3df-3e49-4ee5-8f9c-e2f725888b4b-tls-assets\") pod \"alertmanager-main-0\" (UID: \"b41fe3df-3e49-4ee5-8f9c-e2f725888b4b\") " pod="openshift-monitoring/alertmanager-main-0" Dec 10 10:53:41 crc kubenswrapper[4780]: I1210 10:53:41.285137 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-alertmanager-kube-rbac-proxy-metric\" (UniqueName: \"kubernetes.io/secret/b41fe3df-3e49-4ee5-8f9c-e2f725888b4b-secret-alertmanager-kube-rbac-proxy-metric\") pod \"alertmanager-main-0\" (UID: \"b41fe3df-3e49-4ee5-8f9c-e2f725888b4b\") " pod="openshift-monitoring/alertmanager-main-0" Dec 10 10:53:41 crc kubenswrapper[4780]: I1210 10:53:41.285171 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/secret/b41fe3df-3e49-4ee5-8f9c-e2f725888b4b-config-volume\") pod \"alertmanager-main-0\" (UID: \"b41fe3df-3e49-4ee5-8f9c-e2f725888b4b\") " pod="openshift-monitoring/alertmanager-main-0" Dec 10 10:53:41 crc kubenswrapper[4780]: I1210 10:53:41.285212 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-alertmanager-kube-rbac-proxy\" (UniqueName: \"kubernetes.io/secret/b41fe3df-3e49-4ee5-8f9c-e2f725888b4b-secret-alertmanager-kube-rbac-proxy\") pod \"alertmanager-main-0\" (UID: \"b41fe3df-3e49-4ee5-8f9c-e2f725888b4b\") " 
pod="openshift-monitoring/alertmanager-main-0" Dec 10 10:53:41 crc kubenswrapper[4780]: I1210 10:53:41.285245 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/b41fe3df-3e49-4ee5-8f9c-e2f725888b4b-config-out\") pod \"alertmanager-main-0\" (UID: \"b41fe3df-3e49-4ee5-8f9c-e2f725888b4b\") " pod="openshift-monitoring/alertmanager-main-0" Dec 10 10:53:41 crc kubenswrapper[4780]: I1210 10:53:41.285267 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"alertmanager-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/b41fe3df-3e49-4ee5-8f9c-e2f725888b4b-alertmanager-trusted-ca-bundle\") pod \"alertmanager-main-0\" (UID: \"b41fe3df-3e49-4ee5-8f9c-e2f725888b4b\") " pod="openshift-monitoring/alertmanager-main-0" Dec 10 10:53:41 crc kubenswrapper[4780]: I1210 10:53:41.286250 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-client-ca\" (UniqueName: \"kubernetes.io/configmap/b41fe3df-3e49-4ee5-8f9c-e2f725888b4b-metrics-client-ca\") pod \"alertmanager-main-0\" (UID: \"b41fe3df-3e49-4ee5-8f9c-e2f725888b4b\") " pod="openshift-monitoring/alertmanager-main-0" Dec 10 10:53:41 crc kubenswrapper[4780]: I1210 10:53:41.286351 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-alertmanager-kube-rbac-proxy-web\" (UniqueName: \"kubernetes.io/secret/b41fe3df-3e49-4ee5-8f9c-e2f725888b4b-secret-alertmanager-kube-rbac-proxy-web\") pod \"alertmanager-main-0\" (UID: \"b41fe3df-3e49-4ee5-8f9c-e2f725888b4b\") " pod="openshift-monitoring/alertmanager-main-0" Dec 10 10:53:41 crc kubenswrapper[4780]: I1210 10:53:41.286376 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-b2kvr\" (UniqueName: \"kubernetes.io/projected/b41fe3df-3e49-4ee5-8f9c-e2f725888b4b-kube-api-access-b2kvr\") pod \"alertmanager-main-0\" (UID: \"b41fe3df-3e49-4ee5-8f9c-e2f725888b4b\") " pod="openshift-monitoring/alertmanager-main-0" Dec 10 10:53:41 crc kubenswrapper[4780]: I1210 10:53:41.286455 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/b41fe3df-3e49-4ee5-8f9c-e2f725888b4b-web-config\") pod \"alertmanager-main-0\" (UID: \"b41fe3df-3e49-4ee5-8f9c-e2f725888b4b\") " pod="openshift-monitoring/alertmanager-main-0" Dec 10 10:53:41 crc kubenswrapper[4780]: I1210 10:53:41.286486 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"alertmanager-main-db\" (UniqueName: \"kubernetes.io/empty-dir/b41fe3df-3e49-4ee5-8f9c-e2f725888b4b-alertmanager-main-db\") pod \"alertmanager-main-0\" (UID: \"b41fe3df-3e49-4ee5-8f9c-e2f725888b4b\") " pod="openshift-monitoring/alertmanager-main-0" Dec 10 10:53:41 crc kubenswrapper[4780]: I1210 10:53:41.286581 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-alertmanager-main-tls\" (UniqueName: \"kubernetes.io/secret/b41fe3df-3e49-4ee5-8f9c-e2f725888b4b-secret-alertmanager-main-tls\") pod \"alertmanager-main-0\" (UID: \"b41fe3df-3e49-4ee5-8f9c-e2f725888b4b\") " pod="openshift-monitoring/alertmanager-main-0" Dec 10 10:53:41 crc kubenswrapper[4780]: I1210 10:53:41.389415 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"secret-alertmanager-kube-rbac-proxy-metric\" (UniqueName: \"kubernetes.io/secret/b41fe3df-3e49-4ee5-8f9c-e2f725888b4b-secret-alertmanager-kube-rbac-proxy-metric\") pod \"alertmanager-main-0\" (UID: \"b41fe3df-3e49-4ee5-8f9c-e2f725888b4b\") " pod="openshift-monitoring/alertmanager-main-0" Dec 10 10:53:41 crc kubenswrapper[4780]: I1210 10:53:41.389484 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/secret/b41fe3df-3e49-4ee5-8f9c-e2f725888b4b-config-volume\") pod \"alertmanager-main-0\" (UID: \"b41fe3df-3e49-4ee5-8f9c-e2f725888b4b\") " pod="openshift-monitoring/alertmanager-main-0" Dec 10 10:53:41 crc kubenswrapper[4780]: I1210 10:53:41.389509 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-alertmanager-kube-rbac-proxy\" (UniqueName: \"kubernetes.io/secret/b41fe3df-3e49-4ee5-8f9c-e2f725888b4b-secret-alertmanager-kube-rbac-proxy\") pod \"alertmanager-main-0\" (UID: \"b41fe3df-3e49-4ee5-8f9c-e2f725888b4b\") " pod="openshift-monitoring/alertmanager-main-0" Dec 10 10:53:41 crc kubenswrapper[4780]: I1210 10:53:41.389563 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/b41fe3df-3e49-4ee5-8f9c-e2f725888b4b-config-out\") pod \"alertmanager-main-0\" (UID: \"b41fe3df-3e49-4ee5-8f9c-e2f725888b4b\") " pod="openshift-monitoring/alertmanager-main-0" Dec 10 10:53:41 crc kubenswrapper[4780]: I1210 10:53:41.389587 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"alertmanager-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/b41fe3df-3e49-4ee5-8f9c-e2f725888b4b-alertmanager-trusted-ca-bundle\") pod \"alertmanager-main-0\" (UID: \"b41fe3df-3e49-4ee5-8f9c-e2f725888b4b\") " pod="openshift-monitoring/alertmanager-main-0" Dec 10 10:53:41 crc kubenswrapper[4780]: I1210 10:53:41.389610 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-client-ca\" (UniqueName: \"kubernetes.io/configmap/b41fe3df-3e49-4ee5-8f9c-e2f725888b4b-metrics-client-ca\") pod \"alertmanager-main-0\" (UID: \"b41fe3df-3e49-4ee5-8f9c-e2f725888b4b\") " pod="openshift-monitoring/alertmanager-main-0" Dec 10 10:53:41 crc kubenswrapper[4780]: I1210 10:53:41.389647 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-alertmanager-kube-rbac-proxy-web\" (UniqueName: \"kubernetes.io/secret/b41fe3df-3e49-4ee5-8f9c-e2f725888b4b-secret-alertmanager-kube-rbac-proxy-web\") pod \"alertmanager-main-0\" (UID: \"b41fe3df-3e49-4ee5-8f9c-e2f725888b4b\") " pod="openshift-monitoring/alertmanager-main-0" Dec 10 10:53:41 crc kubenswrapper[4780]: I1210 10:53:41.390691 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-client-ca\" (UniqueName: \"kubernetes.io/configmap/b41fe3df-3e49-4ee5-8f9c-e2f725888b4b-metrics-client-ca\") pod \"alertmanager-main-0\" (UID: \"b41fe3df-3e49-4ee5-8f9c-e2f725888b4b\") " pod="openshift-monitoring/alertmanager-main-0" Dec 10 10:53:41 crc kubenswrapper[4780]: I1210 10:53:41.390755 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-b2kvr\" (UniqueName: \"kubernetes.io/projected/b41fe3df-3e49-4ee5-8f9c-e2f725888b4b-kube-api-access-b2kvr\") pod \"alertmanager-main-0\" (UID: \"b41fe3df-3e49-4ee5-8f9c-e2f725888b4b\") " pod="openshift-monitoring/alertmanager-main-0" Dec 10 10:53:41 crc kubenswrapper[4780]: I1210 10:53:41.390809 4780 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/b41fe3df-3e49-4ee5-8f9c-e2f725888b4b-web-config\") pod \"alertmanager-main-0\" (UID: \"b41fe3df-3e49-4ee5-8f9c-e2f725888b4b\") " pod="openshift-monitoring/alertmanager-main-0" Dec 10 10:53:41 crc kubenswrapper[4780]: I1210 10:53:41.390834 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"alertmanager-main-db\" (UniqueName: \"kubernetes.io/empty-dir/b41fe3df-3e49-4ee5-8f9c-e2f725888b4b-alertmanager-main-db\") pod \"alertmanager-main-0\" (UID: \"b41fe3df-3e49-4ee5-8f9c-e2f725888b4b\") " pod="openshift-monitoring/alertmanager-main-0" Dec 10 10:53:41 crc kubenswrapper[4780]: I1210 10:53:41.390846 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"alertmanager-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/b41fe3df-3e49-4ee5-8f9c-e2f725888b4b-alertmanager-trusted-ca-bundle\") pod \"alertmanager-main-0\" (UID: \"b41fe3df-3e49-4ee5-8f9c-e2f725888b4b\") " pod="openshift-monitoring/alertmanager-main-0" Dec 10 10:53:41 crc kubenswrapper[4780]: I1210 10:53:41.390887 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-alertmanager-main-tls\" (UniqueName: \"kubernetes.io/secret/b41fe3df-3e49-4ee5-8f9c-e2f725888b4b-secret-alertmanager-main-tls\") pod \"alertmanager-main-0\" (UID: \"b41fe3df-3e49-4ee5-8f9c-e2f725888b4b\") " pod="openshift-monitoring/alertmanager-main-0" Dec 10 10:53:41 crc kubenswrapper[4780]: I1210 10:53:41.390911 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/b41fe3df-3e49-4ee5-8f9c-e2f725888b4b-tls-assets\") pod \"alertmanager-main-0\" (UID: \"b41fe3df-3e49-4ee5-8f9c-e2f725888b4b\") " pod="openshift-monitoring/alertmanager-main-0" Dec 10 10:53:41 crc kubenswrapper[4780]: I1210 10:53:41.392322 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"alertmanager-main-db\" (UniqueName: \"kubernetes.io/empty-dir/b41fe3df-3e49-4ee5-8f9c-e2f725888b4b-alertmanager-main-db\") pod \"alertmanager-main-0\" (UID: \"b41fe3df-3e49-4ee5-8f9c-e2f725888b4b\") " pod="openshift-monitoring/alertmanager-main-0" Dec 10 10:53:41 crc kubenswrapper[4780]: I1210 10:53:41.395642 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-alertmanager-kube-rbac-proxy-web\" (UniqueName: \"kubernetes.io/secret/b41fe3df-3e49-4ee5-8f9c-e2f725888b4b-secret-alertmanager-kube-rbac-proxy-web\") pod \"alertmanager-main-0\" (UID: \"b41fe3df-3e49-4ee5-8f9c-e2f725888b4b\") " pod="openshift-monitoring/alertmanager-main-0" Dec 10 10:53:41 crc kubenswrapper[4780]: I1210 10:53:41.395984 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/b41fe3df-3e49-4ee5-8f9c-e2f725888b4b-web-config\") pod \"alertmanager-main-0\" (UID: \"b41fe3df-3e49-4ee5-8f9c-e2f725888b4b\") " pod="openshift-monitoring/alertmanager-main-0" Dec 10 10:53:41 crc kubenswrapper[4780]: I1210 10:53:41.398024 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-alertmanager-kube-rbac-proxy\" (UniqueName: \"kubernetes.io/secret/b41fe3df-3e49-4ee5-8f9c-e2f725888b4b-secret-alertmanager-kube-rbac-proxy\") pod \"alertmanager-main-0\" (UID: \"b41fe3df-3e49-4ee5-8f9c-e2f725888b4b\") " pod="openshift-monitoring/alertmanager-main-0" Dec 10 10:53:41 crc kubenswrapper[4780]: I1210 10:53:41.398367 4780 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-alertmanager-main-tls\" (UniqueName: \"kubernetes.io/secret/b41fe3df-3e49-4ee5-8f9c-e2f725888b4b-secret-alertmanager-main-tls\") pod \"alertmanager-main-0\" (UID: \"b41fe3df-3e49-4ee5-8f9c-e2f725888b4b\") " pod="openshift-monitoring/alertmanager-main-0" Dec 10 10:53:41 crc kubenswrapper[4780]: I1210 10:53:41.401259 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/b41fe3df-3e49-4ee5-8f9c-e2f725888b4b-config-out\") pod \"alertmanager-main-0\" (UID: \"b41fe3df-3e49-4ee5-8f9c-e2f725888b4b\") " pod="openshift-monitoring/alertmanager-main-0" Dec 10 10:53:41 crc kubenswrapper[4780]: I1210 10:53:41.401489 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-alertmanager-kube-rbac-proxy-metric\" (UniqueName: \"kubernetes.io/secret/b41fe3df-3e49-4ee5-8f9c-e2f725888b4b-secret-alertmanager-kube-rbac-proxy-metric\") pod \"alertmanager-main-0\" (UID: \"b41fe3df-3e49-4ee5-8f9c-e2f725888b4b\") " pod="openshift-monitoring/alertmanager-main-0" Dec 10 10:53:41 crc kubenswrapper[4780]: I1210 10:53:41.404004 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/b41fe3df-3e49-4ee5-8f9c-e2f725888b4b-tls-assets\") pod \"alertmanager-main-0\" (UID: \"b41fe3df-3e49-4ee5-8f9c-e2f725888b4b\") " pod="openshift-monitoring/alertmanager-main-0" Dec 10 10:53:41 crc kubenswrapper[4780]: I1210 10:53:41.404269 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/secret/b41fe3df-3e49-4ee5-8f9c-e2f725888b4b-config-volume\") pod \"alertmanager-main-0\" (UID: \"b41fe3df-3e49-4ee5-8f9c-e2f725888b4b\") " pod="openshift-monitoring/alertmanager-main-0" Dec 10 10:53:41 crc kubenswrapper[4780]: I1210 10:53:41.412179 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-b2kvr\" (UniqueName: \"kubernetes.io/projected/b41fe3df-3e49-4ee5-8f9c-e2f725888b4b-kube-api-access-b2kvr\") pod \"alertmanager-main-0\" (UID: \"b41fe3df-3e49-4ee5-8f9c-e2f725888b4b\") " pod="openshift-monitoring/alertmanager-main-0" Dec 10 10:53:41 crc kubenswrapper[4780]: I1210 10:53:41.567480 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-monitoring/alertmanager-main-0" Dec 10 10:53:41 crc kubenswrapper[4780]: I1210 10:53:41.571241 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-monitoring/kube-state-metrics-777cb5bd5d-c6vcz"] Dec 10 10:53:41 crc kubenswrapper[4780]: I1210 10:53:41.798700 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-c6vcz" event={"ID":"dcdd80bc-0b73-4393-abca-5cf977bbb84b","Type":"ContainerStarted","Data":"57854c525da61e1a8cfa16473ba83024c40b4de386d3c8ab053bc1ec13c0711d"} Dec 10 10:53:41 crc kubenswrapper[4780]: I1210 10:53:41.806733 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/openshift-state-metrics-566fddb674-4tzw7" event={"ID":"a807df46-18bc-439b-ac2d-d3ef96682a73","Type":"ContainerStarted","Data":"b599a494446586e2e856b6cb0e583724341b39680f084c4a8272b10f075fadfc"} Dec 10 10:53:41 crc kubenswrapper[4780]: I1210 10:53:41.806801 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/openshift-state-metrics-566fddb674-4tzw7" event={"ID":"a807df46-18bc-439b-ac2d-d3ef96682a73","Type":"ContainerStarted","Data":"73cee3c62f7e409db08dd769e30cf965a43378e94163e459f02fe614a2fbfb79"} Dec 10 10:53:41 crc kubenswrapper[4780]: I1210 10:53:41.806816 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/openshift-state-metrics-566fddb674-4tzw7" event={"ID":"a807df46-18bc-439b-ac2d-d3ef96682a73","Type":"ContainerStarted","Data":"72575f2e93599b47dedf1387f45abad11418e77844d0c5e47d0b15fd0924314a"} Dec 10 10:53:41 crc kubenswrapper[4780]: I1210 10:53:41.808453 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/node-exporter-hzkk9" event={"ID":"77c7f18f-58ec-48de-9227-a0e210ae07c7","Type":"ContainerStarted","Data":"96c8b5341e6864d52bd2d3b6c5dfe0e76095470d2e30fe1fd92f92dc6668a4dd"} Dec 10 10:53:42 crc kubenswrapper[4780]: I1210 10:53:42.003560 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-monitoring/alertmanager-main-0"] Dec 10 10:53:42 crc kubenswrapper[4780]: W1210 10:53:42.015545 4780 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb41fe3df_3e49_4ee5_8f9c_e2f725888b4b.slice/crio-2d1dd3e23d7c5bc3d0949863a3f92b3d977a07956d0762835a3a63137b1c9029 WatchSource:0}: Error finding container 2d1dd3e23d7c5bc3d0949863a3f92b3d977a07956d0762835a3a63137b1c9029: Status 404 returned error can't find the container with id 2d1dd3e23d7c5bc3d0949863a3f92b3d977a07956d0762835a3a63137b1c9029 Dec 10 10:53:42 crc kubenswrapper[4780]: I1210 10:53:42.106867 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-monitoring/thanos-querier-5dc6df75d9-4ps6z"] Dec 10 10:53:42 crc kubenswrapper[4780]: I1210 10:53:42.109351 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-monitoring/thanos-querier-5dc6df75d9-4ps6z" Dec 10 10:53:42 crc kubenswrapper[4780]: I1210 10:53:42.111778 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"thanos-querier-kube-rbac-proxy-metrics" Dec 10 10:53:42 crc kubenswrapper[4780]: I1210 10:53:42.112499 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"thanos-querier-dockercfg-knn5p" Dec 10 10:53:42 crc kubenswrapper[4780]: I1210 10:53:42.112548 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"thanos-querier-kube-rbac-proxy-rules" Dec 10 10:53:42 crc kubenswrapper[4780]: I1210 10:53:42.112557 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"thanos-querier-kube-rbac-proxy" Dec 10 10:53:42 crc kubenswrapper[4780]: I1210 10:53:42.113260 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"thanos-querier-kube-rbac-proxy-web" Dec 10 10:53:42 crc kubenswrapper[4780]: I1210 10:53:42.113292 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"thanos-querier-grpc-tls-5qah6j8p18k8c" Dec 10 10:53:42 crc kubenswrapper[4780]: I1210 10:53:42.113477 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"thanos-querier-tls" Dec 10 10:53:42 crc kubenswrapper[4780]: I1210 10:53:42.128413 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-monitoring/thanos-querier-5dc6df75d9-4ps6z"] Dec 10 10:53:42 crc kubenswrapper[4780]: I1210 10:53:42.204896 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-client-ca\" (UniqueName: \"kubernetes.io/configmap/faca989e-eb37-4004-8522-de2faeee92e3-metrics-client-ca\") pod \"thanos-querier-5dc6df75d9-4ps6z\" (UID: \"faca989e-eb37-4004-8522-de2faeee92e3\") " pod="openshift-monitoring/thanos-querier-5dc6df75d9-4ps6z" Dec 10 10:53:42 crc kubenswrapper[4780]: I1210 10:53:42.205013 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-thanos-querier-kube-rbac-proxy-rules\" (UniqueName: \"kubernetes.io/secret/faca989e-eb37-4004-8522-de2faeee92e3-secret-thanos-querier-kube-rbac-proxy-rules\") pod \"thanos-querier-5dc6df75d9-4ps6z\" (UID: \"faca989e-eb37-4004-8522-de2faeee92e3\") " pod="openshift-monitoring/thanos-querier-5dc6df75d9-4ps6z" Dec 10 10:53:42 crc kubenswrapper[4780]: I1210 10:53:42.205064 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-thanos-querier-kube-rbac-proxy-web\" (UniqueName: \"kubernetes.io/secret/faca989e-eb37-4004-8522-de2faeee92e3-secret-thanos-querier-kube-rbac-proxy-web\") pod \"thanos-querier-5dc6df75d9-4ps6z\" (UID: \"faca989e-eb37-4004-8522-de2faeee92e3\") " pod="openshift-monitoring/thanos-querier-5dc6df75d9-4ps6z" Dec 10 10:53:42 crc kubenswrapper[4780]: I1210 10:53:42.205131 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-thanos-querier-tls\" (UniqueName: \"kubernetes.io/secret/faca989e-eb37-4004-8522-de2faeee92e3-secret-thanos-querier-tls\") pod \"thanos-querier-5dc6df75d9-4ps6z\" (UID: \"faca989e-eb37-4004-8522-de2faeee92e3\") " pod="openshift-monitoring/thanos-querier-5dc6df75d9-4ps6z" Dec 10 10:53:42 crc kubenswrapper[4780]: I1210 10:53:42.205208 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume 
started for volume \"secret-thanos-querier-kube-rbac-proxy-metrics\" (UniqueName: \"kubernetes.io/secret/faca989e-eb37-4004-8522-de2faeee92e3-secret-thanos-querier-kube-rbac-proxy-metrics\") pod \"thanos-querier-5dc6df75d9-4ps6z\" (UID: \"faca989e-eb37-4004-8522-de2faeee92e3\") " pod="openshift-monitoring/thanos-querier-5dc6df75d9-4ps6z" Dec 10 10:53:42 crc kubenswrapper[4780]: I1210 10:53:42.205292 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-d2kvr\" (UniqueName: \"kubernetes.io/projected/faca989e-eb37-4004-8522-de2faeee92e3-kube-api-access-d2kvr\") pod \"thanos-querier-5dc6df75d9-4ps6z\" (UID: \"faca989e-eb37-4004-8522-de2faeee92e3\") " pod="openshift-monitoring/thanos-querier-5dc6df75d9-4ps6z" Dec 10 10:53:42 crc kubenswrapper[4780]: I1210 10:53:42.205324 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-grpc-tls\" (UniqueName: \"kubernetes.io/secret/faca989e-eb37-4004-8522-de2faeee92e3-secret-grpc-tls\") pod \"thanos-querier-5dc6df75d9-4ps6z\" (UID: \"faca989e-eb37-4004-8522-de2faeee92e3\") " pod="openshift-monitoring/thanos-querier-5dc6df75d9-4ps6z" Dec 10 10:53:42 crc kubenswrapper[4780]: I1210 10:53:42.205351 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-thanos-querier-kube-rbac-proxy\" (UniqueName: \"kubernetes.io/secret/faca989e-eb37-4004-8522-de2faeee92e3-secret-thanos-querier-kube-rbac-proxy\") pod \"thanos-querier-5dc6df75d9-4ps6z\" (UID: \"faca989e-eb37-4004-8522-de2faeee92e3\") " pod="openshift-monitoring/thanos-querier-5dc6df75d9-4ps6z" Dec 10 10:53:42 crc kubenswrapper[4780]: I1210 10:53:42.306869 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-client-ca\" (UniqueName: \"kubernetes.io/configmap/faca989e-eb37-4004-8522-de2faeee92e3-metrics-client-ca\") pod \"thanos-querier-5dc6df75d9-4ps6z\" (UID: \"faca989e-eb37-4004-8522-de2faeee92e3\") " pod="openshift-monitoring/thanos-querier-5dc6df75d9-4ps6z" Dec 10 10:53:42 crc kubenswrapper[4780]: I1210 10:53:42.306977 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-thanos-querier-kube-rbac-proxy-rules\" (UniqueName: \"kubernetes.io/secret/faca989e-eb37-4004-8522-de2faeee92e3-secret-thanos-querier-kube-rbac-proxy-rules\") pod \"thanos-querier-5dc6df75d9-4ps6z\" (UID: \"faca989e-eb37-4004-8522-de2faeee92e3\") " pod="openshift-monitoring/thanos-querier-5dc6df75d9-4ps6z" Dec 10 10:53:42 crc kubenswrapper[4780]: I1210 10:53:42.307040 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-thanos-querier-kube-rbac-proxy-web\" (UniqueName: \"kubernetes.io/secret/faca989e-eb37-4004-8522-de2faeee92e3-secret-thanos-querier-kube-rbac-proxy-web\") pod \"thanos-querier-5dc6df75d9-4ps6z\" (UID: \"faca989e-eb37-4004-8522-de2faeee92e3\") " pod="openshift-monitoring/thanos-querier-5dc6df75d9-4ps6z" Dec 10 10:53:42 crc kubenswrapper[4780]: I1210 10:53:42.307103 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-thanos-querier-tls\" (UniqueName: \"kubernetes.io/secret/faca989e-eb37-4004-8522-de2faeee92e3-secret-thanos-querier-tls\") pod \"thanos-querier-5dc6df75d9-4ps6z\" (UID: \"faca989e-eb37-4004-8522-de2faeee92e3\") " pod="openshift-monitoring/thanos-querier-5dc6df75d9-4ps6z" Dec 10 10:53:42 crc kubenswrapper[4780]: I1210 10:53:42.307149 4780 reconciler_common.go:218] "operationExecutor.MountVolume 
started for volume \"secret-thanos-querier-kube-rbac-proxy-metrics\" (UniqueName: \"kubernetes.io/secret/faca989e-eb37-4004-8522-de2faeee92e3-secret-thanos-querier-kube-rbac-proxy-metrics\") pod \"thanos-querier-5dc6df75d9-4ps6z\" (UID: \"faca989e-eb37-4004-8522-de2faeee92e3\") " pod="openshift-monitoring/thanos-querier-5dc6df75d9-4ps6z" Dec 10 10:53:42 crc kubenswrapper[4780]: I1210 10:53:42.307187 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-d2kvr\" (UniqueName: \"kubernetes.io/projected/faca989e-eb37-4004-8522-de2faeee92e3-kube-api-access-d2kvr\") pod \"thanos-querier-5dc6df75d9-4ps6z\" (UID: \"faca989e-eb37-4004-8522-de2faeee92e3\") " pod="openshift-monitoring/thanos-querier-5dc6df75d9-4ps6z" Dec 10 10:53:42 crc kubenswrapper[4780]: I1210 10:53:42.307218 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-grpc-tls\" (UniqueName: \"kubernetes.io/secret/faca989e-eb37-4004-8522-de2faeee92e3-secret-grpc-tls\") pod \"thanos-querier-5dc6df75d9-4ps6z\" (UID: \"faca989e-eb37-4004-8522-de2faeee92e3\") " pod="openshift-monitoring/thanos-querier-5dc6df75d9-4ps6z" Dec 10 10:53:42 crc kubenswrapper[4780]: I1210 10:53:42.307244 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-thanos-querier-kube-rbac-proxy\" (UniqueName: \"kubernetes.io/secret/faca989e-eb37-4004-8522-de2faeee92e3-secret-thanos-querier-kube-rbac-proxy\") pod \"thanos-querier-5dc6df75d9-4ps6z\" (UID: \"faca989e-eb37-4004-8522-de2faeee92e3\") " pod="openshift-monitoring/thanos-querier-5dc6df75d9-4ps6z" Dec 10 10:53:42 crc kubenswrapper[4780]: I1210 10:53:42.308881 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-client-ca\" (UniqueName: \"kubernetes.io/configmap/faca989e-eb37-4004-8522-de2faeee92e3-metrics-client-ca\") pod \"thanos-querier-5dc6df75d9-4ps6z\" (UID: \"faca989e-eb37-4004-8522-de2faeee92e3\") " pod="openshift-monitoring/thanos-querier-5dc6df75d9-4ps6z" Dec 10 10:53:42 crc kubenswrapper[4780]: I1210 10:53:42.314002 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-thanos-querier-kube-rbac-proxy-metrics\" (UniqueName: \"kubernetes.io/secret/faca989e-eb37-4004-8522-de2faeee92e3-secret-thanos-querier-kube-rbac-proxy-metrics\") pod \"thanos-querier-5dc6df75d9-4ps6z\" (UID: \"faca989e-eb37-4004-8522-de2faeee92e3\") " pod="openshift-monitoring/thanos-querier-5dc6df75d9-4ps6z" Dec 10 10:53:42 crc kubenswrapper[4780]: I1210 10:53:42.314690 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-thanos-querier-kube-rbac-proxy-web\" (UniqueName: \"kubernetes.io/secret/faca989e-eb37-4004-8522-de2faeee92e3-secret-thanos-querier-kube-rbac-proxy-web\") pod \"thanos-querier-5dc6df75d9-4ps6z\" (UID: \"faca989e-eb37-4004-8522-de2faeee92e3\") " pod="openshift-monitoring/thanos-querier-5dc6df75d9-4ps6z" Dec 10 10:53:42 crc kubenswrapper[4780]: I1210 10:53:42.322650 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-thanos-querier-kube-rbac-proxy-rules\" (UniqueName: \"kubernetes.io/secret/faca989e-eb37-4004-8522-de2faeee92e3-secret-thanos-querier-kube-rbac-proxy-rules\") pod \"thanos-querier-5dc6df75d9-4ps6z\" (UID: \"faca989e-eb37-4004-8522-de2faeee92e3\") " pod="openshift-monitoring/thanos-querier-5dc6df75d9-4ps6z" Dec 10 10:53:42 crc kubenswrapper[4780]: I1210 10:53:42.326803 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-grpc-tls\" (UniqueName: 
\"kubernetes.io/secret/faca989e-eb37-4004-8522-de2faeee92e3-secret-grpc-tls\") pod \"thanos-querier-5dc6df75d9-4ps6z\" (UID: \"faca989e-eb37-4004-8522-de2faeee92e3\") " pod="openshift-monitoring/thanos-querier-5dc6df75d9-4ps6z" Dec 10 10:53:42 crc kubenswrapper[4780]: I1210 10:53:42.327203 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-thanos-querier-tls\" (UniqueName: \"kubernetes.io/secret/faca989e-eb37-4004-8522-de2faeee92e3-secret-thanos-querier-tls\") pod \"thanos-querier-5dc6df75d9-4ps6z\" (UID: \"faca989e-eb37-4004-8522-de2faeee92e3\") " pod="openshift-monitoring/thanos-querier-5dc6df75d9-4ps6z" Dec 10 10:53:42 crc kubenswrapper[4780]: I1210 10:53:42.327877 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-thanos-querier-kube-rbac-proxy\" (UniqueName: \"kubernetes.io/secret/faca989e-eb37-4004-8522-de2faeee92e3-secret-thanos-querier-kube-rbac-proxy\") pod \"thanos-querier-5dc6df75d9-4ps6z\" (UID: \"faca989e-eb37-4004-8522-de2faeee92e3\") " pod="openshift-monitoring/thanos-querier-5dc6df75d9-4ps6z" Dec 10 10:53:42 crc kubenswrapper[4780]: I1210 10:53:42.330724 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-d2kvr\" (UniqueName: \"kubernetes.io/projected/faca989e-eb37-4004-8522-de2faeee92e3-kube-api-access-d2kvr\") pod \"thanos-querier-5dc6df75d9-4ps6z\" (UID: \"faca989e-eb37-4004-8522-de2faeee92e3\") " pod="openshift-monitoring/thanos-querier-5dc6df75d9-4ps6z" Dec 10 10:53:42 crc kubenswrapper[4780]: I1210 10:53:42.431359 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-monitoring/thanos-querier-5dc6df75d9-4ps6z" Dec 10 10:53:42 crc kubenswrapper[4780]: I1210 10:53:42.816876 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/alertmanager-main-0" event={"ID":"b41fe3df-3e49-4ee5-8f9c-e2f725888b4b","Type":"ContainerStarted","Data":"2d1dd3e23d7c5bc3d0949863a3f92b3d977a07956d0762835a3a63137b1c9029"} Dec 10 10:53:43 crc kubenswrapper[4780]: I1210 10:53:43.833124 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-c6vcz" event={"ID":"dcdd80bc-0b73-4393-abca-5cf977bbb84b","Type":"ContainerStarted","Data":"0bc09f175263184adcb143e7e95f73fd6a5c39044df2b50474c8d8421a4a52c3"} Dec 10 10:53:43 crc kubenswrapper[4780]: I1210 10:53:43.845241 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/openshift-state-metrics-566fddb674-4tzw7" event={"ID":"a807df46-18bc-439b-ac2d-d3ef96682a73","Type":"ContainerStarted","Data":"c220147d1f22f067332c89a517a398498e027710e55f8e27d2668a100a9fa681"} Dec 10 10:53:43 crc kubenswrapper[4780]: I1210 10:53:43.852662 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/node-exporter-hzkk9" event={"ID":"77c7f18f-58ec-48de-9227-a0e210ae07c7","Type":"ContainerStarted","Data":"0184661c4788a8845e397f594308ea9f12ba294c2644a9727eb4dca95ca63d2f"} Dec 10 10:53:43 crc kubenswrapper[4780]: I1210 10:53:43.873251 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-monitoring/openshift-state-metrics-566fddb674-4tzw7" podStartSLOduration=2.783104398 podStartE2EDuration="4.873224777s" podCreationTimestamp="2025-12-10 10:53:39 +0000 UTC" firstStartedPulling="2025-12-10 10:53:41.322154814 +0000 UTC m=+526.175548257" lastFinishedPulling="2025-12-10 10:53:43.412275193 +0000 UTC m=+528.265668636" observedRunningTime="2025-12-10 10:53:43.867452433 +0000 UTC 
m=+528.720845876" watchObservedRunningTime="2025-12-10 10:53:43.873224777 +0000 UTC m=+528.726618220" Dec 10 10:53:43 crc kubenswrapper[4780]: I1210 10:53:43.895003 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-monitoring/thanos-querier-5dc6df75d9-4ps6z"] Dec 10 10:53:44 crc kubenswrapper[4780]: W1210 10:53:44.070599 4780 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podfaca989e_eb37_4004_8522_de2faeee92e3.slice/crio-74611fac8a6aa66b3d80310fde169b24badf082c6d465065f9eee38707aaaf1e WatchSource:0}: Error finding container 74611fac8a6aa66b3d80310fde169b24badf082c6d465065f9eee38707aaaf1e: Status 404 returned error can't find the container with id 74611fac8a6aa66b3d80310fde169b24badf082c6d465065f9eee38707aaaf1e Dec 10 10:53:44 crc kubenswrapper[4780]: I1210 10:53:44.801980 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/console-549cdbd99b-lf7hk"] Dec 10 10:53:44 crc kubenswrapper[4780]: I1210 10:53:44.803280 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-549cdbd99b-lf7hk" Dec 10 10:53:44 crc kubenswrapper[4780]: I1210 10:53:44.820533 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-549cdbd99b-lf7hk"] Dec 10 10:53:44 crc kubenswrapper[4780]: I1210 10:53:44.850464 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/1a100f8e-37f1-4624-be03-49e295e939ce-service-ca\") pod \"console-549cdbd99b-lf7hk\" (UID: \"1a100f8e-37f1-4624-be03-49e295e939ce\") " pod="openshift-console/console-549cdbd99b-lf7hk" Dec 10 10:53:44 crc kubenswrapper[4780]: I1210 10:53:44.850553 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/1a100f8e-37f1-4624-be03-49e295e939ce-trusted-ca-bundle\") pod \"console-549cdbd99b-lf7hk\" (UID: \"1a100f8e-37f1-4624-be03-49e295e939ce\") " pod="openshift-console/console-549cdbd99b-lf7hk" Dec 10 10:53:44 crc kubenswrapper[4780]: I1210 10:53:44.850593 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/1a100f8e-37f1-4624-be03-49e295e939ce-oauth-serving-cert\") pod \"console-549cdbd99b-lf7hk\" (UID: \"1a100f8e-37f1-4624-be03-49e295e939ce\") " pod="openshift-console/console-549cdbd99b-lf7hk" Dec 10 10:53:44 crc kubenswrapper[4780]: I1210 10:53:44.850627 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/1a100f8e-37f1-4624-be03-49e295e939ce-console-config\") pod \"console-549cdbd99b-lf7hk\" (UID: \"1a100f8e-37f1-4624-be03-49e295e939ce\") " pod="openshift-console/console-549cdbd99b-lf7hk" Dec 10 10:53:44 crc kubenswrapper[4780]: I1210 10:53:44.850653 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/1a100f8e-37f1-4624-be03-49e295e939ce-console-serving-cert\") pod \"console-549cdbd99b-lf7hk\" (UID: \"1a100f8e-37f1-4624-be03-49e295e939ce\") " pod="openshift-console/console-549cdbd99b-lf7hk" Dec 10 10:53:44 crc kubenswrapper[4780]: I1210 10:53:44.850870 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for 
volume \"kube-api-access-s56ss\" (UniqueName: \"kubernetes.io/projected/1a100f8e-37f1-4624-be03-49e295e939ce-kube-api-access-s56ss\") pod \"console-549cdbd99b-lf7hk\" (UID: \"1a100f8e-37f1-4624-be03-49e295e939ce\") " pod="openshift-console/console-549cdbd99b-lf7hk" Dec 10 10:53:44 crc kubenswrapper[4780]: I1210 10:53:44.851285 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/1a100f8e-37f1-4624-be03-49e295e939ce-console-oauth-config\") pod \"console-549cdbd99b-lf7hk\" (UID: \"1a100f8e-37f1-4624-be03-49e295e939ce\") " pod="openshift-console/console-549cdbd99b-lf7hk" Dec 10 10:53:44 crc kubenswrapper[4780]: I1210 10:53:44.865887 4780 generic.go:334] "Generic (PLEG): container finished" podID="b41fe3df-3e49-4ee5-8f9c-e2f725888b4b" containerID="32f9ef93d3a3344f757f4c53ddd3ad473f1bc4b3c12f858ed8c9f19c9a0dcfb6" exitCode=0 Dec 10 10:53:44 crc kubenswrapper[4780]: I1210 10:53:44.866048 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/alertmanager-main-0" event={"ID":"b41fe3df-3e49-4ee5-8f9c-e2f725888b4b","Type":"ContainerDied","Data":"32f9ef93d3a3344f757f4c53ddd3ad473f1bc4b3c12f858ed8c9f19c9a0dcfb6"} Dec 10 10:53:44 crc kubenswrapper[4780]: I1210 10:53:44.868510 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/thanos-querier-5dc6df75d9-4ps6z" event={"ID":"faca989e-eb37-4004-8522-de2faeee92e3","Type":"ContainerStarted","Data":"74611fac8a6aa66b3d80310fde169b24badf082c6d465065f9eee38707aaaf1e"} Dec 10 10:53:44 crc kubenswrapper[4780]: I1210 10:53:44.873708 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-c6vcz" event={"ID":"dcdd80bc-0b73-4393-abca-5cf977bbb84b","Type":"ContainerStarted","Data":"52ed76264d4291719f2e72cb424fb630a70caa4702ead8b62c394771febe9733"} Dec 10 10:53:44 crc kubenswrapper[4780]: I1210 10:53:44.873760 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-c6vcz" event={"ID":"dcdd80bc-0b73-4393-abca-5cf977bbb84b","Type":"ContainerStarted","Data":"5551a78dd587039ed73412ce7036fc3ccb0a25d6e42b576093b825df5525bf64"} Dec 10 10:53:44 crc kubenswrapper[4780]: I1210 10:53:44.879215 4780 generic.go:334] "Generic (PLEG): container finished" podID="77c7f18f-58ec-48de-9227-a0e210ae07c7" containerID="0184661c4788a8845e397f594308ea9f12ba294c2644a9727eb4dca95ca63d2f" exitCode=0 Dec 10 10:53:44 crc kubenswrapper[4780]: I1210 10:53:44.879331 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/node-exporter-hzkk9" event={"ID":"77c7f18f-58ec-48de-9227-a0e210ae07c7","Type":"ContainerDied","Data":"0184661c4788a8845e397f594308ea9f12ba294c2644a9727eb4dca95ca63d2f"} Dec 10 10:53:44 crc kubenswrapper[4780]: I1210 10:53:44.954231 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-c6vcz" podStartSLOduration=3.9964225239999998 podStartE2EDuration="5.954200902s" podCreationTimestamp="2025-12-10 10:53:39 +0000 UTC" firstStartedPulling="2025-12-10 10:53:41.576443618 +0000 UTC m=+526.429837061" lastFinishedPulling="2025-12-10 10:53:43.534221996 +0000 UTC m=+528.387615439" observedRunningTime="2025-12-10 10:53:44.953665408 +0000 UTC m=+529.807058901" watchObservedRunningTime="2025-12-10 10:53:44.954200902 +0000 UTC m=+529.807594355" Dec 10 10:53:44 crc kubenswrapper[4780]: I1210 10:53:44.954603 4780 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/1a100f8e-37f1-4624-be03-49e295e939ce-service-ca\") pod \"console-549cdbd99b-lf7hk\" (UID: \"1a100f8e-37f1-4624-be03-49e295e939ce\") " pod="openshift-console/console-549cdbd99b-lf7hk" Dec 10 10:53:44 crc kubenswrapper[4780]: I1210 10:53:44.955218 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/1a100f8e-37f1-4624-be03-49e295e939ce-trusted-ca-bundle\") pod \"console-549cdbd99b-lf7hk\" (UID: \"1a100f8e-37f1-4624-be03-49e295e939ce\") " pod="openshift-console/console-549cdbd99b-lf7hk" Dec 10 10:53:44 crc kubenswrapper[4780]: I1210 10:53:44.955282 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/1a100f8e-37f1-4624-be03-49e295e939ce-oauth-serving-cert\") pod \"console-549cdbd99b-lf7hk\" (UID: \"1a100f8e-37f1-4624-be03-49e295e939ce\") " pod="openshift-console/console-549cdbd99b-lf7hk" Dec 10 10:53:44 crc kubenswrapper[4780]: I1210 10:53:44.955360 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/1a100f8e-37f1-4624-be03-49e295e939ce-console-config\") pod \"console-549cdbd99b-lf7hk\" (UID: \"1a100f8e-37f1-4624-be03-49e295e939ce\") " pod="openshift-console/console-549cdbd99b-lf7hk" Dec 10 10:53:44 crc kubenswrapper[4780]: I1210 10:53:44.955400 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/1a100f8e-37f1-4624-be03-49e295e939ce-console-serving-cert\") pod \"console-549cdbd99b-lf7hk\" (UID: \"1a100f8e-37f1-4624-be03-49e295e939ce\") " pod="openshift-console/console-549cdbd99b-lf7hk" Dec 10 10:53:44 crc kubenswrapper[4780]: I1210 10:53:44.955671 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/1a100f8e-37f1-4624-be03-49e295e939ce-console-oauth-config\") pod \"console-549cdbd99b-lf7hk\" (UID: \"1a100f8e-37f1-4624-be03-49e295e939ce\") " pod="openshift-console/console-549cdbd99b-lf7hk" Dec 10 10:53:44 crc kubenswrapper[4780]: I1210 10:53:44.955711 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s56ss\" (UniqueName: \"kubernetes.io/projected/1a100f8e-37f1-4624-be03-49e295e939ce-kube-api-access-s56ss\") pod \"console-549cdbd99b-lf7hk\" (UID: \"1a100f8e-37f1-4624-be03-49e295e939ce\") " pod="openshift-console/console-549cdbd99b-lf7hk" Dec 10 10:53:44 crc kubenswrapper[4780]: I1210 10:53:44.955724 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/1a100f8e-37f1-4624-be03-49e295e939ce-service-ca\") pod \"console-549cdbd99b-lf7hk\" (UID: \"1a100f8e-37f1-4624-be03-49e295e939ce\") " pod="openshift-console/console-549cdbd99b-lf7hk" Dec 10 10:53:44 crc kubenswrapper[4780]: I1210 10:53:44.959045 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/1a100f8e-37f1-4624-be03-49e295e939ce-trusted-ca-bundle\") pod \"console-549cdbd99b-lf7hk\" (UID: \"1a100f8e-37f1-4624-be03-49e295e939ce\") " pod="openshift-console/console-549cdbd99b-lf7hk" Dec 10 10:53:44 crc kubenswrapper[4780]: I1210 10:53:44.959317 4780 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/1a100f8e-37f1-4624-be03-49e295e939ce-oauth-serving-cert\") pod \"console-549cdbd99b-lf7hk\" (UID: \"1a100f8e-37f1-4624-be03-49e295e939ce\") " pod="openshift-console/console-549cdbd99b-lf7hk" Dec 10 10:53:44 crc kubenswrapper[4780]: I1210 10:53:44.960782 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/1a100f8e-37f1-4624-be03-49e295e939ce-console-config\") pod \"console-549cdbd99b-lf7hk\" (UID: \"1a100f8e-37f1-4624-be03-49e295e939ce\") " pod="openshift-console/console-549cdbd99b-lf7hk" Dec 10 10:53:44 crc kubenswrapper[4780]: I1210 10:53:44.966101 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/1a100f8e-37f1-4624-be03-49e295e939ce-console-serving-cert\") pod \"console-549cdbd99b-lf7hk\" (UID: \"1a100f8e-37f1-4624-be03-49e295e939ce\") " pod="openshift-console/console-549cdbd99b-lf7hk" Dec 10 10:53:44 crc kubenswrapper[4780]: I1210 10:53:44.966099 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/1a100f8e-37f1-4624-be03-49e295e939ce-console-oauth-config\") pod \"console-549cdbd99b-lf7hk\" (UID: \"1a100f8e-37f1-4624-be03-49e295e939ce\") " pod="openshift-console/console-549cdbd99b-lf7hk" Dec 10 10:53:44 crc kubenswrapper[4780]: I1210 10:53:44.986296 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s56ss\" (UniqueName: \"kubernetes.io/projected/1a100f8e-37f1-4624-be03-49e295e939ce-kube-api-access-s56ss\") pod \"console-549cdbd99b-lf7hk\" (UID: \"1a100f8e-37f1-4624-be03-49e295e939ce\") " pod="openshift-console/console-549cdbd99b-lf7hk" Dec 10 10:53:45 crc kubenswrapper[4780]: I1210 10:53:45.123858 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-549cdbd99b-lf7hk" Dec 10 10:53:45 crc kubenswrapper[4780]: I1210 10:53:45.503783 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-monitoring/metrics-server-64c74dff4-7dr67"] Dec 10 10:53:45 crc kubenswrapper[4780]: I1210 10:53:45.507326 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-monitoring/metrics-server-64c74dff4-7dr67"] Dec 10 10:53:45 crc kubenswrapper[4780]: I1210 10:53:45.507613 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-monitoring/metrics-server-64c74dff4-7dr67" Dec 10 10:53:45 crc kubenswrapper[4780]: I1210 10:53:45.513384 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"metrics-server-tls" Dec 10 10:53:45 crc kubenswrapper[4780]: I1210 10:53:45.513532 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"metrics-server-bdpleerava7lb" Dec 10 10:53:45 crc kubenswrapper[4780]: I1210 10:53:45.513634 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-monitoring"/"kubelet-serving-ca-bundle" Dec 10 10:53:45 crc kubenswrapper[4780]: I1210 10:53:45.513444 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-monitoring"/"metrics-server-audit-profiles" Dec 10 10:53:45 crc kubenswrapper[4780]: I1210 10:53:45.513904 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"metrics-server-dockercfg-57cmt" Dec 10 10:53:45 crc kubenswrapper[4780]: I1210 10:53:45.513997 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"metrics-client-certs" Dec 10 10:53:45 crc kubenswrapper[4780]: I1210 10:53:45.567772 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-metrics-client-certs\" (UniqueName: \"kubernetes.io/secret/fe22a59d-5885-47fe-a089-d4ffdd1e94ba-secret-metrics-client-certs\") pod \"metrics-server-64c74dff4-7dr67\" (UID: \"fe22a59d-5885-47fe-a089-d4ffdd1e94ba\") " pod="openshift-monitoring/metrics-server-64c74dff4-7dr67" Dec 10 10:53:45 crc kubenswrapper[4780]: I1210 10:53:45.567880 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-server-audit-profiles\" (UniqueName: \"kubernetes.io/configmap/fe22a59d-5885-47fe-a089-d4ffdd1e94ba-metrics-server-audit-profiles\") pod \"metrics-server-64c74dff4-7dr67\" (UID: \"fe22a59d-5885-47fe-a089-d4ffdd1e94ba\") " pod="openshift-monitoring/metrics-server-64c74dff4-7dr67" Dec 10 10:53:45 crc kubenswrapper[4780]: I1210 10:53:45.568067 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-log\" (UniqueName: \"kubernetes.io/empty-dir/fe22a59d-5885-47fe-a089-d4ffdd1e94ba-audit-log\") pod \"metrics-server-64c74dff4-7dr67\" (UID: \"fe22a59d-5885-47fe-a089-d4ffdd1e94ba\") " pod="openshift-monitoring/metrics-server-64c74dff4-7dr67" Dec 10 10:53:45 crc kubenswrapper[4780]: I1210 10:53:45.568260 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"configmap-kubelet-serving-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/fe22a59d-5885-47fe-a089-d4ffdd1e94ba-configmap-kubelet-serving-ca-bundle\") pod \"metrics-server-64c74dff4-7dr67\" (UID: \"fe22a59d-5885-47fe-a089-d4ffdd1e94ba\") " pod="openshift-monitoring/metrics-server-64c74dff4-7dr67" Dec 10 10:53:45 crc kubenswrapper[4780]: I1210 10:53:45.568363 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vzbwd\" (UniqueName: \"kubernetes.io/projected/fe22a59d-5885-47fe-a089-d4ffdd1e94ba-kube-api-access-vzbwd\") pod \"metrics-server-64c74dff4-7dr67\" (UID: \"fe22a59d-5885-47fe-a089-d4ffdd1e94ba\") " pod="openshift-monitoring/metrics-server-64c74dff4-7dr67" Dec 10 10:53:45 crc kubenswrapper[4780]: I1210 10:53:45.568484 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"client-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fe22a59d-5885-47fe-a089-d4ffdd1e94ba-client-ca-bundle\") pod \"metrics-server-64c74dff4-7dr67\" (UID: \"fe22a59d-5885-47fe-a089-d4ffdd1e94ba\") " pod="openshift-monitoring/metrics-server-64c74dff4-7dr67" Dec 10 10:53:45 crc kubenswrapper[4780]: I1210 10:53:45.568610 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-metrics-server-tls\" (UniqueName: \"kubernetes.io/secret/fe22a59d-5885-47fe-a089-d4ffdd1e94ba-secret-metrics-server-tls\") pod \"metrics-server-64c74dff4-7dr67\" (UID: \"fe22a59d-5885-47fe-a089-d4ffdd1e94ba\") " pod="openshift-monitoring/metrics-server-64c74dff4-7dr67" Dec 10 10:53:45 crc kubenswrapper[4780]: I1210 10:53:45.611213 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-549cdbd99b-lf7hk"] Dec 10 10:53:45 crc kubenswrapper[4780]: I1210 10:53:45.672077 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-metrics-server-tls\" (UniqueName: \"kubernetes.io/secret/fe22a59d-5885-47fe-a089-d4ffdd1e94ba-secret-metrics-server-tls\") pod \"metrics-server-64c74dff4-7dr67\" (UID: \"fe22a59d-5885-47fe-a089-d4ffdd1e94ba\") " pod="openshift-monitoring/metrics-server-64c74dff4-7dr67" Dec 10 10:53:45 crc kubenswrapper[4780]: I1210 10:53:45.672168 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-metrics-client-certs\" (UniqueName: \"kubernetes.io/secret/fe22a59d-5885-47fe-a089-d4ffdd1e94ba-secret-metrics-client-certs\") pod \"metrics-server-64c74dff4-7dr67\" (UID: \"fe22a59d-5885-47fe-a089-d4ffdd1e94ba\") " pod="openshift-monitoring/metrics-server-64c74dff4-7dr67" Dec 10 10:53:45 crc kubenswrapper[4780]: I1210 10:53:45.672216 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-server-audit-profiles\" (UniqueName: \"kubernetes.io/configmap/fe22a59d-5885-47fe-a089-d4ffdd1e94ba-metrics-server-audit-profiles\") pod \"metrics-server-64c74dff4-7dr67\" (UID: \"fe22a59d-5885-47fe-a089-d4ffdd1e94ba\") " pod="openshift-monitoring/metrics-server-64c74dff4-7dr67" Dec 10 10:53:45 crc kubenswrapper[4780]: I1210 10:53:45.672260 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-log\" (UniqueName: \"kubernetes.io/empty-dir/fe22a59d-5885-47fe-a089-d4ffdd1e94ba-audit-log\") pod \"metrics-server-64c74dff4-7dr67\" (UID: \"fe22a59d-5885-47fe-a089-d4ffdd1e94ba\") " pod="openshift-monitoring/metrics-server-64c74dff4-7dr67" Dec 10 10:53:45 crc kubenswrapper[4780]: I1210 10:53:45.672336 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"configmap-kubelet-serving-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/fe22a59d-5885-47fe-a089-d4ffdd1e94ba-configmap-kubelet-serving-ca-bundle\") pod \"metrics-server-64c74dff4-7dr67\" (UID: \"fe22a59d-5885-47fe-a089-d4ffdd1e94ba\") " pod="openshift-monitoring/metrics-server-64c74dff4-7dr67" Dec 10 10:53:45 crc kubenswrapper[4780]: I1210 10:53:45.672419 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vzbwd\" (UniqueName: \"kubernetes.io/projected/fe22a59d-5885-47fe-a089-d4ffdd1e94ba-kube-api-access-vzbwd\") pod \"metrics-server-64c74dff4-7dr67\" (UID: \"fe22a59d-5885-47fe-a089-d4ffdd1e94ba\") " pod="openshift-monitoring/metrics-server-64c74dff4-7dr67" Dec 10 10:53:45 crc kubenswrapper[4780]: I1210 10:53:45.672460 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for 
volume \"client-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fe22a59d-5885-47fe-a089-d4ffdd1e94ba-client-ca-bundle\") pod \"metrics-server-64c74dff4-7dr67\" (UID: \"fe22a59d-5885-47fe-a089-d4ffdd1e94ba\") " pod="openshift-monitoring/metrics-server-64c74dff4-7dr67" Dec 10 10:53:45 crc kubenswrapper[4780]: I1210 10:53:45.673903 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-log\" (UniqueName: \"kubernetes.io/empty-dir/fe22a59d-5885-47fe-a089-d4ffdd1e94ba-audit-log\") pod \"metrics-server-64c74dff4-7dr67\" (UID: \"fe22a59d-5885-47fe-a089-d4ffdd1e94ba\") " pod="openshift-monitoring/metrics-server-64c74dff4-7dr67" Dec 10 10:53:45 crc kubenswrapper[4780]: I1210 10:53:45.675030 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"configmap-kubelet-serving-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/fe22a59d-5885-47fe-a089-d4ffdd1e94ba-configmap-kubelet-serving-ca-bundle\") pod \"metrics-server-64c74dff4-7dr67\" (UID: \"fe22a59d-5885-47fe-a089-d4ffdd1e94ba\") " pod="openshift-monitoring/metrics-server-64c74dff4-7dr67" Dec 10 10:53:45 crc kubenswrapper[4780]: I1210 10:53:45.675217 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-server-audit-profiles\" (UniqueName: \"kubernetes.io/configmap/fe22a59d-5885-47fe-a089-d4ffdd1e94ba-metrics-server-audit-profiles\") pod \"metrics-server-64c74dff4-7dr67\" (UID: \"fe22a59d-5885-47fe-a089-d4ffdd1e94ba\") " pod="openshift-monitoring/metrics-server-64c74dff4-7dr67" Dec 10 10:53:45 crc kubenswrapper[4780]: I1210 10:53:45.678734 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-metrics-server-tls\" (UniqueName: \"kubernetes.io/secret/fe22a59d-5885-47fe-a089-d4ffdd1e94ba-secret-metrics-server-tls\") pod \"metrics-server-64c74dff4-7dr67\" (UID: \"fe22a59d-5885-47fe-a089-d4ffdd1e94ba\") " pod="openshift-monitoring/metrics-server-64c74dff4-7dr67" Dec 10 10:53:45 crc kubenswrapper[4780]: I1210 10:53:45.680300 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-metrics-client-certs\" (UniqueName: \"kubernetes.io/secret/fe22a59d-5885-47fe-a089-d4ffdd1e94ba-secret-metrics-client-certs\") pod \"metrics-server-64c74dff4-7dr67\" (UID: \"fe22a59d-5885-47fe-a089-d4ffdd1e94ba\") " pod="openshift-monitoring/metrics-server-64c74dff4-7dr67" Dec 10 10:53:45 crc kubenswrapper[4780]: I1210 10:53:45.690259 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fe22a59d-5885-47fe-a089-d4ffdd1e94ba-client-ca-bundle\") pod \"metrics-server-64c74dff4-7dr67\" (UID: \"fe22a59d-5885-47fe-a089-d4ffdd1e94ba\") " pod="openshift-monitoring/metrics-server-64c74dff4-7dr67" Dec 10 10:53:45 crc kubenswrapper[4780]: I1210 10:53:45.697456 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vzbwd\" (UniqueName: \"kubernetes.io/projected/fe22a59d-5885-47fe-a089-d4ffdd1e94ba-kube-api-access-vzbwd\") pod \"metrics-server-64c74dff4-7dr67\" (UID: \"fe22a59d-5885-47fe-a089-d4ffdd1e94ba\") " pod="openshift-monitoring/metrics-server-64c74dff4-7dr67" Dec 10 10:53:45 crc kubenswrapper[4780]: I1210 10:53:45.729080 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-monitoring/monitoring-plugin-6555675455-shs6d"] Dec 10 10:53:45 crc kubenswrapper[4780]: I1210 10:53:45.732394 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-monitoring/monitoring-plugin-6555675455-shs6d" Dec 10 10:53:45 crc kubenswrapper[4780]: I1210 10:53:45.735441 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"default-dockercfg-6tstp" Dec 10 10:53:45 crc kubenswrapper[4780]: I1210 10:53:45.735459 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"monitoring-plugin-cert" Dec 10 10:53:45 crc kubenswrapper[4780]: I1210 10:53:45.789147 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"monitoring-plugin-cert\" (UniqueName: \"kubernetes.io/secret/28e28f0d-07a7-46ce-8d47-5f1b8682faaf-monitoring-plugin-cert\") pod \"monitoring-plugin-6555675455-shs6d\" (UID: \"28e28f0d-07a7-46ce-8d47-5f1b8682faaf\") " pod="openshift-monitoring/monitoring-plugin-6555675455-shs6d" Dec 10 10:53:45 crc kubenswrapper[4780]: I1210 10:53:45.792471 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-monitoring/monitoring-plugin-6555675455-shs6d"] Dec 10 10:53:45 crc kubenswrapper[4780]: I1210 10:53:45.834544 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-monitoring/metrics-server-64c74dff4-7dr67" Dec 10 10:53:45 crc kubenswrapper[4780]: I1210 10:53:45.896323 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"monitoring-plugin-cert\" (UniqueName: \"kubernetes.io/secret/28e28f0d-07a7-46ce-8d47-5f1b8682faaf-monitoring-plugin-cert\") pod \"monitoring-plugin-6555675455-shs6d\" (UID: \"28e28f0d-07a7-46ce-8d47-5f1b8682faaf\") " pod="openshift-monitoring/monitoring-plugin-6555675455-shs6d" Dec 10 10:53:45 crc kubenswrapper[4780]: I1210 10:53:45.906042 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/node-exporter-hzkk9" event={"ID":"77c7f18f-58ec-48de-9227-a0e210ae07c7","Type":"ContainerStarted","Data":"f1659239510a4b2dba8e2dd86754f6b800cd57a7ce65ece6f8c0030e5f235e6a"} Dec 10 10:53:45 crc kubenswrapper[4780]: I1210 10:53:45.906145 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/node-exporter-hzkk9" event={"ID":"77c7f18f-58ec-48de-9227-a0e210ae07c7","Type":"ContainerStarted","Data":"05dc288a20e5fb6ee62ba24eb9b53f0518d1bbe9fa3d4a93ad7a9c3937a57833"} Dec 10 10:53:45 crc kubenswrapper[4780]: I1210 10:53:45.922965 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"monitoring-plugin-cert\" (UniqueName: \"kubernetes.io/secret/28e28f0d-07a7-46ce-8d47-5f1b8682faaf-monitoring-plugin-cert\") pod \"monitoring-plugin-6555675455-shs6d\" (UID: \"28e28f0d-07a7-46ce-8d47-5f1b8682faaf\") " pod="openshift-monitoring/monitoring-plugin-6555675455-shs6d" Dec 10 10:53:45 crc kubenswrapper[4780]: I1210 10:53:45.950366 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-549cdbd99b-lf7hk" event={"ID":"1a100f8e-37f1-4624-be03-49e295e939ce","Type":"ContainerStarted","Data":"00ac0b46f9e59552433ef35e883d4a539c6983a58fb9e4dc719ae271008c3dc1"} Dec 10 10:53:46 crc kubenswrapper[4780]: I1210 10:53:46.054053 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-monitoring/node-exporter-hzkk9" podStartSLOduration=4.638933682 podStartE2EDuration="7.054011908s" podCreationTimestamp="2025-12-10 10:53:39 +0000 UTC" firstStartedPulling="2025-12-10 10:53:40.995167843 +0000 UTC m=+525.848561276" lastFinishedPulling="2025-12-10 10:53:43.410246059 +0000 UTC m=+528.263639502" observedRunningTime="2025-12-10 
10:53:45.970246117 +0000 UTC m=+530.823639560" watchObservedRunningTime="2025-12-10 10:53:46.054011908 +0000 UTC m=+530.907405351" Dec 10 10:53:46 crc kubenswrapper[4780]: I1210 10:53:46.119525 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-monitoring/monitoring-plugin-6555675455-shs6d" Dec 10 10:53:46 crc kubenswrapper[4780]: I1210 10:53:46.410872 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-monitoring/prometheus-k8s-0"] Dec 10 10:53:46 crc kubenswrapper[4780]: I1210 10:53:46.420281 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-monitoring/prometheus-k8s-0" Dec 10 10:53:46 crc kubenswrapper[4780]: I1210 10:53:46.423802 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"prometheus-k8s-dockercfg-68dzs" Dec 10 10:53:46 crc kubenswrapper[4780]: I1210 10:53:46.424252 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-monitoring/metrics-server-64c74dff4-7dr67"] Dec 10 10:53:46 crc kubenswrapper[4780]: I1210 10:53:46.424992 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"prometheus-k8s-tls" Dec 10 10:53:46 crc kubenswrapper[4780]: I1210 10:53:46.426869 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"kube-rbac-proxy" Dec 10 10:53:46 crc kubenswrapper[4780]: I1210 10:53:46.427218 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"prometheus-k8s-web-config" Dec 10 10:53:46 crc kubenswrapper[4780]: I1210 10:53:46.427394 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"prometheus-k8s-kube-rbac-proxy-web" Dec 10 10:53:46 crc kubenswrapper[4780]: I1210 10:53:46.427765 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-monitoring"/"serving-certs-ca-bundle" Dec 10 10:53:46 crc kubenswrapper[4780]: I1210 10:53:46.427888 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"prometheus-k8s-thanos-prometheus-http-client-file" Dec 10 10:53:46 crc kubenswrapper[4780]: I1210 10:53:46.427966 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"prometheus-k8s" Dec 10 10:53:46 crc kubenswrapper[4780]: I1210 10:53:46.428139 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"prometheus-k8s-thanos-sidecar-tls" Dec 10 10:53:46 crc kubenswrapper[4780]: I1210 10:53:46.428170 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"prometheus-k8s-grpc-tls-8vp593pr413l9" Dec 10 10:53:46 crc kubenswrapper[4780]: I1210 10:53:46.436237 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"prometheus-k8s-tls-assets-0" Dec 10 10:53:46 crc kubenswrapper[4780]: I1210 10:53:46.438713 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-monitoring"/"prometheus-k8s-rulefiles-0" Dec 10 10:53:46 crc kubenswrapper[4780]: I1210 10:53:46.445182 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-monitoring/prometheus-k8s-0"] Dec 10 10:53:46 crc kubenswrapper[4780]: I1210 10:53:46.493934 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-monitoring"/"prometheus-trusted-ca-bundle" Dec 10 10:53:46 crc kubenswrapper[4780]: I1210 10:53:46.522817 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume 
started for volume \"config\" (UniqueName: \"kubernetes.io/secret/bb29164d-ab20-4069-88fd-3e44aaf2548e-config\") pod \"prometheus-k8s-0\" (UID: \"bb29164d-ab20-4069-88fd-3e44aaf2548e\") " pod="openshift-monitoring/prometheus-k8s-0" Dec 10 10:53:46 crc kubenswrapper[4780]: I1210 10:53:46.522881 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"configmap-serving-certs-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/bb29164d-ab20-4069-88fd-3e44aaf2548e-configmap-serving-certs-ca-bundle\") pod \"prometheus-k8s-0\" (UID: \"bb29164d-ab20-4069-88fd-3e44aaf2548e\") " pod="openshift-monitoring/prometheus-k8s-0" Dec 10 10:53:46 crc kubenswrapper[4780]: I1210 10:53:46.522958 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sgp7x\" (UniqueName: \"kubernetes.io/projected/bb29164d-ab20-4069-88fd-3e44aaf2548e-kube-api-access-sgp7x\") pod \"prometheus-k8s-0\" (UID: \"bb29164d-ab20-4069-88fd-3e44aaf2548e\") " pod="openshift-monitoring/prometheus-k8s-0" Dec 10 10:53:46 crc kubenswrapper[4780]: I1210 10:53:46.522984 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-metrics-client-certs\" (UniqueName: \"kubernetes.io/secret/bb29164d-ab20-4069-88fd-3e44aaf2548e-secret-metrics-client-certs\") pod \"prometheus-k8s-0\" (UID: \"bb29164d-ab20-4069-88fd-3e44aaf2548e\") " pod="openshift-monitoring/prometheus-k8s-0" Dec 10 10:53:46 crc kubenswrapper[4780]: I1210 10:53:46.523001 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"prometheus-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/bb29164d-ab20-4069-88fd-3e44aaf2548e-prometheus-trusted-ca-bundle\") pod \"prometheus-k8s-0\" (UID: \"bb29164d-ab20-4069-88fd-3e44aaf2548e\") " pod="openshift-monitoring/prometheus-k8s-0" Dec 10 10:53:46 crc kubenswrapper[4780]: I1210 10:53:46.523441 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/bb29164d-ab20-4069-88fd-3e44aaf2548e-config-out\") pod \"prometheus-k8s-0\" (UID: \"bb29164d-ab20-4069-88fd-3e44aaf2548e\") " pod="openshift-monitoring/prometheus-k8s-0" Dec 10 10:53:46 crc kubenswrapper[4780]: I1210 10:53:46.523543 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-prometheus-k8s-thanos-sidecar-tls\" (UniqueName: \"kubernetes.io/secret/bb29164d-ab20-4069-88fd-3e44aaf2548e-secret-prometheus-k8s-thanos-sidecar-tls\") pod \"prometheus-k8s-0\" (UID: \"bb29164d-ab20-4069-88fd-3e44aaf2548e\") " pod="openshift-monitoring/prometheus-k8s-0" Dec 10 10:53:46 crc kubenswrapper[4780]: I1210 10:53:46.523655 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-kube-rbac-proxy\" (UniqueName: \"kubernetes.io/secret/bb29164d-ab20-4069-88fd-3e44aaf2548e-secret-kube-rbac-proxy\") pod \"prometheus-k8s-0\" (UID: \"bb29164d-ab20-4069-88fd-3e44aaf2548e\") " pod="openshift-monitoring/prometheus-k8s-0" Dec 10 10:53:46 crc kubenswrapper[4780]: I1210 10:53:46.523725 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/bb29164d-ab20-4069-88fd-3e44aaf2548e-thanos-prometheus-http-client-file\") pod \"prometheus-k8s-0\" (UID: \"bb29164d-ab20-4069-88fd-3e44aaf2548e\") " 
pod="openshift-monitoring/prometheus-k8s-0" Dec 10 10:53:46 crc kubenswrapper[4780]: I1210 10:53:46.523843 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"configmap-kubelet-serving-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/bb29164d-ab20-4069-88fd-3e44aaf2548e-configmap-kubelet-serving-ca-bundle\") pod \"prometheus-k8s-0\" (UID: \"bb29164d-ab20-4069-88fd-3e44aaf2548e\") " pod="openshift-monitoring/prometheus-k8s-0" Dec 10 10:53:46 crc kubenswrapper[4780]: I1210 10:53:46.523900 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-grpc-tls\" (UniqueName: \"kubernetes.io/secret/bb29164d-ab20-4069-88fd-3e44aaf2548e-secret-grpc-tls\") pod \"prometheus-k8s-0\" (UID: \"bb29164d-ab20-4069-88fd-3e44aaf2548e\") " pod="openshift-monitoring/prometheus-k8s-0" Dec 10 10:53:46 crc kubenswrapper[4780]: I1210 10:53:46.523982 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"prometheus-k8s-db\" (UniqueName: \"kubernetes.io/empty-dir/bb29164d-ab20-4069-88fd-3e44aaf2548e-prometheus-k8s-db\") pod \"prometheus-k8s-0\" (UID: \"bb29164d-ab20-4069-88fd-3e44aaf2548e\") " pod="openshift-monitoring/prometheus-k8s-0" Dec 10 10:53:46 crc kubenswrapper[4780]: I1210 10:53:46.524051 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"prometheus-k8s-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/bb29164d-ab20-4069-88fd-3e44aaf2548e-prometheus-k8s-rulefiles-0\") pod \"prometheus-k8s-0\" (UID: \"bb29164d-ab20-4069-88fd-3e44aaf2548e\") " pod="openshift-monitoring/prometheus-k8s-0" Dec 10 10:53:46 crc kubenswrapper[4780]: I1210 10:53:46.524093 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/bb29164d-ab20-4069-88fd-3e44aaf2548e-tls-assets\") pod \"prometheus-k8s-0\" (UID: \"bb29164d-ab20-4069-88fd-3e44aaf2548e\") " pod="openshift-monitoring/prometheus-k8s-0" Dec 10 10:53:46 crc kubenswrapper[4780]: I1210 10:53:46.524195 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"configmap-metrics-client-ca\" (UniqueName: \"kubernetes.io/configmap/bb29164d-ab20-4069-88fd-3e44aaf2548e-configmap-metrics-client-ca\") pod \"prometheus-k8s-0\" (UID: \"bb29164d-ab20-4069-88fd-3e44aaf2548e\") " pod="openshift-monitoring/prometheus-k8s-0" Dec 10 10:53:46 crc kubenswrapper[4780]: I1210 10:53:46.524231 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-prometheus-k8s-kube-rbac-proxy-web\" (UniqueName: \"kubernetes.io/secret/bb29164d-ab20-4069-88fd-3e44aaf2548e-secret-prometheus-k8s-kube-rbac-proxy-web\") pod \"prometheus-k8s-0\" (UID: \"bb29164d-ab20-4069-88fd-3e44aaf2548e\") " pod="openshift-monitoring/prometheus-k8s-0" Dec 10 10:53:46 crc kubenswrapper[4780]: I1210 10:53:46.524296 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/bb29164d-ab20-4069-88fd-3e44aaf2548e-web-config\") pod \"prometheus-k8s-0\" (UID: \"bb29164d-ab20-4069-88fd-3e44aaf2548e\") " pod="openshift-monitoring/prometheus-k8s-0" Dec 10 10:53:46 crc kubenswrapper[4780]: I1210 10:53:46.524368 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-prometheus-k8s-tls\" (UniqueName: 
\"kubernetes.io/secret/bb29164d-ab20-4069-88fd-3e44aaf2548e-secret-prometheus-k8s-tls\") pod \"prometheus-k8s-0\" (UID: \"bb29164d-ab20-4069-88fd-3e44aaf2548e\") " pod="openshift-monitoring/prometheus-k8s-0" Dec 10 10:53:46 crc kubenswrapper[4780]: I1210 10:53:46.616567 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-monitoring/monitoring-plugin-6555675455-shs6d"] Dec 10 10:53:46 crc kubenswrapper[4780]: I1210 10:53:46.624965 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/bb29164d-ab20-4069-88fd-3e44aaf2548e-tls-assets\") pod \"prometheus-k8s-0\" (UID: \"bb29164d-ab20-4069-88fd-3e44aaf2548e\") " pod="openshift-monitoring/prometheus-k8s-0" Dec 10 10:53:46 crc kubenswrapper[4780]: I1210 10:53:46.625041 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"configmap-metrics-client-ca\" (UniqueName: \"kubernetes.io/configmap/bb29164d-ab20-4069-88fd-3e44aaf2548e-configmap-metrics-client-ca\") pod \"prometheus-k8s-0\" (UID: \"bb29164d-ab20-4069-88fd-3e44aaf2548e\") " pod="openshift-monitoring/prometheus-k8s-0" Dec 10 10:53:46 crc kubenswrapper[4780]: I1210 10:53:46.625072 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-prometheus-k8s-kube-rbac-proxy-web\" (UniqueName: \"kubernetes.io/secret/bb29164d-ab20-4069-88fd-3e44aaf2548e-secret-prometheus-k8s-kube-rbac-proxy-web\") pod \"prometheus-k8s-0\" (UID: \"bb29164d-ab20-4069-88fd-3e44aaf2548e\") " pod="openshift-monitoring/prometheus-k8s-0" Dec 10 10:53:46 crc kubenswrapper[4780]: I1210 10:53:46.625105 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/bb29164d-ab20-4069-88fd-3e44aaf2548e-web-config\") pod \"prometheus-k8s-0\" (UID: \"bb29164d-ab20-4069-88fd-3e44aaf2548e\") " pod="openshift-monitoring/prometheus-k8s-0" Dec 10 10:53:46 crc kubenswrapper[4780]: I1210 10:53:46.625147 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-prometheus-k8s-tls\" (UniqueName: \"kubernetes.io/secret/bb29164d-ab20-4069-88fd-3e44aaf2548e-secret-prometheus-k8s-tls\") pod \"prometheus-k8s-0\" (UID: \"bb29164d-ab20-4069-88fd-3e44aaf2548e\") " pod="openshift-monitoring/prometheus-k8s-0" Dec 10 10:53:46 crc kubenswrapper[4780]: I1210 10:53:46.625194 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/bb29164d-ab20-4069-88fd-3e44aaf2548e-config\") pod \"prometheus-k8s-0\" (UID: \"bb29164d-ab20-4069-88fd-3e44aaf2548e\") " pod="openshift-monitoring/prometheus-k8s-0" Dec 10 10:53:46 crc kubenswrapper[4780]: I1210 10:53:46.625222 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"configmap-serving-certs-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/bb29164d-ab20-4069-88fd-3e44aaf2548e-configmap-serving-certs-ca-bundle\") pod \"prometheus-k8s-0\" (UID: \"bb29164d-ab20-4069-88fd-3e44aaf2548e\") " pod="openshift-monitoring/prometheus-k8s-0" Dec 10 10:53:46 crc kubenswrapper[4780]: I1210 10:53:46.625247 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sgp7x\" (UniqueName: \"kubernetes.io/projected/bb29164d-ab20-4069-88fd-3e44aaf2548e-kube-api-access-sgp7x\") pod \"prometheus-k8s-0\" (UID: \"bb29164d-ab20-4069-88fd-3e44aaf2548e\") " pod="openshift-monitoring/prometheus-k8s-0" Dec 10 10:53:46 crc kubenswrapper[4780]: I1210 
10:53:46.625270 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-metrics-client-certs\" (UniqueName: \"kubernetes.io/secret/bb29164d-ab20-4069-88fd-3e44aaf2548e-secret-metrics-client-certs\") pod \"prometheus-k8s-0\" (UID: \"bb29164d-ab20-4069-88fd-3e44aaf2548e\") " pod="openshift-monitoring/prometheus-k8s-0" Dec 10 10:53:46 crc kubenswrapper[4780]: I1210 10:53:46.625291 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"prometheus-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/bb29164d-ab20-4069-88fd-3e44aaf2548e-prometheus-trusted-ca-bundle\") pod \"prometheus-k8s-0\" (UID: \"bb29164d-ab20-4069-88fd-3e44aaf2548e\") " pod="openshift-monitoring/prometheus-k8s-0" Dec 10 10:53:46 crc kubenswrapper[4780]: I1210 10:53:46.625317 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/bb29164d-ab20-4069-88fd-3e44aaf2548e-config-out\") pod \"prometheus-k8s-0\" (UID: \"bb29164d-ab20-4069-88fd-3e44aaf2548e\") " pod="openshift-monitoring/prometheus-k8s-0" Dec 10 10:53:46 crc kubenswrapper[4780]: I1210 10:53:46.625345 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-prometheus-k8s-thanos-sidecar-tls\" (UniqueName: \"kubernetes.io/secret/bb29164d-ab20-4069-88fd-3e44aaf2548e-secret-prometheus-k8s-thanos-sidecar-tls\") pod \"prometheus-k8s-0\" (UID: \"bb29164d-ab20-4069-88fd-3e44aaf2548e\") " pod="openshift-monitoring/prometheus-k8s-0" Dec 10 10:53:46 crc kubenswrapper[4780]: I1210 10:53:46.625382 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-kube-rbac-proxy\" (UniqueName: \"kubernetes.io/secret/bb29164d-ab20-4069-88fd-3e44aaf2548e-secret-kube-rbac-proxy\") pod \"prometheus-k8s-0\" (UID: \"bb29164d-ab20-4069-88fd-3e44aaf2548e\") " pod="openshift-monitoring/prometheus-k8s-0" Dec 10 10:53:46 crc kubenswrapper[4780]: I1210 10:53:46.625412 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/bb29164d-ab20-4069-88fd-3e44aaf2548e-thanos-prometheus-http-client-file\") pod \"prometheus-k8s-0\" (UID: \"bb29164d-ab20-4069-88fd-3e44aaf2548e\") " pod="openshift-monitoring/prometheus-k8s-0" Dec 10 10:53:46 crc kubenswrapper[4780]: I1210 10:53:46.625496 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"configmap-kubelet-serving-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/bb29164d-ab20-4069-88fd-3e44aaf2548e-configmap-kubelet-serving-ca-bundle\") pod \"prometheus-k8s-0\" (UID: \"bb29164d-ab20-4069-88fd-3e44aaf2548e\") " pod="openshift-monitoring/prometheus-k8s-0" Dec 10 10:53:46 crc kubenswrapper[4780]: I1210 10:53:46.625526 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-grpc-tls\" (UniqueName: \"kubernetes.io/secret/bb29164d-ab20-4069-88fd-3e44aaf2548e-secret-grpc-tls\") pod \"prometheus-k8s-0\" (UID: \"bb29164d-ab20-4069-88fd-3e44aaf2548e\") " pod="openshift-monitoring/prometheus-k8s-0" Dec 10 10:53:46 crc kubenswrapper[4780]: I1210 10:53:46.625557 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"prometheus-k8s-db\" (UniqueName: \"kubernetes.io/empty-dir/bb29164d-ab20-4069-88fd-3e44aaf2548e-prometheus-k8s-db\") pod \"prometheus-k8s-0\" (UID: \"bb29164d-ab20-4069-88fd-3e44aaf2548e\") " pod="openshift-monitoring/prometheus-k8s-0" Dec 10 10:53:46 crc 
kubenswrapper[4780]: I1210 10:53:46.625592 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"prometheus-k8s-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/bb29164d-ab20-4069-88fd-3e44aaf2548e-prometheus-k8s-rulefiles-0\") pod \"prometheus-k8s-0\" (UID: \"bb29164d-ab20-4069-88fd-3e44aaf2548e\") " pod="openshift-monitoring/prometheus-k8s-0" Dec 10 10:53:46 crc kubenswrapper[4780]: I1210 10:53:46.629787 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"configmap-serving-certs-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/bb29164d-ab20-4069-88fd-3e44aaf2548e-configmap-serving-certs-ca-bundle\") pod \"prometheus-k8s-0\" (UID: \"bb29164d-ab20-4069-88fd-3e44aaf2548e\") " pod="openshift-monitoring/prometheus-k8s-0" Dec 10 10:53:46 crc kubenswrapper[4780]: I1210 10:53:46.630236 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"configmap-kubelet-serving-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/bb29164d-ab20-4069-88fd-3e44aaf2548e-configmap-kubelet-serving-ca-bundle\") pod \"prometheus-k8s-0\" (UID: \"bb29164d-ab20-4069-88fd-3e44aaf2548e\") " pod="openshift-monitoring/prometheus-k8s-0" Dec 10 10:53:46 crc kubenswrapper[4780]: I1210 10:53:46.635765 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-prometheus-k8s-tls\" (UniqueName: \"kubernetes.io/secret/bb29164d-ab20-4069-88fd-3e44aaf2548e-secret-prometheus-k8s-tls\") pod \"prometheus-k8s-0\" (UID: \"bb29164d-ab20-4069-88fd-3e44aaf2548e\") " pod="openshift-monitoring/prometheus-k8s-0" Dec 10 10:53:46 crc kubenswrapper[4780]: I1210 10:53:46.636766 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-prometheus-k8s-kube-rbac-proxy-web\" (UniqueName: \"kubernetes.io/secret/bb29164d-ab20-4069-88fd-3e44aaf2548e-secret-prometheus-k8s-kube-rbac-proxy-web\") pod \"prometheus-k8s-0\" (UID: \"bb29164d-ab20-4069-88fd-3e44aaf2548e\") " pod="openshift-monitoring/prometheus-k8s-0" Dec 10 10:53:46 crc kubenswrapper[4780]: I1210 10:53:46.637232 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/bb29164d-ab20-4069-88fd-3e44aaf2548e-web-config\") pod \"prometheus-k8s-0\" (UID: \"bb29164d-ab20-4069-88fd-3e44aaf2548e\") " pod="openshift-monitoring/prometheus-k8s-0" Dec 10 10:53:46 crc kubenswrapper[4780]: I1210 10:53:46.637762 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"configmap-metrics-client-ca\" (UniqueName: \"kubernetes.io/configmap/bb29164d-ab20-4069-88fd-3e44aaf2548e-configmap-metrics-client-ca\") pod \"prometheus-k8s-0\" (UID: \"bb29164d-ab20-4069-88fd-3e44aaf2548e\") " pod="openshift-monitoring/prometheus-k8s-0" Dec 10 10:53:46 crc kubenswrapper[4780]: I1210 10:53:46.639252 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"prometheus-k8s-db\" (UniqueName: \"kubernetes.io/empty-dir/bb29164d-ab20-4069-88fd-3e44aaf2548e-prometheus-k8s-db\") pod \"prometheus-k8s-0\" (UID: \"bb29164d-ab20-4069-88fd-3e44aaf2548e\") " pod="openshift-monitoring/prometheus-k8s-0" Dec 10 10:53:46 crc kubenswrapper[4780]: I1210 10:53:46.639735 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/bb29164d-ab20-4069-88fd-3e44aaf2548e-config-out\") pod \"prometheus-k8s-0\" (UID: \"bb29164d-ab20-4069-88fd-3e44aaf2548e\") " pod="openshift-monitoring/prometheus-k8s-0" Dec 10 10:53:46 crc kubenswrapper[4780]: I1210 10:53:46.642551 4780 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/bb29164d-ab20-4069-88fd-3e44aaf2548e-tls-assets\") pod \"prometheus-k8s-0\" (UID: \"bb29164d-ab20-4069-88fd-3e44aaf2548e\") " pod="openshift-monitoring/prometheus-k8s-0" Dec 10 10:53:46 crc kubenswrapper[4780]: I1210 10:53:46.642624 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"prometheus-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/bb29164d-ab20-4069-88fd-3e44aaf2548e-prometheus-trusted-ca-bundle\") pod \"prometheus-k8s-0\" (UID: \"bb29164d-ab20-4069-88fd-3e44aaf2548e\") " pod="openshift-monitoring/prometheus-k8s-0" Dec 10 10:53:46 crc kubenswrapper[4780]: I1210 10:53:46.644568 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-kube-rbac-proxy\" (UniqueName: \"kubernetes.io/secret/bb29164d-ab20-4069-88fd-3e44aaf2548e-secret-kube-rbac-proxy\") pod \"prometheus-k8s-0\" (UID: \"bb29164d-ab20-4069-88fd-3e44aaf2548e\") " pod="openshift-monitoring/prometheus-k8s-0" Dec 10 10:53:46 crc kubenswrapper[4780]: I1210 10:53:46.646786 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-prometheus-k8s-thanos-sidecar-tls\" (UniqueName: \"kubernetes.io/secret/bb29164d-ab20-4069-88fd-3e44aaf2548e-secret-prometheus-k8s-thanos-sidecar-tls\") pod \"prometheus-k8s-0\" (UID: \"bb29164d-ab20-4069-88fd-3e44aaf2548e\") " pod="openshift-monitoring/prometheus-k8s-0" Dec 10 10:53:46 crc kubenswrapper[4780]: I1210 10:53:46.647575 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-grpc-tls\" (UniqueName: \"kubernetes.io/secret/bb29164d-ab20-4069-88fd-3e44aaf2548e-secret-grpc-tls\") pod \"prometheus-k8s-0\" (UID: \"bb29164d-ab20-4069-88fd-3e44aaf2548e\") " pod="openshift-monitoring/prometheus-k8s-0" Dec 10 10:53:46 crc kubenswrapper[4780]: I1210 10:53:46.651295 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/bb29164d-ab20-4069-88fd-3e44aaf2548e-thanos-prometheus-http-client-file\") pod \"prometheus-k8s-0\" (UID: \"bb29164d-ab20-4069-88fd-3e44aaf2548e\") " pod="openshift-monitoring/prometheus-k8s-0" Dec 10 10:53:46 crc kubenswrapper[4780]: I1210 10:53:46.663202 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-metrics-client-certs\" (UniqueName: \"kubernetes.io/secret/bb29164d-ab20-4069-88fd-3e44aaf2548e-secret-metrics-client-certs\") pod \"prometheus-k8s-0\" (UID: \"bb29164d-ab20-4069-88fd-3e44aaf2548e\") " pod="openshift-monitoring/prometheus-k8s-0" Dec 10 10:53:46 crc kubenswrapper[4780]: I1210 10:53:46.663190 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sgp7x\" (UniqueName: \"kubernetes.io/projected/bb29164d-ab20-4069-88fd-3e44aaf2548e-kube-api-access-sgp7x\") pod \"prometheus-k8s-0\" (UID: \"bb29164d-ab20-4069-88fd-3e44aaf2548e\") " pod="openshift-monitoring/prometheus-k8s-0" Dec 10 10:53:46 crc kubenswrapper[4780]: I1210 10:53:46.664759 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"prometheus-k8s-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/bb29164d-ab20-4069-88fd-3e44aaf2548e-prometheus-k8s-rulefiles-0\") pod \"prometheus-k8s-0\" (UID: \"bb29164d-ab20-4069-88fd-3e44aaf2548e\") " pod="openshift-monitoring/prometheus-k8s-0" Dec 10 10:53:46 crc kubenswrapper[4780]: I1210 10:53:46.667589 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for 
volume \"config\" (UniqueName: \"kubernetes.io/secret/bb29164d-ab20-4069-88fd-3e44aaf2548e-config\") pod \"prometheus-k8s-0\" (UID: \"bb29164d-ab20-4069-88fd-3e44aaf2548e\") " pod="openshift-monitoring/prometheus-k8s-0" Dec 10 10:53:46 crc kubenswrapper[4780]: I1210 10:53:46.850866 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-monitoring/prometheus-k8s-0" Dec 10 10:53:46 crc kubenswrapper[4780]: I1210 10:53:46.961866 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/metrics-server-64c74dff4-7dr67" event={"ID":"fe22a59d-5885-47fe-a089-d4ffdd1e94ba","Type":"ContainerStarted","Data":"f1f3fc2276ed598c17f48e8d3e6c3c5127e0c6dce0880ed1c1775578067b06ba"} Dec 10 10:53:46 crc kubenswrapper[4780]: I1210 10:53:46.963758 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/monitoring-plugin-6555675455-shs6d" event={"ID":"28e28f0d-07a7-46ce-8d47-5f1b8682faaf","Type":"ContainerStarted","Data":"72f4ec7973149635ee6155db704da0d4c56300f82acd4d56e080ef2c88c71f66"} Dec 10 10:53:46 crc kubenswrapper[4780]: I1210 10:53:46.972986 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-549cdbd99b-lf7hk" event={"ID":"1a100f8e-37f1-4624-be03-49e295e939ce","Type":"ContainerStarted","Data":"61cc02824914de77e4dcbd6f97fff7e54cda37d4d1c01b03b840d73724d43880"} Dec 10 10:53:47 crc kubenswrapper[4780]: I1210 10:53:47.013161 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/console-549cdbd99b-lf7hk" podStartSLOduration=3.013129193 podStartE2EDuration="3.013129193s" podCreationTimestamp="2025-12-10 10:53:44 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 10:53:46.993074321 +0000 UTC m=+531.846467764" watchObservedRunningTime="2025-12-10 10:53:47.013129193 +0000 UTC m=+531.866522636" Dec 10 10:53:47 crc kubenswrapper[4780]: I1210 10:53:47.314440 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-monitoring/prometheus-k8s-0"] Dec 10 10:53:48 crc kubenswrapper[4780]: I1210 10:53:48.990756 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/alertmanager-main-0" event={"ID":"b41fe3df-3e49-4ee5-8f9c-e2f725888b4b","Type":"ContainerStarted","Data":"eff46b4a34e73ffe9b0073492ea5a1008b64b4229dbd8e0597939cf011a2c0ed"} Dec 10 10:53:48 crc kubenswrapper[4780]: I1210 10:53:48.992731 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/thanos-querier-5dc6df75d9-4ps6z" event={"ID":"faca989e-eb37-4004-8522-de2faeee92e3","Type":"ContainerStarted","Data":"8364282f7bdf39988e721a9b5b2a75cc9f13290a82f61d000c9cc3ea14f2201a"} Dec 10 10:53:49 crc kubenswrapper[4780]: I1210 10:53:48.994531 4780 generic.go:334] "Generic (PLEG): container finished" podID="bb29164d-ab20-4069-88fd-3e44aaf2548e" containerID="c930e8f0388b884110edc2b236f708fab771f492ed4b75526eb703ce18916a12" exitCode=0 Dec 10 10:53:49 crc kubenswrapper[4780]: I1210 10:53:48.994679 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/prometheus-k8s-0" event={"ID":"bb29164d-ab20-4069-88fd-3e44aaf2548e","Type":"ContainerDied","Data":"c930e8f0388b884110edc2b236f708fab771f492ed4b75526eb703ce18916a12"} Dec 10 10:53:49 crc kubenswrapper[4780]: I1210 10:53:48.994738 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/prometheus-k8s-0" 
event={"ID":"bb29164d-ab20-4069-88fd-3e44aaf2548e","Type":"ContainerStarted","Data":"4e61483ebbb63e6620027647c55a27603311669af36db06e24d06e39e7c9584a"} Dec 10 10:53:50 crc kubenswrapper[4780]: I1210 10:53:50.249950 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/thanos-querier-5dc6df75d9-4ps6z" event={"ID":"faca989e-eb37-4004-8522-de2faeee92e3","Type":"ContainerStarted","Data":"b1a7c958dd297d4ca8d95108b263c32073556023288c35ae1cf04d80bf2768fc"} Dec 10 10:53:51 crc kubenswrapper[4780]: I1210 10:53:51.257909 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/metrics-server-64c74dff4-7dr67" event={"ID":"fe22a59d-5885-47fe-a089-d4ffdd1e94ba","Type":"ContainerStarted","Data":"9985fb51f8b83bea12b4677496d66b082f800a1e98f5a8a02db9e660709b2ff1"} Dec 10 10:53:51 crc kubenswrapper[4780]: I1210 10:53:51.262855 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/alertmanager-main-0" event={"ID":"b41fe3df-3e49-4ee5-8f9c-e2f725888b4b","Type":"ContainerStarted","Data":"9e347b35f275a8b0b02df0707ac974ce8e8c5b4b3521b05268015362366a00bf"} Dec 10 10:53:51 crc kubenswrapper[4780]: I1210 10:53:51.262953 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/alertmanager-main-0" event={"ID":"b41fe3df-3e49-4ee5-8f9c-e2f725888b4b","Type":"ContainerStarted","Data":"aed7977c875c72dee9d553fdee9e9db70b0f974d63f6dd3da0c6c7ea548aac68"} Dec 10 10:53:51 crc kubenswrapper[4780]: I1210 10:53:51.262971 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/alertmanager-main-0" event={"ID":"b41fe3df-3e49-4ee5-8f9c-e2f725888b4b","Type":"ContainerStarted","Data":"55f6781f734f29f16d05eff08838efe25ac9ddadeb0ada1f19c3e1e2c6b8265a"} Dec 10 10:53:51 crc kubenswrapper[4780]: I1210 10:53:51.262988 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/alertmanager-main-0" event={"ID":"b41fe3df-3e49-4ee5-8f9c-e2f725888b4b","Type":"ContainerStarted","Data":"6db604a4b2517145f00959e6c913d1575a1bbc7be96fa6c93c2b00720d86b454"} Dec 10 10:53:51 crc kubenswrapper[4780]: I1210 10:53:51.265995 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/thanos-querier-5dc6df75d9-4ps6z" event={"ID":"faca989e-eb37-4004-8522-de2faeee92e3","Type":"ContainerStarted","Data":"34e74d76c613aaa0b9b78d73fd72efd32be8b4d837203b44e8e1347a46cae89a"} Dec 10 10:53:51 crc kubenswrapper[4780]: I1210 10:53:51.267287 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/monitoring-plugin-6555675455-shs6d" event={"ID":"28e28f0d-07a7-46ce-8d47-5f1b8682faaf","Type":"ContainerStarted","Data":"bbe1ccf843fa92aef2cb8f82cac63d06541c40aa11b3e99ae6377983e05f5723"} Dec 10 10:53:51 crc kubenswrapper[4780]: I1210 10:53:51.268492 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-monitoring/monitoring-plugin-6555675455-shs6d" Dec 10 10:53:51 crc kubenswrapper[4780]: I1210 10:53:51.274274 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-monitoring/monitoring-plugin-6555675455-shs6d" Dec 10 10:53:51 crc kubenswrapper[4780]: I1210 10:53:51.318592 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-monitoring/monitoring-plugin-6555675455-shs6d" podStartSLOduration=2.747175839 podStartE2EDuration="6.318538927s" podCreationTimestamp="2025-12-10 10:53:45 +0000 UTC" firstStartedPulling="2025-12-10 10:53:46.640968954 +0000 UTC m=+531.494362397" 
lastFinishedPulling="2025-12-10 10:53:50.212332042 +0000 UTC m=+535.065725485" observedRunningTime="2025-12-10 10:53:51.302986214 +0000 UTC m=+536.156379687" watchObservedRunningTime="2025-12-10 10:53:51.318538927 +0000 UTC m=+536.171932380" Dec 10 10:53:51 crc kubenswrapper[4780]: I1210 10:53:51.325983 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-monitoring/metrics-server-64c74dff4-7dr67" podStartSLOduration=2.481870152 podStartE2EDuration="6.325950603s" podCreationTimestamp="2025-12-10 10:53:45 +0000 UTC" firstStartedPulling="2025-12-10 10:53:46.439498111 +0000 UTC m=+531.292891554" lastFinishedPulling="2025-12-10 10:53:50.283578572 +0000 UTC m=+535.136972005" observedRunningTime="2025-12-10 10:53:51.285431769 +0000 UTC m=+536.138825212" watchObservedRunningTime="2025-12-10 10:53:51.325950603 +0000 UTC m=+536.179344046" Dec 10 10:53:55 crc kubenswrapper[4780]: I1210 10:53:55.298629 4780 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-console/console-549cdbd99b-lf7hk" Dec 10 10:53:55 crc kubenswrapper[4780]: I1210 10:53:55.299496 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/console-549cdbd99b-lf7hk" Dec 10 10:53:55 crc kubenswrapper[4780]: I1210 10:53:55.309651 4780 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-console/console-549cdbd99b-lf7hk" Dec 10 10:53:55 crc kubenswrapper[4780]: I1210 10:53:55.473424 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/alertmanager-main-0" event={"ID":"b41fe3df-3e49-4ee5-8f9c-e2f725888b4b","Type":"ContainerStarted","Data":"b5d231b48805c834103a306b3635c71483778dcfb3913e7fc7c08dd5c2103915"} Dec 10 10:53:55 crc kubenswrapper[4780]: I1210 10:53:55.483130 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/thanos-querier-5dc6df75d9-4ps6z" event={"ID":"faca989e-eb37-4004-8522-de2faeee92e3","Type":"ContainerStarted","Data":"f9d1681a71039f5317f155dd0ca89bf589d60b91fb138efdd78fb3165e0f5779"} Dec 10 10:53:55 crc kubenswrapper[4780]: I1210 10:53:55.483195 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/thanos-querier-5dc6df75d9-4ps6z" event={"ID":"faca989e-eb37-4004-8522-de2faeee92e3","Type":"ContainerStarted","Data":"aad0fbd2745ee733b4a32252a45d1b6f5a7fa6a749d8cc67e3de24a04bbfe4ce"} Dec 10 10:53:55 crc kubenswrapper[4780]: I1210 10:53:55.495400 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/prometheus-k8s-0" event={"ID":"bb29164d-ab20-4069-88fd-3e44aaf2548e","Type":"ContainerStarted","Data":"b4de9cd14a437838166704f21e6daacd8ef6a651845ebd969ba19630ba6ea2cb"} Dec 10 10:53:55 crc kubenswrapper[4780]: I1210 10:53:55.495579 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/prometheus-k8s-0" event={"ID":"bb29164d-ab20-4069-88fd-3e44aaf2548e","Type":"ContainerStarted","Data":"6a426d9bf708e2c976b8fa2e7488283c78e333935b221451a089783df20cf071"} Dec 10 10:53:55 crc kubenswrapper[4780]: I1210 10:53:55.499147 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/console-549cdbd99b-lf7hk" Dec 10 10:53:55 crc kubenswrapper[4780]: I1210 10:53:55.566624 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-monitoring/alertmanager-main-0" podStartSLOduration=1.931555355 podStartE2EDuration="14.566593461s" podCreationTimestamp="2025-12-10 10:53:41 +0000 UTC" 
firstStartedPulling="2025-12-10 10:53:42.017814303 +0000 UTC m=+526.871207746" lastFinishedPulling="2025-12-10 10:53:54.652852409 +0000 UTC m=+539.506245852" observedRunningTime="2025-12-10 10:53:55.535425264 +0000 UTC m=+540.388818717" watchObservedRunningTime="2025-12-10 10:53:55.566593461 +0000 UTC m=+540.419986904" Dec 10 10:53:55 crc kubenswrapper[4780]: I1210 10:53:55.635420 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-console/console-f9d7485db-2dwc9"] Dec 10 10:53:56 crc kubenswrapper[4780]: I1210 10:53:56.505296 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/prometheus-k8s-0" event={"ID":"bb29164d-ab20-4069-88fd-3e44aaf2548e","Type":"ContainerStarted","Data":"24a4bfe63bd5c5724f5c793541a310a967b89f0189c4b4a191de4818ced88b5e"} Dec 10 10:53:56 crc kubenswrapper[4780]: I1210 10:53:56.505669 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/prometheus-k8s-0" event={"ID":"bb29164d-ab20-4069-88fd-3e44aaf2548e","Type":"ContainerStarted","Data":"7af6c4244d1d04d1982c34c9a4bda452da50497d838b5b937d995e285f5ce22e"} Dec 10 10:53:56 crc kubenswrapper[4780]: I1210 10:53:56.505802 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/prometheus-k8s-0" event={"ID":"bb29164d-ab20-4069-88fd-3e44aaf2548e","Type":"ContainerStarted","Data":"17fed5f1fe8ef3159a4b9bf078683c90b15739b1fde04eed895b1830edc59816"} Dec 10 10:53:56 crc kubenswrapper[4780]: I1210 10:53:56.505823 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/prometheus-k8s-0" event={"ID":"bb29164d-ab20-4069-88fd-3e44aaf2548e","Type":"ContainerStarted","Data":"2e5c1f75692e700e47f520faeddf28c6fcb064e1720f4c504b33b9e6e0bd4cdd"} Dec 10 10:53:56 crc kubenswrapper[4780]: I1210 10:53:56.509146 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/thanos-querier-5dc6df75d9-4ps6z" event={"ID":"faca989e-eb37-4004-8522-de2faeee92e3","Type":"ContainerStarted","Data":"e087a48422086b3ee4eb71b25ca63c1d5cc75d8fdb0227543b1f82cfdd2884c1"} Dec 10 10:53:56 crc kubenswrapper[4780]: I1210 10:53:56.553872 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-monitoring/prometheus-k8s-0" podStartSLOduration=4.877959464 podStartE2EDuration="10.553839412s" podCreationTimestamp="2025-12-10 10:53:46 +0000 UTC" firstStartedPulling="2025-12-10 10:53:49.005352484 +0000 UTC m=+533.858745917" lastFinishedPulling="2025-12-10 10:53:54.681232422 +0000 UTC m=+539.534625865" observedRunningTime="2025-12-10 10:53:56.545571382 +0000 UTC m=+541.398964825" watchObservedRunningTime="2025-12-10 10:53:56.553839412 +0000 UTC m=+541.407232855" Dec 10 10:53:56 crc kubenswrapper[4780]: I1210 10:53:56.582026 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-monitoring/thanos-querier-5dc6df75d9-4ps6z" podStartSLOduration=4.008006838 podStartE2EDuration="14.581978258s" podCreationTimestamp="2025-12-10 10:53:42 +0000 UTC" firstStartedPulling="2025-12-10 10:53:44.076399423 +0000 UTC m=+528.929792866" lastFinishedPulling="2025-12-10 10:53:54.650370843 +0000 UTC m=+539.503764286" observedRunningTime="2025-12-10 10:53:56.579282016 +0000 UTC m=+541.432675459" watchObservedRunningTime="2025-12-10 10:53:56.581978258 +0000 UTC m=+541.435371701" Dec 10 10:53:56 crc kubenswrapper[4780]: I1210 10:53:56.851508 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-monitoring/prometheus-k8s-0" Dec 10 10:53:57 crc kubenswrapper[4780]: I1210 
10:53:57.432872 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-monitoring/thanos-querier-5dc6df75d9-4ps6z" Dec 10 10:53:57 crc kubenswrapper[4780]: I1210 10:53:57.443803 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-monitoring/thanos-querier-5dc6df75d9-4ps6z" Dec 10 10:54:05 crc kubenswrapper[4780]: I1210 10:54:05.835716 4780 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-monitoring/metrics-server-64c74dff4-7dr67" Dec 10 10:54:05 crc kubenswrapper[4780]: I1210 10:54:05.836386 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-monitoring/metrics-server-64c74dff4-7dr67" Dec 10 10:54:20 crc kubenswrapper[4780]: I1210 10:54:20.701160 4780 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-console/console-f9d7485db-2dwc9" podUID="c79bfa07-4a71-4560-b706-ac6c81b10ddc" containerName="console" containerID="cri-o://5e17e2d0b7cd069de3f55f9447d591c50652d6787ef5f6718ef18da28ac0e4f8" gracePeriod=15 Dec 10 10:54:23 crc kubenswrapper[4780]: I1210 10:54:23.778117 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-console_console-f9d7485db-2dwc9_c79bfa07-4a71-4560-b706-ac6c81b10ddc/console/0.log" Dec 10 10:54:23 crc kubenswrapper[4780]: I1210 10:54:23.778770 4780 generic.go:334] "Generic (PLEG): container finished" podID="c79bfa07-4a71-4560-b706-ac6c81b10ddc" containerID="5e17e2d0b7cd069de3f55f9447d591c50652d6787ef5f6718ef18da28ac0e4f8" exitCode=2 Dec 10 10:54:23 crc kubenswrapper[4780]: I1210 10:54:23.778830 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-2dwc9" event={"ID":"c79bfa07-4a71-4560-b706-ac6c81b10ddc","Type":"ContainerDied","Data":"5e17e2d0b7cd069de3f55f9447d591c50652d6787ef5f6718ef18da28ac0e4f8"} Dec 10 10:54:24 crc kubenswrapper[4780]: I1210 10:54:24.355868 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-console_console-f9d7485db-2dwc9_c79bfa07-4a71-4560-b706-ac6c81b10ddc/console/0.log" Dec 10 10:54:24 crc kubenswrapper[4780]: I1210 10:54:24.356370 4780 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/console-f9d7485db-2dwc9" Dec 10 10:54:24 crc kubenswrapper[4780]: I1210 10:54:24.491037 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/c79bfa07-4a71-4560-b706-ac6c81b10ddc-service-ca\") pod \"c79bfa07-4a71-4560-b706-ac6c81b10ddc\" (UID: \"c79bfa07-4a71-4560-b706-ac6c81b10ddc\") " Dec 10 10:54:24 crc kubenswrapper[4780]: I1210 10:54:24.491130 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/c79bfa07-4a71-4560-b706-ac6c81b10ddc-console-serving-cert\") pod \"c79bfa07-4a71-4560-b706-ac6c81b10ddc\" (UID: \"c79bfa07-4a71-4560-b706-ac6c81b10ddc\") " Dec 10 10:54:24 crc kubenswrapper[4780]: I1210 10:54:24.491181 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xztxr\" (UniqueName: \"kubernetes.io/projected/c79bfa07-4a71-4560-b706-ac6c81b10ddc-kube-api-access-xztxr\") pod \"c79bfa07-4a71-4560-b706-ac6c81b10ddc\" (UID: \"c79bfa07-4a71-4560-b706-ac6c81b10ddc\") " Dec 10 10:54:24 crc kubenswrapper[4780]: I1210 10:54:24.491256 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/c79bfa07-4a71-4560-b706-ac6c81b10ddc-console-config\") pod \"c79bfa07-4a71-4560-b706-ac6c81b10ddc\" (UID: \"c79bfa07-4a71-4560-b706-ac6c81b10ddc\") " Dec 10 10:54:24 crc kubenswrapper[4780]: I1210 10:54:24.491275 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c79bfa07-4a71-4560-b706-ac6c81b10ddc-trusted-ca-bundle\") pod \"c79bfa07-4a71-4560-b706-ac6c81b10ddc\" (UID: \"c79bfa07-4a71-4560-b706-ac6c81b10ddc\") " Dec 10 10:54:24 crc kubenswrapper[4780]: I1210 10:54:24.491295 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/c79bfa07-4a71-4560-b706-ac6c81b10ddc-oauth-serving-cert\") pod \"c79bfa07-4a71-4560-b706-ac6c81b10ddc\" (UID: \"c79bfa07-4a71-4560-b706-ac6c81b10ddc\") " Dec 10 10:54:24 crc kubenswrapper[4780]: I1210 10:54:24.491403 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/c79bfa07-4a71-4560-b706-ac6c81b10ddc-console-oauth-config\") pod \"c79bfa07-4a71-4560-b706-ac6c81b10ddc\" (UID: \"c79bfa07-4a71-4560-b706-ac6c81b10ddc\") " Dec 10 10:54:24 crc kubenswrapper[4780]: I1210 10:54:24.492926 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c79bfa07-4a71-4560-b706-ac6c81b10ddc-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "c79bfa07-4a71-4560-b706-ac6c81b10ddc" (UID: "c79bfa07-4a71-4560-b706-ac6c81b10ddc"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 10:54:24 crc kubenswrapper[4780]: I1210 10:54:24.493224 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c79bfa07-4a71-4560-b706-ac6c81b10ddc-console-config" (OuterVolumeSpecName: "console-config") pod "c79bfa07-4a71-4560-b706-ac6c81b10ddc" (UID: "c79bfa07-4a71-4560-b706-ac6c81b10ddc"). InnerVolumeSpecName "console-config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 10:54:24 crc kubenswrapper[4780]: I1210 10:54:24.493763 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c79bfa07-4a71-4560-b706-ac6c81b10ddc-service-ca" (OuterVolumeSpecName: "service-ca") pod "c79bfa07-4a71-4560-b706-ac6c81b10ddc" (UID: "c79bfa07-4a71-4560-b706-ac6c81b10ddc"). InnerVolumeSpecName "service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 10:54:24 crc kubenswrapper[4780]: I1210 10:54:24.493793 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c79bfa07-4a71-4560-b706-ac6c81b10ddc-oauth-serving-cert" (OuterVolumeSpecName: "oauth-serving-cert") pod "c79bfa07-4a71-4560-b706-ac6c81b10ddc" (UID: "c79bfa07-4a71-4560-b706-ac6c81b10ddc"). InnerVolumeSpecName "oauth-serving-cert". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 10:54:24 crc kubenswrapper[4780]: I1210 10:54:24.499835 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c79bfa07-4a71-4560-b706-ac6c81b10ddc-console-serving-cert" (OuterVolumeSpecName: "console-serving-cert") pod "c79bfa07-4a71-4560-b706-ac6c81b10ddc" (UID: "c79bfa07-4a71-4560-b706-ac6c81b10ddc"). InnerVolumeSpecName "console-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 10:54:24 crc kubenswrapper[4780]: I1210 10:54:24.500635 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c79bfa07-4a71-4560-b706-ac6c81b10ddc-kube-api-access-xztxr" (OuterVolumeSpecName: "kube-api-access-xztxr") pod "c79bfa07-4a71-4560-b706-ac6c81b10ddc" (UID: "c79bfa07-4a71-4560-b706-ac6c81b10ddc"). InnerVolumeSpecName "kube-api-access-xztxr". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 10:54:24 crc kubenswrapper[4780]: I1210 10:54:24.500628 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c79bfa07-4a71-4560-b706-ac6c81b10ddc-console-oauth-config" (OuterVolumeSpecName: "console-oauth-config") pod "c79bfa07-4a71-4560-b706-ac6c81b10ddc" (UID: "c79bfa07-4a71-4560-b706-ac6c81b10ddc"). InnerVolumeSpecName "console-oauth-config". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 10:54:24 crc kubenswrapper[4780]: I1210 10:54:24.593341 4780 reconciler_common.go:293] "Volume detached for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/c79bfa07-4a71-4560-b706-ac6c81b10ddc-console-oauth-config\") on node \"crc\" DevicePath \"\"" Dec 10 10:54:24 crc kubenswrapper[4780]: I1210 10:54:24.593426 4780 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/c79bfa07-4a71-4560-b706-ac6c81b10ddc-service-ca\") on node \"crc\" DevicePath \"\"" Dec 10 10:54:24 crc kubenswrapper[4780]: I1210 10:54:24.593460 4780 reconciler_common.go:293] "Volume detached for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/c79bfa07-4a71-4560-b706-ac6c81b10ddc-console-serving-cert\") on node \"crc\" DevicePath \"\"" Dec 10 10:54:24 crc kubenswrapper[4780]: I1210 10:54:24.593471 4780 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xztxr\" (UniqueName: \"kubernetes.io/projected/c79bfa07-4a71-4560-b706-ac6c81b10ddc-kube-api-access-xztxr\") on node \"crc\" DevicePath \"\"" Dec 10 10:54:24 crc kubenswrapper[4780]: I1210 10:54:24.593483 4780 reconciler_common.go:293] "Volume detached for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/c79bfa07-4a71-4560-b706-ac6c81b10ddc-console-config\") on node \"crc\" DevicePath \"\"" Dec 10 10:54:24 crc kubenswrapper[4780]: I1210 10:54:24.593495 4780 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c79bfa07-4a71-4560-b706-ac6c81b10ddc-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 10 10:54:24 crc kubenswrapper[4780]: I1210 10:54:24.593508 4780 reconciler_common.go:293] "Volume detached for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/c79bfa07-4a71-4560-b706-ac6c81b10ddc-oauth-serving-cert\") on node \"crc\" DevicePath \"\"" Dec 10 10:54:24 crc kubenswrapper[4780]: I1210 10:54:24.786780 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-console_console-f9d7485db-2dwc9_c79bfa07-4a71-4560-b706-ac6c81b10ddc/console/0.log" Dec 10 10:54:24 crc kubenswrapper[4780]: I1210 10:54:24.786851 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-2dwc9" event={"ID":"c79bfa07-4a71-4560-b706-ac6c81b10ddc","Type":"ContainerDied","Data":"a6c3b1e9ea662359b1d06586394b675351713263c106da4b91d12fa42f9905fb"} Dec 10 10:54:25 crc kubenswrapper[4780]: I1210 10:54:24.786983 4780 scope.go:117] "RemoveContainer" containerID="5e17e2d0b7cd069de3f55f9447d591c50652d6787ef5f6718ef18da28ac0e4f8" Dec 10 10:54:25 crc kubenswrapper[4780]: I1210 10:54:24.786990 4780 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/console-f9d7485db-2dwc9" Dec 10 10:54:25 crc kubenswrapper[4780]: I1210 10:54:24.840004 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-console/console-f9d7485db-2dwc9"] Dec 10 10:54:25 crc kubenswrapper[4780]: I1210 10:54:24.846592 4780 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-console/console-f9d7485db-2dwc9"] Dec 10 10:54:25 crc kubenswrapper[4780]: I1210 10:54:25.845130 4780 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-monitoring/metrics-server-64c74dff4-7dr67" Dec 10 10:54:25 crc kubenswrapper[4780]: I1210 10:54:25.850534 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-monitoring/metrics-server-64c74dff4-7dr67" Dec 10 10:54:25 crc kubenswrapper[4780]: I1210 10:54:25.980451 4780 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c79bfa07-4a71-4560-b706-ac6c81b10ddc" path="/var/lib/kubelet/pods/c79bfa07-4a71-4560-b706-ac6c81b10ddc/volumes" Dec 10 10:54:46 crc kubenswrapper[4780]: I1210 10:54:46.851904 4780 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-monitoring/prometheus-k8s-0" Dec 10 10:54:46 crc kubenswrapper[4780]: I1210 10:54:46.898021 4780 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-monitoring/prometheus-k8s-0" Dec 10 10:54:47 crc kubenswrapper[4780]: I1210 10:54:47.045496 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-monitoring/prometheus-k8s-0" Dec 10 10:54:56 crc kubenswrapper[4780]: I1210 10:54:56.738179 4780 scope.go:117] "RemoveContainer" containerID="80d3de265f9a2be3c2dbe0286029e1f007b739e5f65470db6f17a6bb88c273ff" Dec 10 10:54:56 crc kubenswrapper[4780]: I1210 10:54:56.771529 4780 scope.go:117] "RemoveContainer" containerID="6d525e6dca5c42e82d262ae4b4375a3b7649063384c24d0217f2b182b2c71036" Dec 10 10:55:27 crc kubenswrapper[4780]: I1210 10:55:27.476102 4780 patch_prober.go:28] interesting pod/machine-config-daemon-xhdr5 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 10 10:55:27 crc kubenswrapper[4780]: I1210 10:55:27.476871 4780 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" podUID="6bf1dca1-b191-4796-b326-baac53e84045" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 10 10:55:49 crc kubenswrapper[4780]: I1210 10:55:49.450811 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/console-5db8778c9-c5z2p"] Dec 10 10:55:49 crc kubenswrapper[4780]: E1210 10:55:49.452110 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c79bfa07-4a71-4560-b706-ac6c81b10ddc" containerName="console" Dec 10 10:55:49 crc kubenswrapper[4780]: I1210 10:55:49.452152 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="c79bfa07-4a71-4560-b706-ac6c81b10ddc" containerName="console" Dec 10 10:55:49 crc kubenswrapper[4780]: I1210 10:55:49.452412 4780 memory_manager.go:354] "RemoveStaleState removing state" podUID="c79bfa07-4a71-4560-b706-ac6c81b10ddc" containerName="console" Dec 10 10:55:49 crc kubenswrapper[4780]: I1210 10:55:49.453969 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/console-5db8778c9-c5z2p" Dec 10 10:55:49 crc kubenswrapper[4780]: I1210 10:55:49.476911 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-5db8778c9-c5z2p"] Dec 10 10:55:49 crc kubenswrapper[4780]: I1210 10:55:49.589248 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/ff1667d0-453d-4b4f-bced-0452da294f98-console-config\") pod \"console-5db8778c9-c5z2p\" (UID: \"ff1667d0-453d-4b4f-bced-0452da294f98\") " pod="openshift-console/console-5db8778c9-c5z2p" Dec 10 10:55:49 crc kubenswrapper[4780]: I1210 10:55:49.589334 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/ff1667d0-453d-4b4f-bced-0452da294f98-oauth-serving-cert\") pod \"console-5db8778c9-c5z2p\" (UID: \"ff1667d0-453d-4b4f-bced-0452da294f98\") " pod="openshift-console/console-5db8778c9-c5z2p" Dec 10 10:55:49 crc kubenswrapper[4780]: I1210 10:55:49.589385 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/ff1667d0-453d-4b4f-bced-0452da294f98-service-ca\") pod \"console-5db8778c9-c5z2p\" (UID: \"ff1667d0-453d-4b4f-bced-0452da294f98\") " pod="openshift-console/console-5db8778c9-c5z2p" Dec 10 10:55:49 crc kubenswrapper[4780]: I1210 10:55:49.589804 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/ff1667d0-453d-4b4f-bced-0452da294f98-console-serving-cert\") pod \"console-5db8778c9-c5z2p\" (UID: \"ff1667d0-453d-4b4f-bced-0452da294f98\") " pod="openshift-console/console-5db8778c9-c5z2p" Dec 10 10:55:49 crc kubenswrapper[4780]: I1210 10:55:49.589896 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/ff1667d0-453d-4b4f-bced-0452da294f98-trusted-ca-bundle\") pod \"console-5db8778c9-c5z2p\" (UID: \"ff1667d0-453d-4b4f-bced-0452da294f98\") " pod="openshift-console/console-5db8778c9-c5z2p" Dec 10 10:55:49 crc kubenswrapper[4780]: I1210 10:55:49.590630 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9rvj5\" (UniqueName: \"kubernetes.io/projected/ff1667d0-453d-4b4f-bced-0452da294f98-kube-api-access-9rvj5\") pod \"console-5db8778c9-c5z2p\" (UID: \"ff1667d0-453d-4b4f-bced-0452da294f98\") " pod="openshift-console/console-5db8778c9-c5z2p" Dec 10 10:55:49 crc kubenswrapper[4780]: I1210 10:55:49.590704 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/ff1667d0-453d-4b4f-bced-0452da294f98-console-oauth-config\") pod \"console-5db8778c9-c5z2p\" (UID: \"ff1667d0-453d-4b4f-bced-0452da294f98\") " pod="openshift-console/console-5db8778c9-c5z2p" Dec 10 10:55:49 crc kubenswrapper[4780]: I1210 10:55:49.692406 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/ff1667d0-453d-4b4f-bced-0452da294f98-service-ca\") pod \"console-5db8778c9-c5z2p\" (UID: \"ff1667d0-453d-4b4f-bced-0452da294f98\") " pod="openshift-console/console-5db8778c9-c5z2p" Dec 10 10:55:49 crc kubenswrapper[4780]: I1210 
10:55:49.692524 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/ff1667d0-453d-4b4f-bced-0452da294f98-console-serving-cert\") pod \"console-5db8778c9-c5z2p\" (UID: \"ff1667d0-453d-4b4f-bced-0452da294f98\") " pod="openshift-console/console-5db8778c9-c5z2p" Dec 10 10:55:49 crc kubenswrapper[4780]: I1210 10:55:49.692548 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/ff1667d0-453d-4b4f-bced-0452da294f98-trusted-ca-bundle\") pod \"console-5db8778c9-c5z2p\" (UID: \"ff1667d0-453d-4b4f-bced-0452da294f98\") " pod="openshift-console/console-5db8778c9-c5z2p" Dec 10 10:55:49 crc kubenswrapper[4780]: I1210 10:55:49.693863 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9rvj5\" (UniqueName: \"kubernetes.io/projected/ff1667d0-453d-4b4f-bced-0452da294f98-kube-api-access-9rvj5\") pod \"console-5db8778c9-c5z2p\" (UID: \"ff1667d0-453d-4b4f-bced-0452da294f98\") " pod="openshift-console/console-5db8778c9-c5z2p" Dec 10 10:55:49 crc kubenswrapper[4780]: I1210 10:55:49.693903 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/ff1667d0-453d-4b4f-bced-0452da294f98-service-ca\") pod \"console-5db8778c9-c5z2p\" (UID: \"ff1667d0-453d-4b4f-bced-0452da294f98\") " pod="openshift-console/console-5db8778c9-c5z2p" Dec 10 10:55:49 crc kubenswrapper[4780]: I1210 10:55:49.693911 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/ff1667d0-453d-4b4f-bced-0452da294f98-console-oauth-config\") pod \"console-5db8778c9-c5z2p\" (UID: \"ff1667d0-453d-4b4f-bced-0452da294f98\") " pod="openshift-console/console-5db8778c9-c5z2p" Dec 10 10:55:49 crc kubenswrapper[4780]: I1210 10:55:49.694080 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/ff1667d0-453d-4b4f-bced-0452da294f98-console-config\") pod \"console-5db8778c9-c5z2p\" (UID: \"ff1667d0-453d-4b4f-bced-0452da294f98\") " pod="openshift-console/console-5db8778c9-c5z2p" Dec 10 10:55:49 crc kubenswrapper[4780]: I1210 10:55:49.694123 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/ff1667d0-453d-4b4f-bced-0452da294f98-trusted-ca-bundle\") pod \"console-5db8778c9-c5z2p\" (UID: \"ff1667d0-453d-4b4f-bced-0452da294f98\") " pod="openshift-console/console-5db8778c9-c5z2p" Dec 10 10:55:49 crc kubenswrapper[4780]: I1210 10:55:49.694174 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/ff1667d0-453d-4b4f-bced-0452da294f98-oauth-serving-cert\") pod \"console-5db8778c9-c5z2p\" (UID: \"ff1667d0-453d-4b4f-bced-0452da294f98\") " pod="openshift-console/console-5db8778c9-c5z2p" Dec 10 10:55:49 crc kubenswrapper[4780]: I1210 10:55:49.695143 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/ff1667d0-453d-4b4f-bced-0452da294f98-oauth-serving-cert\") pod \"console-5db8778c9-c5z2p\" (UID: \"ff1667d0-453d-4b4f-bced-0452da294f98\") " pod="openshift-console/console-5db8778c9-c5z2p" Dec 10 10:55:49 crc kubenswrapper[4780]: I1210 10:55:49.695454 4780 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/ff1667d0-453d-4b4f-bced-0452da294f98-console-config\") pod \"console-5db8778c9-c5z2p\" (UID: \"ff1667d0-453d-4b4f-bced-0452da294f98\") " pod="openshift-console/console-5db8778c9-c5z2p" Dec 10 10:55:49 crc kubenswrapper[4780]: I1210 10:55:49.700168 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/ff1667d0-453d-4b4f-bced-0452da294f98-console-serving-cert\") pod \"console-5db8778c9-c5z2p\" (UID: \"ff1667d0-453d-4b4f-bced-0452da294f98\") " pod="openshift-console/console-5db8778c9-c5z2p" Dec 10 10:55:49 crc kubenswrapper[4780]: I1210 10:55:49.700971 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/ff1667d0-453d-4b4f-bced-0452da294f98-console-oauth-config\") pod \"console-5db8778c9-c5z2p\" (UID: \"ff1667d0-453d-4b4f-bced-0452da294f98\") " pod="openshift-console/console-5db8778c9-c5z2p" Dec 10 10:55:49 crc kubenswrapper[4780]: I1210 10:55:49.720094 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9rvj5\" (UniqueName: \"kubernetes.io/projected/ff1667d0-453d-4b4f-bced-0452da294f98-kube-api-access-9rvj5\") pod \"console-5db8778c9-c5z2p\" (UID: \"ff1667d0-453d-4b4f-bced-0452da294f98\") " pod="openshift-console/console-5db8778c9-c5z2p" Dec 10 10:55:49 crc kubenswrapper[4780]: I1210 10:55:49.781537 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-5db8778c9-c5z2p" Dec 10 10:55:50 crc kubenswrapper[4780]: I1210 10:55:50.052421 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-5db8778c9-c5z2p"] Dec 10 10:55:50 crc kubenswrapper[4780]: I1210 10:55:50.134250 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-5db8778c9-c5z2p" event={"ID":"ff1667d0-453d-4b4f-bced-0452da294f98","Type":"ContainerStarted","Data":"62b3cba350ecfc7bf41e054269b20869c16efce0e9b65ec93818587a78e1d17b"} Dec 10 10:55:52 crc kubenswrapper[4780]: I1210 10:55:52.154908 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-5db8778c9-c5z2p" event={"ID":"ff1667d0-453d-4b4f-bced-0452da294f98","Type":"ContainerStarted","Data":"1d77a9bacf7497f45a701d126816fd8b00288797839a1f4043e23167335196c0"} Dec 10 10:55:52 crc kubenswrapper[4780]: I1210 10:55:52.189278 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/console-5db8778c9-c5z2p" podStartSLOduration=3.189227763 podStartE2EDuration="3.189227763s" podCreationTimestamp="2025-12-10 10:55:49 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 10:55:52.181799668 +0000 UTC m=+657.035193111" watchObservedRunningTime="2025-12-10 10:55:52.189227763 +0000 UTC m=+657.042621206" Dec 10 10:55:57 crc kubenswrapper[4780]: I1210 10:55:57.475442 4780 patch_prober.go:28] interesting pod/machine-config-daemon-xhdr5 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 10 10:55:57 crc kubenswrapper[4780]: I1210 10:55:57.475909 4780 prober.go:107] "Probe failed" probeType="Liveness" 
pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" podUID="6bf1dca1-b191-4796-b326-baac53e84045" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 10 10:55:59 crc kubenswrapper[4780]: I1210 10:55:59.782812 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/console-5db8778c9-c5z2p" Dec 10 10:55:59 crc kubenswrapper[4780]: I1210 10:55:59.783271 4780 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-console/console-5db8778c9-c5z2p" Dec 10 10:55:59 crc kubenswrapper[4780]: I1210 10:55:59.787536 4780 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-console/console-5db8778c9-c5z2p" Dec 10 10:56:00 crc kubenswrapper[4780]: I1210 10:56:00.214487 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/console-5db8778c9-c5z2p" Dec 10 10:56:00 crc kubenswrapper[4780]: I1210 10:56:00.288913 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-console/console-549cdbd99b-lf7hk"] Dec 10 10:56:25 crc kubenswrapper[4780]: I1210 10:56:25.331822 4780 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-console/console-549cdbd99b-lf7hk" podUID="1a100f8e-37f1-4624-be03-49e295e939ce" containerName="console" containerID="cri-o://61cc02824914de77e4dcbd6f97fff7e54cda37d4d1c01b03b840d73724d43880" gracePeriod=15 Dec 10 10:56:25 crc kubenswrapper[4780]: I1210 10:56:25.572486 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-console_console-549cdbd99b-lf7hk_1a100f8e-37f1-4624-be03-49e295e939ce/console/0.log" Dec 10 10:56:25 crc kubenswrapper[4780]: I1210 10:56:25.572552 4780 generic.go:334] "Generic (PLEG): container finished" podID="1a100f8e-37f1-4624-be03-49e295e939ce" containerID="61cc02824914de77e4dcbd6f97fff7e54cda37d4d1c01b03b840d73724d43880" exitCode=2 Dec 10 10:56:25 crc kubenswrapper[4780]: I1210 10:56:25.572599 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-549cdbd99b-lf7hk" event={"ID":"1a100f8e-37f1-4624-be03-49e295e939ce","Type":"ContainerDied","Data":"61cc02824914de77e4dcbd6f97fff7e54cda37d4d1c01b03b840d73724d43880"} Dec 10 10:56:25 crc kubenswrapper[4780]: I1210 10:56:25.840870 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-console_console-549cdbd99b-lf7hk_1a100f8e-37f1-4624-be03-49e295e939ce/console/0.log" Dec 10 10:56:25 crc kubenswrapper[4780]: I1210 10:56:25.842076 4780 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/console-549cdbd99b-lf7hk" Dec 10 10:56:26 crc kubenswrapper[4780]: I1210 10:56:26.007999 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/1a100f8e-37f1-4624-be03-49e295e939ce-oauth-serving-cert\") pod \"1a100f8e-37f1-4624-be03-49e295e939ce\" (UID: \"1a100f8e-37f1-4624-be03-49e295e939ce\") " Dec 10 10:56:26 crc kubenswrapper[4780]: I1210 10:56:26.008075 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/1a100f8e-37f1-4624-be03-49e295e939ce-console-serving-cert\") pod \"1a100f8e-37f1-4624-be03-49e295e939ce\" (UID: \"1a100f8e-37f1-4624-be03-49e295e939ce\") " Dec 10 10:56:26 crc kubenswrapper[4780]: I1210 10:56:26.008176 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-s56ss\" (UniqueName: \"kubernetes.io/projected/1a100f8e-37f1-4624-be03-49e295e939ce-kube-api-access-s56ss\") pod \"1a100f8e-37f1-4624-be03-49e295e939ce\" (UID: \"1a100f8e-37f1-4624-be03-49e295e939ce\") " Dec 10 10:56:26 crc kubenswrapper[4780]: I1210 10:56:26.008202 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/1a100f8e-37f1-4624-be03-49e295e939ce-trusted-ca-bundle\") pod \"1a100f8e-37f1-4624-be03-49e295e939ce\" (UID: \"1a100f8e-37f1-4624-be03-49e295e939ce\") " Dec 10 10:56:26 crc kubenswrapper[4780]: I1210 10:56:26.008265 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/1a100f8e-37f1-4624-be03-49e295e939ce-service-ca\") pod \"1a100f8e-37f1-4624-be03-49e295e939ce\" (UID: \"1a100f8e-37f1-4624-be03-49e295e939ce\") " Dec 10 10:56:26 crc kubenswrapper[4780]: I1210 10:56:26.008346 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/1a100f8e-37f1-4624-be03-49e295e939ce-console-oauth-config\") pod \"1a100f8e-37f1-4624-be03-49e295e939ce\" (UID: \"1a100f8e-37f1-4624-be03-49e295e939ce\") " Dec 10 10:56:26 crc kubenswrapper[4780]: I1210 10:56:26.008367 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/1a100f8e-37f1-4624-be03-49e295e939ce-console-config\") pod \"1a100f8e-37f1-4624-be03-49e295e939ce\" (UID: \"1a100f8e-37f1-4624-be03-49e295e939ce\") " Dec 10 10:56:26 crc kubenswrapper[4780]: I1210 10:56:26.009617 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1a100f8e-37f1-4624-be03-49e295e939ce-oauth-serving-cert" (OuterVolumeSpecName: "oauth-serving-cert") pod "1a100f8e-37f1-4624-be03-49e295e939ce" (UID: "1a100f8e-37f1-4624-be03-49e295e939ce"). InnerVolumeSpecName "oauth-serving-cert". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 10:56:26 crc kubenswrapper[4780]: I1210 10:56:26.009679 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1a100f8e-37f1-4624-be03-49e295e939ce-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "1a100f8e-37f1-4624-be03-49e295e939ce" (UID: "1a100f8e-37f1-4624-be03-49e295e939ce"). InnerVolumeSpecName "trusted-ca-bundle". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 10:56:26 crc kubenswrapper[4780]: I1210 10:56:26.009940 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1a100f8e-37f1-4624-be03-49e295e939ce-console-config" (OuterVolumeSpecName: "console-config") pod "1a100f8e-37f1-4624-be03-49e295e939ce" (UID: "1a100f8e-37f1-4624-be03-49e295e939ce"). InnerVolumeSpecName "console-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 10:56:26 crc kubenswrapper[4780]: I1210 10:56:26.011056 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1a100f8e-37f1-4624-be03-49e295e939ce-service-ca" (OuterVolumeSpecName: "service-ca") pod "1a100f8e-37f1-4624-be03-49e295e939ce" (UID: "1a100f8e-37f1-4624-be03-49e295e939ce"). InnerVolumeSpecName "service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 10:56:26 crc kubenswrapper[4780]: I1210 10:56:26.015037 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1a100f8e-37f1-4624-be03-49e295e939ce-kube-api-access-s56ss" (OuterVolumeSpecName: "kube-api-access-s56ss") pod "1a100f8e-37f1-4624-be03-49e295e939ce" (UID: "1a100f8e-37f1-4624-be03-49e295e939ce"). InnerVolumeSpecName "kube-api-access-s56ss". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 10:56:26 crc kubenswrapper[4780]: I1210 10:56:26.015468 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1a100f8e-37f1-4624-be03-49e295e939ce-console-oauth-config" (OuterVolumeSpecName: "console-oauth-config") pod "1a100f8e-37f1-4624-be03-49e295e939ce" (UID: "1a100f8e-37f1-4624-be03-49e295e939ce"). InnerVolumeSpecName "console-oauth-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 10:56:26 crc kubenswrapper[4780]: I1210 10:56:26.020338 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1a100f8e-37f1-4624-be03-49e295e939ce-console-serving-cert" (OuterVolumeSpecName: "console-serving-cert") pod "1a100f8e-37f1-4624-be03-49e295e939ce" (UID: "1a100f8e-37f1-4624-be03-49e295e939ce"). InnerVolumeSpecName "console-serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 10:56:26 crc kubenswrapper[4780]: I1210 10:56:26.111391 4780 reconciler_common.go:293] "Volume detached for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/1a100f8e-37f1-4624-be03-49e295e939ce-console-config\") on node \"crc\" DevicePath \"\"" Dec 10 10:56:26 crc kubenswrapper[4780]: I1210 10:56:26.112774 4780 reconciler_common.go:293] "Volume detached for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/1a100f8e-37f1-4624-be03-49e295e939ce-console-oauth-config\") on node \"crc\" DevicePath \"\"" Dec 10 10:56:26 crc kubenswrapper[4780]: I1210 10:56:26.112908 4780 reconciler_common.go:293] "Volume detached for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/1a100f8e-37f1-4624-be03-49e295e939ce-oauth-serving-cert\") on node \"crc\" DevicePath \"\"" Dec 10 10:56:26 crc kubenswrapper[4780]: I1210 10:56:26.112955 4780 reconciler_common.go:293] "Volume detached for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/1a100f8e-37f1-4624-be03-49e295e939ce-console-serving-cert\") on node \"crc\" DevicePath \"\"" Dec 10 10:56:26 crc kubenswrapper[4780]: I1210 10:56:26.112969 4780 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-s56ss\" (UniqueName: \"kubernetes.io/projected/1a100f8e-37f1-4624-be03-49e295e939ce-kube-api-access-s56ss\") on node \"crc\" DevicePath \"\"" Dec 10 10:56:26 crc kubenswrapper[4780]: I1210 10:56:26.112995 4780 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/1a100f8e-37f1-4624-be03-49e295e939ce-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 10 10:56:26 crc kubenswrapper[4780]: I1210 10:56:26.113015 4780 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/1a100f8e-37f1-4624-be03-49e295e939ce-service-ca\") on node \"crc\" DevicePath \"\"" Dec 10 10:56:26 crc kubenswrapper[4780]: I1210 10:56:26.584059 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-console_console-549cdbd99b-lf7hk_1a100f8e-37f1-4624-be03-49e295e939ce/console/0.log" Dec 10 10:56:26 crc kubenswrapper[4780]: I1210 10:56:26.584148 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-549cdbd99b-lf7hk" event={"ID":"1a100f8e-37f1-4624-be03-49e295e939ce","Type":"ContainerDied","Data":"00ac0b46f9e59552433ef35e883d4a539c6983a58fb9e4dc719ae271008c3dc1"} Dec 10 10:56:26 crc kubenswrapper[4780]: I1210 10:56:26.584286 4780 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/console-549cdbd99b-lf7hk" Dec 10 10:56:26 crc kubenswrapper[4780]: I1210 10:56:26.584307 4780 scope.go:117] "RemoveContainer" containerID="61cc02824914de77e4dcbd6f97fff7e54cda37d4d1c01b03b840d73724d43880" Dec 10 10:56:26 crc kubenswrapper[4780]: I1210 10:56:26.640118 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-console/console-549cdbd99b-lf7hk"] Dec 10 10:56:26 crc kubenswrapper[4780]: I1210 10:56:26.644441 4780 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-console/console-549cdbd99b-lf7hk"] Dec 10 10:56:27 crc kubenswrapper[4780]: I1210 10:56:27.476370 4780 patch_prober.go:28] interesting pod/machine-config-daemon-xhdr5 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 10 10:56:27 crc kubenswrapper[4780]: I1210 10:56:27.476832 4780 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" podUID="6bf1dca1-b191-4796-b326-baac53e84045" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 10 10:56:27 crc kubenswrapper[4780]: I1210 10:56:27.476903 4780 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" Dec 10 10:56:27 crc kubenswrapper[4780]: I1210 10:56:27.478069 4780 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"36d00436bbc9ae1da1897b8b9f2c3475af18239ee7063fe90cc695128e282bd3"} pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Dec 10 10:56:27 crc kubenswrapper[4780]: I1210 10:56:27.478160 4780 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" podUID="6bf1dca1-b191-4796-b326-baac53e84045" containerName="machine-config-daemon" containerID="cri-o://36d00436bbc9ae1da1897b8b9f2c3475af18239ee7063fe90cc695128e282bd3" gracePeriod=600 Dec 10 10:56:27 crc kubenswrapper[4780]: I1210 10:56:27.969422 4780 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1a100f8e-37f1-4624-be03-49e295e939ce" path="/var/lib/kubelet/pods/1a100f8e-37f1-4624-be03-49e295e939ce/volumes" Dec 10 10:56:28 crc kubenswrapper[4780]: I1210 10:56:28.606576 4780 generic.go:334] "Generic (PLEG): container finished" podID="6bf1dca1-b191-4796-b326-baac53e84045" containerID="36d00436bbc9ae1da1897b8b9f2c3475af18239ee7063fe90cc695128e282bd3" exitCode=0 Dec 10 10:56:28 crc kubenswrapper[4780]: I1210 10:56:28.606680 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" event={"ID":"6bf1dca1-b191-4796-b326-baac53e84045","Type":"ContainerDied","Data":"36d00436bbc9ae1da1897b8b9f2c3475af18239ee7063fe90cc695128e282bd3"} Dec 10 10:56:28 crc kubenswrapper[4780]: I1210 10:56:28.607086 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" event={"ID":"6bf1dca1-b191-4796-b326-baac53e84045","Type":"ContainerStarted","Data":"3fbfe685041c9fd303141118710f14f576d10f3417446140048debbbb20a3ef0"} Dec 
10 10:56:28 crc kubenswrapper[4780]: I1210 10:56:28.607117 4780 scope.go:117] "RemoveContainer" containerID="6ebc39bea1992f54cd24fdfecca195ad14903b8ade84ac83330f2cc7cf317153" Dec 10 10:56:56 crc kubenswrapper[4780]: I1210 10:56:56.834248 4780 scope.go:117] "RemoveContainer" containerID="181ef86f1d7a7ee5f8cbd8c62ee4779fa234fe9410486b1d0b94337383074784" Dec 10 10:56:56 crc kubenswrapper[4780]: I1210 10:56:56.866303 4780 scope.go:117] "RemoveContainer" containerID="26b24261800cb493594541c099e3f0c0ca1751477a786957168881d45803aa1f" Dec 10 10:56:56 crc kubenswrapper[4780]: I1210 10:56:56.901934 4780 scope.go:117] "RemoveContainer" containerID="cd21d171f98d7d75bd19036bf11078aca3d3b53a7b74ce3f019d76669a9c58f5" Dec 10 10:56:56 crc kubenswrapper[4780]: I1210 10:56:56.923371 4780 scope.go:117] "RemoveContainer" containerID="40bd3673ff7560022ea7f5b86aae2064c5fdb9df3c95e9b91200aee7dcf9f287" Dec 10 10:58:28 crc kubenswrapper[4780]: I1210 10:58:28.980816 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210scvkp"] Dec 10 10:58:28 crc kubenswrapper[4780]: E1210 10:58:28.982326 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1a100f8e-37f1-4624-be03-49e295e939ce" containerName="console" Dec 10 10:58:28 crc kubenswrapper[4780]: I1210 10:58:28.982401 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="1a100f8e-37f1-4624-be03-49e295e939ce" containerName="console" Dec 10 10:58:28 crc kubenswrapper[4780]: I1210 10:58:28.982769 4780 memory_manager.go:354] "RemoveStaleState removing state" podUID="1a100f8e-37f1-4624-be03-49e295e939ce" containerName="console" Dec 10 10:58:28 crc kubenswrapper[4780]: I1210 10:58:28.985821 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210scvkp" Dec 10 10:58:28 crc kubenswrapper[4780]: I1210 10:58:28.988734 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"default-dockercfg-vmwhc" Dec 10 10:58:29 crc kubenswrapper[4780]: I1210 10:58:29.005506 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210scvkp"] Dec 10 10:58:29 crc kubenswrapper[4780]: I1210 10:58:29.116683 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/8a9d08c5-4ca7-4ef1-b60e-60301f00728b-util\") pod \"6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210scvkp\" (UID: \"8a9d08c5-4ca7-4ef1-b60e-60301f00728b\") " pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210scvkp" Dec 10 10:58:29 crc kubenswrapper[4780]: I1210 10:58:29.116783 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-28cb7\" (UniqueName: \"kubernetes.io/projected/8a9d08c5-4ca7-4ef1-b60e-60301f00728b-kube-api-access-28cb7\") pod \"6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210scvkp\" (UID: \"8a9d08c5-4ca7-4ef1-b60e-60301f00728b\") " pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210scvkp" Dec 10 10:58:29 crc kubenswrapper[4780]: I1210 10:58:29.116820 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/8a9d08c5-4ca7-4ef1-b60e-60301f00728b-bundle\") pod \"6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210scvkp\" (UID: \"8a9d08c5-4ca7-4ef1-b60e-60301f00728b\") " pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210scvkp" Dec 10 10:58:29 crc kubenswrapper[4780]: I1210 10:58:29.219178 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/8a9d08c5-4ca7-4ef1-b60e-60301f00728b-util\") pod \"6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210scvkp\" (UID: \"8a9d08c5-4ca7-4ef1-b60e-60301f00728b\") " pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210scvkp" Dec 10 10:58:29 crc kubenswrapper[4780]: I1210 10:58:29.219309 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-28cb7\" (UniqueName: \"kubernetes.io/projected/8a9d08c5-4ca7-4ef1-b60e-60301f00728b-kube-api-access-28cb7\") pod \"6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210scvkp\" (UID: \"8a9d08c5-4ca7-4ef1-b60e-60301f00728b\") " pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210scvkp" Dec 10 10:58:29 crc kubenswrapper[4780]: I1210 10:58:29.219347 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/8a9d08c5-4ca7-4ef1-b60e-60301f00728b-bundle\") pod \"6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210scvkp\" (UID: \"8a9d08c5-4ca7-4ef1-b60e-60301f00728b\") " pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210scvkp" Dec 10 10:58:29 crc kubenswrapper[4780]: I1210 10:58:29.220352 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: 
\"kubernetes.io/empty-dir/8a9d08c5-4ca7-4ef1-b60e-60301f00728b-util\") pod \"6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210scvkp\" (UID: \"8a9d08c5-4ca7-4ef1-b60e-60301f00728b\") " pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210scvkp" Dec 10 10:58:29 crc kubenswrapper[4780]: I1210 10:58:29.220525 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/8a9d08c5-4ca7-4ef1-b60e-60301f00728b-bundle\") pod \"6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210scvkp\" (UID: \"8a9d08c5-4ca7-4ef1-b60e-60301f00728b\") " pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210scvkp" Dec 10 10:58:29 crc kubenswrapper[4780]: I1210 10:58:29.250493 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-28cb7\" (UniqueName: \"kubernetes.io/projected/8a9d08c5-4ca7-4ef1-b60e-60301f00728b-kube-api-access-28cb7\") pod \"6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210scvkp\" (UID: \"8a9d08c5-4ca7-4ef1-b60e-60301f00728b\") " pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210scvkp" Dec 10 10:58:29 crc kubenswrapper[4780]: I1210 10:58:29.319223 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210scvkp" Dec 10 10:58:30 crc kubenswrapper[4780]: I1210 10:58:30.309533 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210scvkp"] Dec 10 10:58:30 crc kubenswrapper[4780]: I1210 10:58:30.768272 4780 dynamic_cafile_content.go:123] "Loaded a new CA Bundle and Verifier" name="client-ca-bundle::/etc/kubernetes/kubelet-ca.crt" Dec 10 10:58:30 crc kubenswrapper[4780]: I1210 10:58:30.888206 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210scvkp" event={"ID":"8a9d08c5-4ca7-4ef1-b60e-60301f00728b","Type":"ContainerStarted","Data":"b002ef5c28ed555189dd8bed359130744506384dcc958df6af983153ecb660b7"} Dec 10 10:58:30 crc kubenswrapper[4780]: I1210 10:58:30.888292 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210scvkp" event={"ID":"8a9d08c5-4ca7-4ef1-b60e-60301f00728b","Type":"ContainerStarted","Data":"968b648ab50f7c0ae90ef083d0c76e8f093e23ea63f090f7bccca739d05c577b"} Dec 10 10:58:31 crc kubenswrapper[4780]: I1210 10:58:31.096741 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-mkxpc"] Dec 10 10:58:31 crc kubenswrapper[4780]: I1210 10:58:31.098489 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-mkxpc" Dec 10 10:58:31 crc kubenswrapper[4780]: I1210 10:58:31.114804 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-mkxpc"] Dec 10 10:58:31 crc kubenswrapper[4780]: I1210 10:58:31.209191 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f85d01b9-be91-4ff9-bf9d-886fa995d582-catalog-content\") pod \"redhat-operators-mkxpc\" (UID: \"f85d01b9-be91-4ff9-bf9d-886fa995d582\") " pod="openshift-marketplace/redhat-operators-mkxpc" Dec 10 10:58:31 crc kubenswrapper[4780]: I1210 10:58:31.209347 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f85d01b9-be91-4ff9-bf9d-886fa995d582-utilities\") pod \"redhat-operators-mkxpc\" (UID: \"f85d01b9-be91-4ff9-bf9d-886fa995d582\") " pod="openshift-marketplace/redhat-operators-mkxpc" Dec 10 10:58:31 crc kubenswrapper[4780]: I1210 10:58:31.209377 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q5wxk\" (UniqueName: \"kubernetes.io/projected/f85d01b9-be91-4ff9-bf9d-886fa995d582-kube-api-access-q5wxk\") pod \"redhat-operators-mkxpc\" (UID: \"f85d01b9-be91-4ff9-bf9d-886fa995d582\") " pod="openshift-marketplace/redhat-operators-mkxpc" Dec 10 10:58:31 crc kubenswrapper[4780]: I1210 10:58:31.311251 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f85d01b9-be91-4ff9-bf9d-886fa995d582-catalog-content\") pod \"redhat-operators-mkxpc\" (UID: \"f85d01b9-be91-4ff9-bf9d-886fa995d582\") " pod="openshift-marketplace/redhat-operators-mkxpc" Dec 10 10:58:31 crc kubenswrapper[4780]: I1210 10:58:31.311360 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f85d01b9-be91-4ff9-bf9d-886fa995d582-utilities\") pod \"redhat-operators-mkxpc\" (UID: \"f85d01b9-be91-4ff9-bf9d-886fa995d582\") " pod="openshift-marketplace/redhat-operators-mkxpc" Dec 10 10:58:31 crc kubenswrapper[4780]: I1210 10:58:31.311398 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q5wxk\" (UniqueName: \"kubernetes.io/projected/f85d01b9-be91-4ff9-bf9d-886fa995d582-kube-api-access-q5wxk\") pod \"redhat-operators-mkxpc\" (UID: \"f85d01b9-be91-4ff9-bf9d-886fa995d582\") " pod="openshift-marketplace/redhat-operators-mkxpc" Dec 10 10:58:31 crc kubenswrapper[4780]: I1210 10:58:31.312609 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f85d01b9-be91-4ff9-bf9d-886fa995d582-catalog-content\") pod \"redhat-operators-mkxpc\" (UID: \"f85d01b9-be91-4ff9-bf9d-886fa995d582\") " pod="openshift-marketplace/redhat-operators-mkxpc" Dec 10 10:58:31 crc kubenswrapper[4780]: I1210 10:58:31.313077 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f85d01b9-be91-4ff9-bf9d-886fa995d582-utilities\") pod \"redhat-operators-mkxpc\" (UID: \"f85d01b9-be91-4ff9-bf9d-886fa995d582\") " pod="openshift-marketplace/redhat-operators-mkxpc" Dec 10 10:58:31 crc kubenswrapper[4780]: I1210 10:58:31.351142 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-q5wxk\" (UniqueName: \"kubernetes.io/projected/f85d01b9-be91-4ff9-bf9d-886fa995d582-kube-api-access-q5wxk\") pod \"redhat-operators-mkxpc\" (UID: \"f85d01b9-be91-4ff9-bf9d-886fa995d582\") " pod="openshift-marketplace/redhat-operators-mkxpc" Dec 10 10:58:31 crc kubenswrapper[4780]: I1210 10:58:31.467318 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-mkxpc" Dec 10 10:58:31 crc kubenswrapper[4780]: I1210 10:58:31.901745 4780 generic.go:334] "Generic (PLEG): container finished" podID="8a9d08c5-4ca7-4ef1-b60e-60301f00728b" containerID="b002ef5c28ed555189dd8bed359130744506384dcc958df6af983153ecb660b7" exitCode=0 Dec 10 10:58:31 crc kubenswrapper[4780]: I1210 10:58:31.902396 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210scvkp" event={"ID":"8a9d08c5-4ca7-4ef1-b60e-60301f00728b","Type":"ContainerDied","Data":"b002ef5c28ed555189dd8bed359130744506384dcc958df6af983153ecb660b7"} Dec 10 10:58:31 crc kubenswrapper[4780]: I1210 10:58:31.914452 4780 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Dec 10 10:58:32 crc kubenswrapper[4780]: I1210 10:58:32.055682 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-mkxpc"] Dec 10 10:58:32 crc kubenswrapper[4780]: I1210 10:58:32.912221 4780 generic.go:334] "Generic (PLEG): container finished" podID="f85d01b9-be91-4ff9-bf9d-886fa995d582" containerID="7f77680c0fe69b6b82f90eaccbb8043ee4ee8eea9b150e4b31cdc743a7d92d0d" exitCode=0 Dec 10 10:58:32 crc kubenswrapper[4780]: I1210 10:58:32.912335 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-mkxpc" event={"ID":"f85d01b9-be91-4ff9-bf9d-886fa995d582","Type":"ContainerDied","Data":"7f77680c0fe69b6b82f90eaccbb8043ee4ee8eea9b150e4b31cdc743a7d92d0d"} Dec 10 10:58:32 crc kubenswrapper[4780]: I1210 10:58:32.912382 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-mkxpc" event={"ID":"f85d01b9-be91-4ff9-bf9d-886fa995d582","Type":"ContainerStarted","Data":"4da3d9f7c5d9b14f9a3e2bb43e44cc8955526ccdb0fe2bef8400a87900a62994"} Dec 10 10:58:33 crc kubenswrapper[4780]: I1210 10:58:33.922128 4780 generic.go:334] "Generic (PLEG): container finished" podID="8a9d08c5-4ca7-4ef1-b60e-60301f00728b" containerID="bfbe58686f458af242a23cb0ecad08a68baf93de0abb3e2d1a04d76ad76619ae" exitCode=0 Dec 10 10:58:33 crc kubenswrapper[4780]: I1210 10:58:33.922381 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210scvkp" event={"ID":"8a9d08c5-4ca7-4ef1-b60e-60301f00728b","Type":"ContainerDied","Data":"bfbe58686f458af242a23cb0ecad08a68baf93de0abb3e2d1a04d76ad76619ae"} Dec 10 10:58:34 crc kubenswrapper[4780]: I1210 10:58:34.932677 4780 generic.go:334] "Generic (PLEG): container finished" podID="8a9d08c5-4ca7-4ef1-b60e-60301f00728b" containerID="c7cf547fc8482f078df3f7a67520ff3794030512a0eede4b0d789d53567ed5dd" exitCode=0 Dec 10 10:58:34 crc kubenswrapper[4780]: I1210 10:58:34.932744 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210scvkp" event={"ID":"8a9d08c5-4ca7-4ef1-b60e-60301f00728b","Type":"ContainerDied","Data":"c7cf547fc8482f078df3f7a67520ff3794030512a0eede4b0d789d53567ed5dd"} Dec 10 10:58:36 crc 
kubenswrapper[4780]: I1210 10:58:36.437998 4780 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210scvkp" Dec 10 10:58:36 crc kubenswrapper[4780]: I1210 10:58:36.535559 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/8a9d08c5-4ca7-4ef1-b60e-60301f00728b-util\") pod \"8a9d08c5-4ca7-4ef1-b60e-60301f00728b\" (UID: \"8a9d08c5-4ca7-4ef1-b60e-60301f00728b\") " Dec 10 10:58:36 crc kubenswrapper[4780]: I1210 10:58:36.535717 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/8a9d08c5-4ca7-4ef1-b60e-60301f00728b-bundle\") pod \"8a9d08c5-4ca7-4ef1-b60e-60301f00728b\" (UID: \"8a9d08c5-4ca7-4ef1-b60e-60301f00728b\") " Dec 10 10:58:36 crc kubenswrapper[4780]: I1210 10:58:36.535782 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-28cb7\" (UniqueName: \"kubernetes.io/projected/8a9d08c5-4ca7-4ef1-b60e-60301f00728b-kube-api-access-28cb7\") pod \"8a9d08c5-4ca7-4ef1-b60e-60301f00728b\" (UID: \"8a9d08c5-4ca7-4ef1-b60e-60301f00728b\") " Dec 10 10:58:36 crc kubenswrapper[4780]: I1210 10:58:36.538655 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8a9d08c5-4ca7-4ef1-b60e-60301f00728b-bundle" (OuterVolumeSpecName: "bundle") pod "8a9d08c5-4ca7-4ef1-b60e-60301f00728b" (UID: "8a9d08c5-4ca7-4ef1-b60e-60301f00728b"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 10:58:36 crc kubenswrapper[4780]: I1210 10:58:36.544403 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8a9d08c5-4ca7-4ef1-b60e-60301f00728b-kube-api-access-28cb7" (OuterVolumeSpecName: "kube-api-access-28cb7") pod "8a9d08c5-4ca7-4ef1-b60e-60301f00728b" (UID: "8a9d08c5-4ca7-4ef1-b60e-60301f00728b"). InnerVolumeSpecName "kube-api-access-28cb7". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 10:58:36 crc kubenswrapper[4780]: I1210 10:58:36.555218 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8a9d08c5-4ca7-4ef1-b60e-60301f00728b-util" (OuterVolumeSpecName: "util") pod "8a9d08c5-4ca7-4ef1-b60e-60301f00728b" (UID: "8a9d08c5-4ca7-4ef1-b60e-60301f00728b"). InnerVolumeSpecName "util". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 10:58:36 crc kubenswrapper[4780]: I1210 10:58:36.637475 4780 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-28cb7\" (UniqueName: \"kubernetes.io/projected/8a9d08c5-4ca7-4ef1-b60e-60301f00728b-kube-api-access-28cb7\") on node \"crc\" DevicePath \"\"" Dec 10 10:58:36 crc kubenswrapper[4780]: I1210 10:58:36.637527 4780 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/8a9d08c5-4ca7-4ef1-b60e-60301f00728b-util\") on node \"crc\" DevicePath \"\"" Dec 10 10:58:36 crc kubenswrapper[4780]: I1210 10:58:36.637538 4780 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/8a9d08c5-4ca7-4ef1-b60e-60301f00728b-bundle\") on node \"crc\" DevicePath \"\"" Dec 10 10:58:36 crc kubenswrapper[4780]: I1210 10:58:36.965781 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210scvkp" event={"ID":"8a9d08c5-4ca7-4ef1-b60e-60301f00728b","Type":"ContainerDied","Data":"968b648ab50f7c0ae90ef083d0c76e8f093e23ea63f090f7bccca739d05c577b"} Dec 10 10:58:36 crc kubenswrapper[4780]: I1210 10:58:36.965865 4780 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="968b648ab50f7c0ae90ef083d0c76e8f093e23ea63f090f7bccca739d05c577b" Dec 10 10:58:36 crc kubenswrapper[4780]: I1210 10:58:36.965885 4780 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210scvkp" Dec 10 10:58:39 crc kubenswrapper[4780]: I1210 10:58:39.180960 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-fpl55"] Dec 10 10:58:39 crc kubenswrapper[4780]: I1210 10:58:39.182779 4780 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-fpl55" podUID="cc22221d-0c02-4e8c-8314-c2e6d9290b5e" containerName="ovn-controller" containerID="cri-o://c39c3d56584d0939bfa9be938be09db5c93ddf50e0a56cacaa6e30c7b79475a1" gracePeriod=30 Dec 10 10:58:39 crc kubenswrapper[4780]: I1210 10:58:39.183409 4780 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-fpl55" podUID="cc22221d-0c02-4e8c-8314-c2e6d9290b5e" containerName="kube-rbac-proxy-ovn-metrics" containerID="cri-o://ce7c3983123be2f92944322c2cf554e67cf80a51981415d9872bfc2db9f1ed3a" gracePeriod=30 Dec 10 10:58:39 crc kubenswrapper[4780]: I1210 10:58:39.183502 4780 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-fpl55" podUID="cc22221d-0c02-4e8c-8314-c2e6d9290b5e" containerName="sbdb" containerID="cri-o://df663a4e7ac68c8967db1167ea07b83862e9d52aa13b3f10e51934bb18e7397e" gracePeriod=30 Dec 10 10:58:39 crc kubenswrapper[4780]: I1210 10:58:39.183564 4780 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-fpl55" podUID="cc22221d-0c02-4e8c-8314-c2e6d9290b5e" containerName="nbdb" containerID="cri-o://cafa00280d671b48ed3628917793412a20b9d3be93a80d9a7775003081aea1ab" gracePeriod=30 Dec 10 10:58:39 crc kubenswrapper[4780]: I1210 10:58:39.183619 4780 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-fpl55" podUID="cc22221d-0c02-4e8c-8314-c2e6d9290b5e" containerName="northd" 
containerID="cri-o://dbc444116b8592021f7d6ba0387d473521cba7c09ca78384be81f688af54554f" gracePeriod=30 Dec 10 10:58:39 crc kubenswrapper[4780]: I1210 10:58:39.183676 4780 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-fpl55" podUID="cc22221d-0c02-4e8c-8314-c2e6d9290b5e" containerName="kube-rbac-proxy-node" containerID="cri-o://ef0cc518de07a141be124545f2e547c630329ff507b0e0e71e4dee9b4be5d89a" gracePeriod=30 Dec 10 10:58:39 crc kubenswrapper[4780]: I1210 10:58:39.183700 4780 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-fpl55" podUID="cc22221d-0c02-4e8c-8314-c2e6d9290b5e" containerName="ovn-acl-logging" containerID="cri-o://782bcb7eb8341b73ff1c32c5af00aaefa2a513e5d1672f9f7df13cd05467131c" gracePeriod=30 Dec 10 10:58:39 crc kubenswrapper[4780]: I1210 10:58:39.233840 4780 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-fpl55" podUID="cc22221d-0c02-4e8c-8314-c2e6d9290b5e" containerName="ovnkube-controller" containerID="cri-o://3b436c5f5fde27d37bb053b20d1909a42902f8da7cd35713ac9cced9d4f93d30" gracePeriod=30 Dec 10 10:58:39 crc kubenswrapper[4780]: I1210 10:58:39.993268 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-fpl55_cc22221d-0c02-4e8c-8314-c2e6d9290b5e/ovnkube-controller/3.log" Dec 10 10:58:39 crc kubenswrapper[4780]: I1210 10:58:39.996135 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-fpl55_cc22221d-0c02-4e8c-8314-c2e6d9290b5e/ovn-acl-logging/0.log" Dec 10 10:58:39 crc kubenswrapper[4780]: I1210 10:58:39.996884 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-fpl55_cc22221d-0c02-4e8c-8314-c2e6d9290b5e/ovn-controller/0.log" Dec 10 10:58:39 crc kubenswrapper[4780]: I1210 10:58:39.997486 4780 generic.go:334] "Generic (PLEG): container finished" podID="cc22221d-0c02-4e8c-8314-c2e6d9290b5e" containerID="3b436c5f5fde27d37bb053b20d1909a42902f8da7cd35713ac9cced9d4f93d30" exitCode=0 Dec 10 10:58:39 crc kubenswrapper[4780]: I1210 10:58:39.997530 4780 generic.go:334] "Generic (PLEG): container finished" podID="cc22221d-0c02-4e8c-8314-c2e6d9290b5e" containerID="df663a4e7ac68c8967db1167ea07b83862e9d52aa13b3f10e51934bb18e7397e" exitCode=0 Dec 10 10:58:39 crc kubenswrapper[4780]: I1210 10:58:39.997544 4780 generic.go:334] "Generic (PLEG): container finished" podID="cc22221d-0c02-4e8c-8314-c2e6d9290b5e" containerID="cafa00280d671b48ed3628917793412a20b9d3be93a80d9a7775003081aea1ab" exitCode=0 Dec 10 10:58:39 crc kubenswrapper[4780]: I1210 10:58:39.997555 4780 generic.go:334] "Generic (PLEG): container finished" podID="cc22221d-0c02-4e8c-8314-c2e6d9290b5e" containerID="dbc444116b8592021f7d6ba0387d473521cba7c09ca78384be81f688af54554f" exitCode=0 Dec 10 10:58:39 crc kubenswrapper[4780]: I1210 10:58:39.997574 4780 generic.go:334] "Generic (PLEG): container finished" podID="cc22221d-0c02-4e8c-8314-c2e6d9290b5e" containerID="782bcb7eb8341b73ff1c32c5af00aaefa2a513e5d1672f9f7df13cd05467131c" exitCode=143 Dec 10 10:58:39 crc kubenswrapper[4780]: I1210 10:58:39.997598 4780 generic.go:334] "Generic (PLEG): container finished" podID="cc22221d-0c02-4e8c-8314-c2e6d9290b5e" containerID="c39c3d56584d0939bfa9be938be09db5c93ddf50e0a56cacaa6e30c7b79475a1" exitCode=143 Dec 10 10:58:39 crc kubenswrapper[4780]: I1210 10:58:39.997663 4780 kubelet.go:2453] "SyncLoop (PLEG): 
event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-fpl55" event={"ID":"cc22221d-0c02-4e8c-8314-c2e6d9290b5e","Type":"ContainerDied","Data":"3b436c5f5fde27d37bb053b20d1909a42902f8da7cd35713ac9cced9d4f93d30"} Dec 10 10:58:39 crc kubenswrapper[4780]: I1210 10:58:39.997731 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-fpl55" event={"ID":"cc22221d-0c02-4e8c-8314-c2e6d9290b5e","Type":"ContainerDied","Data":"df663a4e7ac68c8967db1167ea07b83862e9d52aa13b3f10e51934bb18e7397e"} Dec 10 10:58:39 crc kubenswrapper[4780]: I1210 10:58:39.997753 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-fpl55" event={"ID":"cc22221d-0c02-4e8c-8314-c2e6d9290b5e","Type":"ContainerDied","Data":"cafa00280d671b48ed3628917793412a20b9d3be93a80d9a7775003081aea1ab"} Dec 10 10:58:39 crc kubenswrapper[4780]: I1210 10:58:39.997783 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-fpl55" event={"ID":"cc22221d-0c02-4e8c-8314-c2e6d9290b5e","Type":"ContainerDied","Data":"dbc444116b8592021f7d6ba0387d473521cba7c09ca78384be81f688af54554f"} Dec 10 10:58:39 crc kubenswrapper[4780]: I1210 10:58:39.997816 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-fpl55" event={"ID":"cc22221d-0c02-4e8c-8314-c2e6d9290b5e","Type":"ContainerDied","Data":"782bcb7eb8341b73ff1c32c5af00aaefa2a513e5d1672f9f7df13cd05467131c"} Dec 10 10:58:39 crc kubenswrapper[4780]: I1210 10:58:39.997834 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-fpl55" event={"ID":"cc22221d-0c02-4e8c-8314-c2e6d9290b5e","Type":"ContainerDied","Data":"c39c3d56584d0939bfa9be938be09db5c93ddf50e0a56cacaa6e30c7b79475a1"} Dec 10 10:58:39 crc kubenswrapper[4780]: I1210 10:58:39.997912 4780 scope.go:117] "RemoveContainer" containerID="ff51c6ad85356e07cc2059fe4b05b1061ae2d8eb800f63ba392c8a4f36112ca5" Dec 10 10:58:40 crc kubenswrapper[4780]: I1210 10:58:40.002111 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-8cwb7_deadb49b-61b8-435f-8168-d7bd3c01b5ad/kube-multus/2.log" Dec 10 10:58:40 crc kubenswrapper[4780]: I1210 10:58:40.002574 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-8cwb7_deadb49b-61b8-435f-8168-d7bd3c01b5ad/kube-multus/1.log" Dec 10 10:58:40 crc kubenswrapper[4780]: I1210 10:58:40.002617 4780 generic.go:334] "Generic (PLEG): container finished" podID="deadb49b-61b8-435f-8168-d7bd3c01b5ad" containerID="15785f7813590fd04ab190b13bb01dfd35df6b281b159d464b7146b18f150880" exitCode=2 Dec 10 10:58:40 crc kubenswrapper[4780]: I1210 10:58:40.002648 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-8cwb7" event={"ID":"deadb49b-61b8-435f-8168-d7bd3c01b5ad","Type":"ContainerDied","Data":"15785f7813590fd04ab190b13bb01dfd35df6b281b159d464b7146b18f150880"} Dec 10 10:58:40 crc kubenswrapper[4780]: I1210 10:58:40.003790 4780 scope.go:117] "RemoveContainer" containerID="15785f7813590fd04ab190b13bb01dfd35df6b281b159d464b7146b18f150880" Dec 10 10:58:41 crc kubenswrapper[4780]: I1210 10:58:41.038992 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-fpl55_cc22221d-0c02-4e8c-8314-c2e6d9290b5e/ovn-acl-logging/0.log" Dec 10 10:58:41 crc kubenswrapper[4780]: I1210 10:58:41.045534 4780 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-fpl55_cc22221d-0c02-4e8c-8314-c2e6d9290b5e/ovn-controller/0.log" Dec 10 10:58:41 crc kubenswrapper[4780]: I1210 10:58:41.046551 4780 generic.go:334] "Generic (PLEG): container finished" podID="cc22221d-0c02-4e8c-8314-c2e6d9290b5e" containerID="ce7c3983123be2f92944322c2cf554e67cf80a51981415d9872bfc2db9f1ed3a" exitCode=0 Dec 10 10:58:41 crc kubenswrapper[4780]: I1210 10:58:41.046615 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-fpl55" event={"ID":"cc22221d-0c02-4e8c-8314-c2e6d9290b5e","Type":"ContainerDied","Data":"ce7c3983123be2f92944322c2cf554e67cf80a51981415d9872bfc2db9f1ed3a"} Dec 10 10:58:42 crc kubenswrapper[4780]: I1210 10:58:42.070819 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-fpl55_cc22221d-0c02-4e8c-8314-c2e6d9290b5e/ovn-acl-logging/0.log" Dec 10 10:58:42 crc kubenswrapper[4780]: I1210 10:58:42.072505 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-fpl55_cc22221d-0c02-4e8c-8314-c2e6d9290b5e/ovn-controller/0.log" Dec 10 10:58:42 crc kubenswrapper[4780]: I1210 10:58:42.073202 4780 generic.go:334] "Generic (PLEG): container finished" podID="cc22221d-0c02-4e8c-8314-c2e6d9290b5e" containerID="ef0cc518de07a141be124545f2e547c630329ff507b0e0e71e4dee9b4be5d89a" exitCode=0 Dec 10 10:58:42 crc kubenswrapper[4780]: I1210 10:58:42.073285 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-fpl55" event={"ID":"cc22221d-0c02-4e8c-8314-c2e6d9290b5e","Type":"ContainerDied","Data":"ef0cc518de07a141be124545f2e547c630329ff507b0e0e71e4dee9b4be5d89a"} Dec 10 10:58:47 crc kubenswrapper[4780]: E1210 10:58:47.548381 4780 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of df663a4e7ac68c8967db1167ea07b83862e9d52aa13b3f10e51934bb18e7397e is running failed: container process not found" containerID="df663a4e7ac68c8967db1167ea07b83862e9d52aa13b3f10e51934bb18e7397e" cmd=["/bin/bash","-c","set -xeo pipefail\n. /ovnkube-lib/ovnkube-lib.sh || exit 1\novndb-readiness-probe \"sb\"\n"] Dec 10 10:58:47 crc kubenswrapper[4780]: E1210 10:58:47.548347 4780 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of cafa00280d671b48ed3628917793412a20b9d3be93a80d9a7775003081aea1ab is running failed: container process not found" containerID="cafa00280d671b48ed3628917793412a20b9d3be93a80d9a7775003081aea1ab" cmd=["/bin/bash","-c","set -xeo pipefail\n. /ovnkube-lib/ovnkube-lib.sh || exit 1\novndb-readiness-probe \"nb\"\n"] Dec 10 10:58:47 crc kubenswrapper[4780]: E1210 10:58:47.550941 4780 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of df663a4e7ac68c8967db1167ea07b83862e9d52aa13b3f10e51934bb18e7397e is running failed: container process not found" containerID="df663a4e7ac68c8967db1167ea07b83862e9d52aa13b3f10e51934bb18e7397e" cmd=["/bin/bash","-c","set -xeo pipefail\n. 
/ovnkube-lib/ovnkube-lib.sh || exit 1\novndb-readiness-probe \"sb\"\n"] Dec 10 10:58:47 crc kubenswrapper[4780]: E1210 10:58:47.551044 4780 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of cafa00280d671b48ed3628917793412a20b9d3be93a80d9a7775003081aea1ab is running failed: container process not found" containerID="cafa00280d671b48ed3628917793412a20b9d3be93a80d9a7775003081aea1ab" cmd=["/bin/bash","-c","set -xeo pipefail\n. /ovnkube-lib/ovnkube-lib.sh || exit 1\novndb-readiness-probe \"nb\"\n"] Dec 10 10:58:47 crc kubenswrapper[4780]: E1210 10:58:47.551294 4780 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of df663a4e7ac68c8967db1167ea07b83862e9d52aa13b3f10e51934bb18e7397e is running failed: container process not found" containerID="df663a4e7ac68c8967db1167ea07b83862e9d52aa13b3f10e51934bb18e7397e" cmd=["/bin/bash","-c","set -xeo pipefail\n. /ovnkube-lib/ovnkube-lib.sh || exit 1\novndb-readiness-probe \"sb\"\n"] Dec 10 10:58:47 crc kubenswrapper[4780]: E1210 10:58:47.551425 4780 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of cafa00280d671b48ed3628917793412a20b9d3be93a80d9a7775003081aea1ab is running failed: container process not found" containerID="cafa00280d671b48ed3628917793412a20b9d3be93a80d9a7775003081aea1ab" cmd=["/bin/bash","-c","set -xeo pipefail\n. /ovnkube-lib/ovnkube-lib.sh || exit 1\novndb-readiness-probe \"nb\"\n"] Dec 10 10:58:47 crc kubenswrapper[4780]: E1210 10:58:47.551465 4780 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of cafa00280d671b48ed3628917793412a20b9d3be93a80d9a7775003081aea1ab is running failed: container process not found" probeType="Readiness" pod="openshift-ovn-kubernetes/ovnkube-node-fpl55" podUID="cc22221d-0c02-4e8c-8314-c2e6d9290b5e" containerName="nbdb" Dec 10 10:58:47 crc kubenswrapper[4780]: E1210 10:58:47.551425 4780 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of df663a4e7ac68c8967db1167ea07b83862e9d52aa13b3f10e51934bb18e7397e is running failed: container process not found" probeType="Readiness" pod="openshift-ovn-kubernetes/ovnkube-node-fpl55" podUID="cc22221d-0c02-4e8c-8314-c2e6d9290b5e" containerName="sbdb" Dec 10 10:58:51 crc kubenswrapper[4780]: I1210 10:58:51.574403 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operators/obo-prometheus-operator-668cf9dfbb-fqxm5"] Dec 10 10:58:51 crc kubenswrapper[4780]: E1210 10:58:51.575388 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8a9d08c5-4ca7-4ef1-b60e-60301f00728b" containerName="pull" Dec 10 10:58:51 crc kubenswrapper[4780]: I1210 10:58:51.575417 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="8a9d08c5-4ca7-4ef1-b60e-60301f00728b" containerName="pull" Dec 10 10:58:51 crc kubenswrapper[4780]: E1210 10:58:51.575438 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8a9d08c5-4ca7-4ef1-b60e-60301f00728b" containerName="util" Dec 10 10:58:51 crc kubenswrapper[4780]: I1210 10:58:51.575447 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="8a9d08c5-4ca7-4ef1-b60e-60301f00728b" containerName="util" Dec 10 10:58:51 crc kubenswrapper[4780]: E1210 10:58:51.575459 4780 
cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8a9d08c5-4ca7-4ef1-b60e-60301f00728b" containerName="extract" Dec 10 10:58:51 crc kubenswrapper[4780]: I1210 10:58:51.575465 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="8a9d08c5-4ca7-4ef1-b60e-60301f00728b" containerName="extract" Dec 10 10:58:51 crc kubenswrapper[4780]: I1210 10:58:51.575625 4780 memory_manager.go:354] "RemoveStaleState removing state" podUID="8a9d08c5-4ca7-4ef1-b60e-60301f00728b" containerName="extract" Dec 10 10:58:51 crc kubenswrapper[4780]: I1210 10:58:51.576336 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-fqxm5" Dec 10 10:58:51 crc kubenswrapper[4780]: I1210 10:58:51.583232 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operators"/"openshift-service-ca.crt" Dec 10 10:58:51 crc kubenswrapper[4780]: I1210 10:58:51.584330 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"obo-prometheus-operator-dockercfg-42xmn" Dec 10 10:58:51 crc kubenswrapper[4780]: I1210 10:58:51.600243 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operators"/"kube-root-ca.crt" Dec 10 10:58:51 crc kubenswrapper[4780]: I1210 10:58:51.651601 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mw7vd\" (UniqueName: \"kubernetes.io/projected/fd80e9af-20c4-4aaa-9f38-4f46c3b610fb-kube-api-access-mw7vd\") pod \"obo-prometheus-operator-668cf9dfbb-fqxm5\" (UID: \"fd80e9af-20c4-4aaa-9f38-4f46c3b610fb\") " pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-fqxm5" Dec 10 10:58:51 crc kubenswrapper[4780]: I1210 10:58:51.691174 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operators/obo-prometheus-operator-admission-webhook-7767cf9dd9-bbrkh"] Dec 10 10:58:51 crc kubenswrapper[4780]: I1210 10:58:51.692525 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-7767cf9dd9-bbrkh" Dec 10 10:58:51 crc kubenswrapper[4780]: I1210 10:58:51.701382 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"obo-prometheus-operator-admission-webhook-dockercfg-c4njt" Dec 10 10:58:51 crc kubenswrapper[4780]: I1210 10:58:51.701668 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"obo-prometheus-operator-admission-webhook-service-cert" Dec 10 10:58:51 crc kubenswrapper[4780]: I1210 10:58:51.717173 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operators/obo-prometheus-operator-admission-webhook-7767cf9dd9-fcj9f"] Dec 10 10:58:51 crc kubenswrapper[4780]: I1210 10:58:51.718300 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-7767cf9dd9-fcj9f" Dec 10 10:58:51 crc kubenswrapper[4780]: I1210 10:58:51.752888 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mw7vd\" (UniqueName: \"kubernetes.io/projected/fd80e9af-20c4-4aaa-9f38-4f46c3b610fb-kube-api-access-mw7vd\") pod \"obo-prometheus-operator-668cf9dfbb-fqxm5\" (UID: \"fd80e9af-20c4-4aaa-9f38-4f46c3b610fb\") " pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-fqxm5" Dec 10 10:58:51 crc kubenswrapper[4780]: I1210 10:58:51.752974 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/db680a43-b9fa-45d6-b751-f4467cfe5065-apiservice-cert\") pod \"obo-prometheus-operator-admission-webhook-7767cf9dd9-bbrkh\" (UID: \"db680a43-b9fa-45d6-b751-f4467cfe5065\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-7767cf9dd9-bbrkh" Dec 10 10:58:51 crc kubenswrapper[4780]: I1210 10:58:51.753023 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/42449c87-f0c6-4433-92cb-f89e51cb5a14-apiservice-cert\") pod \"obo-prometheus-operator-admission-webhook-7767cf9dd9-fcj9f\" (UID: \"42449c87-f0c6-4433-92cb-f89e51cb5a14\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-7767cf9dd9-fcj9f" Dec 10 10:58:51 crc kubenswrapper[4780]: I1210 10:58:51.753072 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/42449c87-f0c6-4433-92cb-f89e51cb5a14-webhook-cert\") pod \"obo-prometheus-operator-admission-webhook-7767cf9dd9-fcj9f\" (UID: \"42449c87-f0c6-4433-92cb-f89e51cb5a14\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-7767cf9dd9-fcj9f" Dec 10 10:58:51 crc kubenswrapper[4780]: I1210 10:58:51.753102 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/db680a43-b9fa-45d6-b751-f4467cfe5065-webhook-cert\") pod \"obo-prometheus-operator-admission-webhook-7767cf9dd9-bbrkh\" (UID: \"db680a43-b9fa-45d6-b751-f4467cfe5065\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-7767cf9dd9-bbrkh" Dec 10 10:58:51 crc kubenswrapper[4780]: I1210 10:58:51.817146 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mw7vd\" (UniqueName: \"kubernetes.io/projected/fd80e9af-20c4-4aaa-9f38-4f46c3b610fb-kube-api-access-mw7vd\") pod \"obo-prometheus-operator-668cf9dfbb-fqxm5\" (UID: \"fd80e9af-20c4-4aaa-9f38-4f46c3b610fb\") " pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-fqxm5" Dec 10 10:58:51 crc kubenswrapper[4780]: I1210 10:58:51.854813 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/42449c87-f0c6-4433-92cb-f89e51cb5a14-apiservice-cert\") pod \"obo-prometheus-operator-admission-webhook-7767cf9dd9-fcj9f\" (UID: \"42449c87-f0c6-4433-92cb-f89e51cb5a14\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-7767cf9dd9-fcj9f" Dec 10 10:58:51 crc kubenswrapper[4780]: I1210 10:58:51.854912 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: 
\"kubernetes.io/secret/42449c87-f0c6-4433-92cb-f89e51cb5a14-webhook-cert\") pod \"obo-prometheus-operator-admission-webhook-7767cf9dd9-fcj9f\" (UID: \"42449c87-f0c6-4433-92cb-f89e51cb5a14\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-7767cf9dd9-fcj9f" Dec 10 10:58:51 crc kubenswrapper[4780]: I1210 10:58:51.854974 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/db680a43-b9fa-45d6-b751-f4467cfe5065-webhook-cert\") pod \"obo-prometheus-operator-admission-webhook-7767cf9dd9-bbrkh\" (UID: \"db680a43-b9fa-45d6-b751-f4467cfe5065\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-7767cf9dd9-bbrkh" Dec 10 10:58:51 crc kubenswrapper[4780]: I1210 10:58:51.855133 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/db680a43-b9fa-45d6-b751-f4467cfe5065-apiservice-cert\") pod \"obo-prometheus-operator-admission-webhook-7767cf9dd9-bbrkh\" (UID: \"db680a43-b9fa-45d6-b751-f4467cfe5065\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-7767cf9dd9-bbrkh" Dec 10 10:58:51 crc kubenswrapper[4780]: I1210 10:58:51.862529 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/42449c87-f0c6-4433-92cb-f89e51cb5a14-apiservice-cert\") pod \"obo-prometheus-operator-admission-webhook-7767cf9dd9-fcj9f\" (UID: \"42449c87-f0c6-4433-92cb-f89e51cb5a14\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-7767cf9dd9-fcj9f" Dec 10 10:58:51 crc kubenswrapper[4780]: I1210 10:58:51.865388 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/42449c87-f0c6-4433-92cb-f89e51cb5a14-webhook-cert\") pod \"obo-prometheus-operator-admission-webhook-7767cf9dd9-fcj9f\" (UID: \"42449c87-f0c6-4433-92cb-f89e51cb5a14\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-7767cf9dd9-fcj9f" Dec 10 10:58:51 crc kubenswrapper[4780]: I1210 10:58:51.872957 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/db680a43-b9fa-45d6-b751-f4467cfe5065-webhook-cert\") pod \"obo-prometheus-operator-admission-webhook-7767cf9dd9-bbrkh\" (UID: \"db680a43-b9fa-45d6-b751-f4467cfe5065\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-7767cf9dd9-bbrkh" Dec 10 10:58:51 crc kubenswrapper[4780]: I1210 10:58:51.874849 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operators/observability-operator-d8bb48f5d-kmhks"] Dec 10 10:58:51 crc kubenswrapper[4780]: I1210 10:58:51.875844 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operators/observability-operator-d8bb48f5d-kmhks" Dec 10 10:58:51 crc kubenswrapper[4780]: I1210 10:58:51.877566 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/db680a43-b9fa-45d6-b751-f4467cfe5065-apiservice-cert\") pod \"obo-prometheus-operator-admission-webhook-7767cf9dd9-bbrkh\" (UID: \"db680a43-b9fa-45d6-b751-f4467cfe5065\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-7767cf9dd9-bbrkh" Dec 10 10:58:51 crc kubenswrapper[4780]: I1210 10:58:51.881643 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"observability-operator-tls" Dec 10 10:58:51 crc kubenswrapper[4780]: I1210 10:58:51.882090 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"observability-operator-sa-dockercfg-sw7lc" Dec 10 10:58:51 crc kubenswrapper[4780]: I1210 10:58:51.909351 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-fqxm5" Dec 10 10:58:51 crc kubenswrapper[4780]: I1210 10:58:51.955625 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vm79r\" (UniqueName: \"kubernetes.io/projected/780b464e-7e56-49ba-b0af-fc0731e1290d-kube-api-access-vm79r\") pod \"observability-operator-d8bb48f5d-kmhks\" (UID: \"780b464e-7e56-49ba-b0af-fc0731e1290d\") " pod="openshift-operators/observability-operator-d8bb48f5d-kmhks" Dec 10 10:58:51 crc kubenswrapper[4780]: I1210 10:58:51.955719 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"observability-operator-tls\" (UniqueName: \"kubernetes.io/secret/780b464e-7e56-49ba-b0af-fc0731e1290d-observability-operator-tls\") pod \"observability-operator-d8bb48f5d-kmhks\" (UID: \"780b464e-7e56-49ba-b0af-fc0731e1290d\") " pod="openshift-operators/observability-operator-d8bb48f5d-kmhks" Dec 10 10:58:52 crc kubenswrapper[4780]: I1210 10:58:52.023820 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-7767cf9dd9-bbrkh" Dec 10 10:58:52 crc kubenswrapper[4780]: I1210 10:58:52.040333 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-7767cf9dd9-fcj9f" Dec 10 10:58:52 crc kubenswrapper[4780]: I1210 10:58:52.044149 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operators/perses-operator-5446b9c989-m5pj4"] Dec 10 10:58:52 crc kubenswrapper[4780]: I1210 10:58:52.045369 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operators/perses-operator-5446b9c989-m5pj4" Dec 10 10:58:52 crc kubenswrapper[4780]: I1210 10:58:52.049186 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"perses-operator-dockercfg-278pt" Dec 10 10:58:52 crc kubenswrapper[4780]: I1210 10:58:52.057360 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-brhd8\" (UniqueName: \"kubernetes.io/projected/fb9d5eb8-6ba0-4dea-8226-a3e362924f16-kube-api-access-brhd8\") pod \"perses-operator-5446b9c989-m5pj4\" (UID: \"fb9d5eb8-6ba0-4dea-8226-a3e362924f16\") " pod="openshift-operators/perses-operator-5446b9c989-m5pj4" Dec 10 10:58:52 crc kubenswrapper[4780]: I1210 10:58:52.057448 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openshift-service-ca\" (UniqueName: \"kubernetes.io/configmap/fb9d5eb8-6ba0-4dea-8226-a3e362924f16-openshift-service-ca\") pod \"perses-operator-5446b9c989-m5pj4\" (UID: \"fb9d5eb8-6ba0-4dea-8226-a3e362924f16\") " pod="openshift-operators/perses-operator-5446b9c989-m5pj4" Dec 10 10:58:52 crc kubenswrapper[4780]: I1210 10:58:52.057491 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vm79r\" (UniqueName: \"kubernetes.io/projected/780b464e-7e56-49ba-b0af-fc0731e1290d-kube-api-access-vm79r\") pod \"observability-operator-d8bb48f5d-kmhks\" (UID: \"780b464e-7e56-49ba-b0af-fc0731e1290d\") " pod="openshift-operators/observability-operator-d8bb48f5d-kmhks" Dec 10 10:58:52 crc kubenswrapper[4780]: I1210 10:58:52.057558 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"observability-operator-tls\" (UniqueName: \"kubernetes.io/secret/780b464e-7e56-49ba-b0af-fc0731e1290d-observability-operator-tls\") pod \"observability-operator-d8bb48f5d-kmhks\" (UID: \"780b464e-7e56-49ba-b0af-fc0731e1290d\") " pod="openshift-operators/observability-operator-d8bb48f5d-kmhks" Dec 10 10:58:52 crc kubenswrapper[4780]: I1210 10:58:52.062727 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"observability-operator-tls\" (UniqueName: \"kubernetes.io/secret/780b464e-7e56-49ba-b0af-fc0731e1290d-observability-operator-tls\") pod \"observability-operator-d8bb48f5d-kmhks\" (UID: \"780b464e-7e56-49ba-b0af-fc0731e1290d\") " pod="openshift-operators/observability-operator-d8bb48f5d-kmhks" Dec 10 10:58:52 crc kubenswrapper[4780]: I1210 10:58:52.078769 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vm79r\" (UniqueName: \"kubernetes.io/projected/780b464e-7e56-49ba-b0af-fc0731e1290d-kube-api-access-vm79r\") pod \"observability-operator-d8bb48f5d-kmhks\" (UID: \"780b464e-7e56-49ba-b0af-fc0731e1290d\") " pod="openshift-operators/observability-operator-d8bb48f5d-kmhks" Dec 10 10:58:52 crc kubenswrapper[4780]: I1210 10:58:52.159477 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-brhd8\" (UniqueName: \"kubernetes.io/projected/fb9d5eb8-6ba0-4dea-8226-a3e362924f16-kube-api-access-brhd8\") pod \"perses-operator-5446b9c989-m5pj4\" (UID: \"fb9d5eb8-6ba0-4dea-8226-a3e362924f16\") " pod="openshift-operators/perses-operator-5446b9c989-m5pj4" Dec 10 10:58:52 crc kubenswrapper[4780]: I1210 10:58:52.159568 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openshift-service-ca\" (UniqueName: 
\"kubernetes.io/configmap/fb9d5eb8-6ba0-4dea-8226-a3e362924f16-openshift-service-ca\") pod \"perses-operator-5446b9c989-m5pj4\" (UID: \"fb9d5eb8-6ba0-4dea-8226-a3e362924f16\") " pod="openshift-operators/perses-operator-5446b9c989-m5pj4" Dec 10 10:58:52 crc kubenswrapper[4780]: I1210 10:58:52.160966 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openshift-service-ca\" (UniqueName: \"kubernetes.io/configmap/fb9d5eb8-6ba0-4dea-8226-a3e362924f16-openshift-service-ca\") pod \"perses-operator-5446b9c989-m5pj4\" (UID: \"fb9d5eb8-6ba0-4dea-8226-a3e362924f16\") " pod="openshift-operators/perses-operator-5446b9c989-m5pj4" Dec 10 10:58:52 crc kubenswrapper[4780]: I1210 10:58:52.178271 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-brhd8\" (UniqueName: \"kubernetes.io/projected/fb9d5eb8-6ba0-4dea-8226-a3e362924f16-kube-api-access-brhd8\") pod \"perses-operator-5446b9c989-m5pj4\" (UID: \"fb9d5eb8-6ba0-4dea-8226-a3e362924f16\") " pod="openshift-operators/perses-operator-5446b9c989-m5pj4" Dec 10 10:58:52 crc kubenswrapper[4780]: I1210 10:58:52.252629 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/observability-operator-d8bb48f5d-kmhks" Dec 10 10:58:52 crc kubenswrapper[4780]: I1210 10:58:52.417648 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/perses-operator-5446b9c989-m5pj4" Dec 10 10:58:57 crc kubenswrapper[4780]: I1210 10:58:56.999344 4780 scope.go:117] "RemoveContainer" containerID="8e24d6fa9e64068d44a521036de9669170ec4c27876c8b380e466bbdf0d993dd" Dec 10 10:58:57 crc kubenswrapper[4780]: I1210 10:58:57.475992 4780 patch_prober.go:28] interesting pod/machine-config-daemon-xhdr5 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 10 10:58:57 crc kubenswrapper[4780]: I1210 10:58:57.476101 4780 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" podUID="6bf1dca1-b191-4796-b326-baac53e84045" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 10 10:58:57 crc kubenswrapper[4780]: E1210 10:58:57.512852 4780 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of cafa00280d671b48ed3628917793412a20b9d3be93a80d9a7775003081aea1ab is running failed: container process not found" containerID="cafa00280d671b48ed3628917793412a20b9d3be93a80d9a7775003081aea1ab" cmd=["/bin/bash","-c","set -xeo pipefail\n. 
/ovnkube-lib/ovnkube-lib.sh || exit 1\novndb-readiness-probe \"nb\"\n"] Dec 10 10:58:57 crc kubenswrapper[4780]: E1210 10:58:57.513016 4780 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 3b436c5f5fde27d37bb053b20d1909a42902f8da7cd35713ac9cced9d4f93d30 is running failed: container process not found" containerID="3b436c5f5fde27d37bb053b20d1909a42902f8da7cd35713ac9cced9d4f93d30" cmd=["/bin/bash","-c","#!/bin/bash\ntest -f /etc/cni/net.d/10-ovn-kubernetes.conf\n"] Dec 10 10:58:57 crc kubenswrapper[4780]: E1210 10:58:57.513078 4780 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of df663a4e7ac68c8967db1167ea07b83862e9d52aa13b3f10e51934bb18e7397e is running failed: container process not found" containerID="df663a4e7ac68c8967db1167ea07b83862e9d52aa13b3f10e51934bb18e7397e" cmd=["/bin/bash","-c","set -xeo pipefail\n. /ovnkube-lib/ovnkube-lib.sh || exit 1\novndb-readiness-probe \"sb\"\n"] Dec 10 10:58:57 crc kubenswrapper[4780]: E1210 10:58:57.513725 4780 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 3b436c5f5fde27d37bb053b20d1909a42902f8da7cd35713ac9cced9d4f93d30 is running failed: container process not found" containerID="3b436c5f5fde27d37bb053b20d1909a42902f8da7cd35713ac9cced9d4f93d30" cmd=["/bin/bash","-c","#!/bin/bash\ntest -f /etc/cni/net.d/10-ovn-kubernetes.conf\n"] Dec 10 10:58:57 crc kubenswrapper[4780]: E1210 10:58:57.513792 4780 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of df663a4e7ac68c8967db1167ea07b83862e9d52aa13b3f10e51934bb18e7397e is running failed: container process not found" containerID="df663a4e7ac68c8967db1167ea07b83862e9d52aa13b3f10e51934bb18e7397e" cmd=["/bin/bash","-c","set -xeo pipefail\n. /ovnkube-lib/ovnkube-lib.sh || exit 1\novndb-readiness-probe \"sb\"\n"] Dec 10 10:58:57 crc kubenswrapper[4780]: E1210 10:58:57.513838 4780 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of cafa00280d671b48ed3628917793412a20b9d3be93a80d9a7775003081aea1ab is running failed: container process not found" containerID="cafa00280d671b48ed3628917793412a20b9d3be93a80d9a7775003081aea1ab" cmd=["/bin/bash","-c","set -xeo pipefail\n. /ovnkube-lib/ovnkube-lib.sh || exit 1\novndb-readiness-probe \"nb\"\n"] Dec 10 10:58:57 crc kubenswrapper[4780]: E1210 10:58:57.514936 4780 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of df663a4e7ac68c8967db1167ea07b83862e9d52aa13b3f10e51934bb18e7397e is running failed: container process not found" containerID="df663a4e7ac68c8967db1167ea07b83862e9d52aa13b3f10e51934bb18e7397e" cmd=["/bin/bash","-c","set -xeo pipefail\n. 
/ovnkube-lib/ovnkube-lib.sh || exit 1\novndb-readiness-probe \"sb\"\n"] Dec 10 10:58:57 crc kubenswrapper[4780]: E1210 10:58:57.514965 4780 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of df663a4e7ac68c8967db1167ea07b83862e9d52aa13b3f10e51934bb18e7397e is running failed: container process not found" probeType="Readiness" pod="openshift-ovn-kubernetes/ovnkube-node-fpl55" podUID="cc22221d-0c02-4e8c-8314-c2e6d9290b5e" containerName="sbdb" Dec 10 10:58:57 crc kubenswrapper[4780]: E1210 10:58:57.515130 4780 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of cafa00280d671b48ed3628917793412a20b9d3be93a80d9a7775003081aea1ab is running failed: container process not found" containerID="cafa00280d671b48ed3628917793412a20b9d3be93a80d9a7775003081aea1ab" cmd=["/bin/bash","-c","set -xeo pipefail\n. /ovnkube-lib/ovnkube-lib.sh || exit 1\novndb-readiness-probe \"nb\"\n"] Dec 10 10:58:57 crc kubenswrapper[4780]: E1210 10:58:57.515155 4780 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of cafa00280d671b48ed3628917793412a20b9d3be93a80d9a7775003081aea1ab is running failed: container process not found" probeType="Readiness" pod="openshift-ovn-kubernetes/ovnkube-node-fpl55" podUID="cc22221d-0c02-4e8c-8314-c2e6d9290b5e" containerName="nbdb" Dec 10 10:58:57 crc kubenswrapper[4780]: E1210 10:58:57.515269 4780 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 3b436c5f5fde27d37bb053b20d1909a42902f8da7cd35713ac9cced9d4f93d30 is running failed: container process not found" containerID="3b436c5f5fde27d37bb053b20d1909a42902f8da7cd35713ac9cced9d4f93d30" cmd=["/bin/bash","-c","#!/bin/bash\ntest -f /etc/cni/net.d/10-ovn-kubernetes.conf\n"] Dec 10 10:58:57 crc kubenswrapper[4780]: E1210 10:58:57.515301 4780 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 3b436c5f5fde27d37bb053b20d1909a42902f8da7cd35713ac9cced9d4f93d30 is running failed: container process not found" probeType="Readiness" pod="openshift-ovn-kubernetes/ovnkube-node-fpl55" podUID="cc22221d-0c02-4e8c-8314-c2e6d9290b5e" containerName="ovnkube-controller" Dec 10 10:58:57 crc kubenswrapper[4780]: E1210 10:58:57.917761 4780 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/redhat-operator-index:v4.18" Dec 10 10:58:57 crc kubenswrapper[4780]: E1210 10:58:57.918480 4780 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache 
--cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-q5wxk,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-operators-mkxpc_openshift-marketplace(f85d01b9-be91-4ff9-bf9d-886fa995d582): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Dec 10 10:58:57 crc kubenswrapper[4780]: E1210 10:58:57.919906 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/redhat-operators-mkxpc" podUID="f85d01b9-be91-4ff9-bf9d-886fa995d582" Dec 10 10:58:57 crc kubenswrapper[4780]: E1210 10:58:57.994750 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-operators-mkxpc" podUID="f85d01b9-be91-4ff9-bf9d-886fa995d582" Dec 10 10:58:58 crc kubenswrapper[4780]: I1210 10:58:58.031492 4780 scope.go:117] "RemoveContainer" containerID="ff51c6ad85356e07cc2059fe4b05b1061ae2d8eb800f63ba392c8a4f36112ca5" Dec 10 10:58:58 crc kubenswrapper[4780]: I1210 10:58:58.088596 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-fpl55_cc22221d-0c02-4e8c-8314-c2e6d9290b5e/ovn-acl-logging/0.log" Dec 10 10:58:58 crc kubenswrapper[4780]: I1210 10:58:58.090195 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-fpl55_cc22221d-0c02-4e8c-8314-c2e6d9290b5e/ovn-controller/0.log" Dec 10 10:58:58 crc kubenswrapper[4780]: I1210 10:58:58.091544 4780 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-fpl55" Dec 10 10:58:58 crc kubenswrapper[4780]: I1210 10:58:58.096259 4780 scope.go:117] "RemoveContainer" containerID="caa5c9ed8d7c77d4af7b9797ad6117d06def5e3ab3082d2fd61de5f1d0902a88" Dec 10 10:58:58 crc kubenswrapper[4780]: E1210 10:58:58.122332 4780 log.go:32] "RemoveContainer from runtime service failed" err="rpc error: code = Unknown desc = failed to delete container k8s_ovnkube-controller_ovnkube-node-fpl55_openshift-ovn-kubernetes_cc22221d-0c02-4e8c-8314-c2e6d9290b5e_3 in pod sandbox 0ae266c98886075cfee6fbd79b266be7ef745f38c6d1283c5150f29e3aea474c from index: no such id: 'ff51c6ad85356e07cc2059fe4b05b1061ae2d8eb800f63ba392c8a4f36112ca5'" containerID="ff51c6ad85356e07cc2059fe4b05b1061ae2d8eb800f63ba392c8a4f36112ca5" Dec 10 10:58:58 crc kubenswrapper[4780]: E1210 10:58:58.122436 4780 kuberuntime_gc.go:150] "Failed to remove container" err="rpc error: code = Unknown desc = failed to delete container k8s_ovnkube-controller_ovnkube-node-fpl55_openshift-ovn-kubernetes_cc22221d-0c02-4e8c-8314-c2e6d9290b5e_3 in pod sandbox 0ae266c98886075cfee6fbd79b266be7ef745f38c6d1283c5150f29e3aea474c from index: no such id: 'ff51c6ad85356e07cc2059fe4b05b1061ae2d8eb800f63ba392c8a4f36112ca5'" containerID="ff51c6ad85356e07cc2059fe4b05b1061ae2d8eb800f63ba392c8a4f36112ca5" Dec 10 10:58:58 crc kubenswrapper[4780]: I1210 10:58:58.122473 4780 scope.go:117] "RemoveContainer" containerID="caa5c9ed8d7c77d4af7b9797ad6117d06def5e3ab3082d2fd61de5f1d0902a88" Dec 10 10:58:58 crc kubenswrapper[4780]: E1210 10:58:58.156676 4780 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_perses-operator-5446b9c989-m5pj4_openshift-operators_fb9d5eb8-6ba0-4dea-8226-a3e362924f16_0(e2a6963ff566d12332f7858e144ef0850967238bab465779b70246c08689a0e6): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Dec 10 10:58:58 crc kubenswrapper[4780]: E1210 10:58:58.156909 4780 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_perses-operator-5446b9c989-m5pj4_openshift-operators_fb9d5eb8-6ba0-4dea-8226-a3e362924f16_0(e2a6963ff566d12332f7858e144ef0850967238bab465779b70246c08689a0e6): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/perses-operator-5446b9c989-m5pj4" Dec 10 10:58:58 crc kubenswrapper[4780]: E1210 10:58:58.156993 4780 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_perses-operator-5446b9c989-m5pj4_openshift-operators_fb9d5eb8-6ba0-4dea-8226-a3e362924f16_0(e2a6963ff566d12332f7858e144ef0850967238bab465779b70246c08689a0e6): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-operators/perses-operator-5446b9c989-m5pj4" Dec 10 10:58:58 crc kubenswrapper[4780]: E1210 10:58:58.157223 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"perses-operator-5446b9c989-m5pj4_openshift-operators(fb9d5eb8-6ba0-4dea-8226-a3e362924f16)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"perses-operator-5446b9c989-m5pj4_openshift-operators(fb9d5eb8-6ba0-4dea-8226-a3e362924f16)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_perses-operator-5446b9c989-m5pj4_openshift-operators_fb9d5eb8-6ba0-4dea-8226-a3e362924f16_0(e2a6963ff566d12332f7858e144ef0850967238bab465779b70246c08689a0e6): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\"" pod="openshift-operators/perses-operator-5446b9c989-m5pj4" podUID="fb9d5eb8-6ba0-4dea-8226-a3e362924f16" Dec 10 10:58:58 crc kubenswrapper[4780]: I1210 10:58:58.169045 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/cc22221d-0c02-4e8c-8314-c2e6d9290b5e-host-kubelet\") pod \"cc22221d-0c02-4e8c-8314-c2e6d9290b5e\" (UID: \"cc22221d-0c02-4e8c-8314-c2e6d9290b5e\") " Dec 10 10:58:58 crc kubenswrapper[4780]: I1210 10:58:58.169136 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-whmzl\" (UniqueName: \"kubernetes.io/projected/cc22221d-0c02-4e8c-8314-c2e6d9290b5e-kube-api-access-whmzl\") pod \"cc22221d-0c02-4e8c-8314-c2e6d9290b5e\" (UID: \"cc22221d-0c02-4e8c-8314-c2e6d9290b5e\") " Dec 10 10:58:58 crc kubenswrapper[4780]: I1210 10:58:58.169206 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/cc22221d-0c02-4e8c-8314-c2e6d9290b5e-ovnkube-script-lib\") pod \"cc22221d-0c02-4e8c-8314-c2e6d9290b5e\" (UID: \"cc22221d-0c02-4e8c-8314-c2e6d9290b5e\") " Dec 10 10:58:58 crc kubenswrapper[4780]: I1210 10:58:58.169242 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/cc22221d-0c02-4e8c-8314-c2e6d9290b5e-ovnkube-config\") pod \"cc22221d-0c02-4e8c-8314-c2e6d9290b5e\" (UID: \"cc22221d-0c02-4e8c-8314-c2e6d9290b5e\") " Dec 10 10:58:58 crc kubenswrapper[4780]: I1210 10:58:58.169269 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/cc22221d-0c02-4e8c-8314-c2e6d9290b5e-run-systemd\") pod \"cc22221d-0c02-4e8c-8314-c2e6d9290b5e\" (UID: \"cc22221d-0c02-4e8c-8314-c2e6d9290b5e\") " Dec 10 10:58:58 crc kubenswrapper[4780]: I1210 10:58:58.169301 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/cc22221d-0c02-4e8c-8314-c2e6d9290b5e-env-overrides\") pod \"cc22221d-0c02-4e8c-8314-c2e6d9290b5e\" (UID: \"cc22221d-0c02-4e8c-8314-c2e6d9290b5e\") " Dec 10 10:58:58 crc kubenswrapper[4780]: I1210 10:58:58.169333 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/cc22221d-0c02-4e8c-8314-c2e6d9290b5e-host-cni-bin\") pod \"cc22221d-0c02-4e8c-8314-c2e6d9290b5e\" (UID: \"cc22221d-0c02-4e8c-8314-c2e6d9290b5e\") " Dec 10 10:58:58 crc kubenswrapper[4780]: I1210 10:58:58.169352 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-socket\" 
(UniqueName: \"kubernetes.io/host-path/cc22221d-0c02-4e8c-8314-c2e6d9290b5e-log-socket\") pod \"cc22221d-0c02-4e8c-8314-c2e6d9290b5e\" (UID: \"cc22221d-0c02-4e8c-8314-c2e6d9290b5e\") " Dec 10 10:58:58 crc kubenswrapper[4780]: I1210 10:58:58.169383 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/cc22221d-0c02-4e8c-8314-c2e6d9290b5e-run-ovn\") pod \"cc22221d-0c02-4e8c-8314-c2e6d9290b5e\" (UID: \"cc22221d-0c02-4e8c-8314-c2e6d9290b5e\") " Dec 10 10:58:58 crc kubenswrapper[4780]: I1210 10:58:58.169444 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/cc22221d-0c02-4e8c-8314-c2e6d9290b5e-host-cni-netd\") pod \"cc22221d-0c02-4e8c-8314-c2e6d9290b5e\" (UID: \"cc22221d-0c02-4e8c-8314-c2e6d9290b5e\") " Dec 10 10:58:58 crc kubenswrapper[4780]: I1210 10:58:58.169489 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/cc22221d-0c02-4e8c-8314-c2e6d9290b5e-host-kubelet" (OuterVolumeSpecName: "host-kubelet") pod "cc22221d-0c02-4e8c-8314-c2e6d9290b5e" (UID: "cc22221d-0c02-4e8c-8314-c2e6d9290b5e"). InnerVolumeSpecName "host-kubelet". PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 10 10:58:58 crc kubenswrapper[4780]: I1210 10:58:58.169604 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/cc22221d-0c02-4e8c-8314-c2e6d9290b5e-host-slash\") pod \"cc22221d-0c02-4e8c-8314-c2e6d9290b5e\" (UID: \"cc22221d-0c02-4e8c-8314-c2e6d9290b5e\") " Dec 10 10:58:58 crc kubenswrapper[4780]: I1210 10:58:58.169642 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/cc22221d-0c02-4e8c-8314-c2e6d9290b5e-node-log\") pod \"cc22221d-0c02-4e8c-8314-c2e6d9290b5e\" (UID: \"cc22221d-0c02-4e8c-8314-c2e6d9290b5e\") " Dec 10 10:58:58 crc kubenswrapper[4780]: I1210 10:58:58.169672 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/cc22221d-0c02-4e8c-8314-c2e6d9290b5e-host-var-lib-cni-networks-ovn-kubernetes\") pod \"cc22221d-0c02-4e8c-8314-c2e6d9290b5e\" (UID: \"cc22221d-0c02-4e8c-8314-c2e6d9290b5e\") " Dec 10 10:58:58 crc kubenswrapper[4780]: I1210 10:58:58.169698 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/cc22221d-0c02-4e8c-8314-c2e6d9290b5e-systemd-units\") pod \"cc22221d-0c02-4e8c-8314-c2e6d9290b5e\" (UID: \"cc22221d-0c02-4e8c-8314-c2e6d9290b5e\") " Dec 10 10:58:58 crc kubenswrapper[4780]: I1210 10:58:58.169736 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/cc22221d-0c02-4e8c-8314-c2e6d9290b5e-host-run-netns\") pod \"cc22221d-0c02-4e8c-8314-c2e6d9290b5e\" (UID: \"cc22221d-0c02-4e8c-8314-c2e6d9290b5e\") " Dec 10 10:58:58 crc kubenswrapper[4780]: I1210 10:58:58.169774 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/cc22221d-0c02-4e8c-8314-c2e6d9290b5e-var-lib-openvswitch\") pod \"cc22221d-0c02-4e8c-8314-c2e6d9290b5e\" (UID: \"cc22221d-0c02-4e8c-8314-c2e6d9290b5e\") " Dec 10 10:58:58 crc kubenswrapper[4780]: I1210 10:58:58.169804 4780 
reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/cc22221d-0c02-4e8c-8314-c2e6d9290b5e-etc-openvswitch\") pod \"cc22221d-0c02-4e8c-8314-c2e6d9290b5e\" (UID: \"cc22221d-0c02-4e8c-8314-c2e6d9290b5e\") " Dec 10 10:58:58 crc kubenswrapper[4780]: I1210 10:58:58.169826 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/cc22221d-0c02-4e8c-8314-c2e6d9290b5e-host-run-ovn-kubernetes\") pod \"cc22221d-0c02-4e8c-8314-c2e6d9290b5e\" (UID: \"cc22221d-0c02-4e8c-8314-c2e6d9290b5e\") " Dec 10 10:58:58 crc kubenswrapper[4780]: I1210 10:58:58.169877 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/cc22221d-0c02-4e8c-8314-c2e6d9290b5e-ovn-node-metrics-cert\") pod \"cc22221d-0c02-4e8c-8314-c2e6d9290b5e\" (UID: \"cc22221d-0c02-4e8c-8314-c2e6d9290b5e\") " Dec 10 10:58:58 crc kubenswrapper[4780]: I1210 10:58:58.169916 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/cc22221d-0c02-4e8c-8314-c2e6d9290b5e-run-openvswitch\") pod \"cc22221d-0c02-4e8c-8314-c2e6d9290b5e\" (UID: \"cc22221d-0c02-4e8c-8314-c2e6d9290b5e\") " Dec 10 10:58:58 crc kubenswrapper[4780]: I1210 10:58:58.170314 4780 reconciler_common.go:293] "Volume detached for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/cc22221d-0c02-4e8c-8314-c2e6d9290b5e-host-kubelet\") on node \"crc\" DevicePath \"\"" Dec 10 10:58:58 crc kubenswrapper[4780]: I1210 10:58:58.170413 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/cc22221d-0c02-4e8c-8314-c2e6d9290b5e-run-openvswitch" (OuterVolumeSpecName: "run-openvswitch") pod "cc22221d-0c02-4e8c-8314-c2e6d9290b5e" (UID: "cc22221d-0c02-4e8c-8314-c2e6d9290b5e"). InnerVolumeSpecName "run-openvswitch". PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 10 10:58:58 crc kubenswrapper[4780]: I1210 10:58:58.170454 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/cc22221d-0c02-4e8c-8314-c2e6d9290b5e-host-cni-bin" (OuterVolumeSpecName: "host-cni-bin") pod "cc22221d-0c02-4e8c-8314-c2e6d9290b5e" (UID: "cc22221d-0c02-4e8c-8314-c2e6d9290b5e"). InnerVolumeSpecName "host-cni-bin". PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 10 10:58:58 crc kubenswrapper[4780]: I1210 10:58:58.170474 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/cc22221d-0c02-4e8c-8314-c2e6d9290b5e-log-socket" (OuterVolumeSpecName: "log-socket") pod "cc22221d-0c02-4e8c-8314-c2e6d9290b5e" (UID: "cc22221d-0c02-4e8c-8314-c2e6d9290b5e"). InnerVolumeSpecName "log-socket". PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 10 10:58:58 crc kubenswrapper[4780]: I1210 10:58:58.170493 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/cc22221d-0c02-4e8c-8314-c2e6d9290b5e-run-ovn" (OuterVolumeSpecName: "run-ovn") pod "cc22221d-0c02-4e8c-8314-c2e6d9290b5e" (UID: "cc22221d-0c02-4e8c-8314-c2e6d9290b5e"). InnerVolumeSpecName "run-ovn". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 10 10:58:58 crc kubenswrapper[4780]: I1210 10:58:58.170513 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/cc22221d-0c02-4e8c-8314-c2e6d9290b5e-host-cni-netd" (OuterVolumeSpecName: "host-cni-netd") pod "cc22221d-0c02-4e8c-8314-c2e6d9290b5e" (UID: "cc22221d-0c02-4e8c-8314-c2e6d9290b5e"). InnerVolumeSpecName "host-cni-netd". PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 10 10:58:58 crc kubenswrapper[4780]: I1210 10:58:58.170532 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/cc22221d-0c02-4e8c-8314-c2e6d9290b5e-host-slash" (OuterVolumeSpecName: "host-slash") pod "cc22221d-0c02-4e8c-8314-c2e6d9290b5e" (UID: "cc22221d-0c02-4e8c-8314-c2e6d9290b5e"). InnerVolumeSpecName "host-slash". PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 10 10:58:58 crc kubenswrapper[4780]: I1210 10:58:58.170556 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/cc22221d-0c02-4e8c-8314-c2e6d9290b5e-node-log" (OuterVolumeSpecName: "node-log") pod "cc22221d-0c02-4e8c-8314-c2e6d9290b5e" (UID: "cc22221d-0c02-4e8c-8314-c2e6d9290b5e"). InnerVolumeSpecName "node-log". PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 10 10:58:58 crc kubenswrapper[4780]: I1210 10:58:58.170582 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/cc22221d-0c02-4e8c-8314-c2e6d9290b5e-host-var-lib-cni-networks-ovn-kubernetes" (OuterVolumeSpecName: "host-var-lib-cni-networks-ovn-kubernetes") pod "cc22221d-0c02-4e8c-8314-c2e6d9290b5e" (UID: "cc22221d-0c02-4e8c-8314-c2e6d9290b5e"). InnerVolumeSpecName "host-var-lib-cni-networks-ovn-kubernetes". PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 10 10:58:58 crc kubenswrapper[4780]: I1210 10:58:58.170626 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/cc22221d-0c02-4e8c-8314-c2e6d9290b5e-systemd-units" (OuterVolumeSpecName: "systemd-units") pod "cc22221d-0c02-4e8c-8314-c2e6d9290b5e" (UID: "cc22221d-0c02-4e8c-8314-c2e6d9290b5e"). InnerVolumeSpecName "systemd-units". PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 10 10:58:58 crc kubenswrapper[4780]: I1210 10:58:58.170642 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/cc22221d-0c02-4e8c-8314-c2e6d9290b5e-env-overrides" (OuterVolumeSpecName: "env-overrides") pod "cc22221d-0c02-4e8c-8314-c2e6d9290b5e" (UID: "cc22221d-0c02-4e8c-8314-c2e6d9290b5e"). InnerVolumeSpecName "env-overrides". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 10:58:58 crc kubenswrapper[4780]: I1210 10:58:58.170683 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/cc22221d-0c02-4e8c-8314-c2e6d9290b5e-var-lib-openvswitch" (OuterVolumeSpecName: "var-lib-openvswitch") pod "cc22221d-0c02-4e8c-8314-c2e6d9290b5e" (UID: "cc22221d-0c02-4e8c-8314-c2e6d9290b5e"). InnerVolumeSpecName "var-lib-openvswitch". PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 10 10:58:58 crc kubenswrapper[4780]: I1210 10:58:58.170660 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/cc22221d-0c02-4e8c-8314-c2e6d9290b5e-host-run-netns" (OuterVolumeSpecName: "host-run-netns") pod "cc22221d-0c02-4e8c-8314-c2e6d9290b5e" (UID: "cc22221d-0c02-4e8c-8314-c2e6d9290b5e"). 
InnerVolumeSpecName "host-run-netns". PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 10 10:58:58 crc kubenswrapper[4780]: I1210 10:58:58.170745 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/cc22221d-0c02-4e8c-8314-c2e6d9290b5e-etc-openvswitch" (OuterVolumeSpecName: "etc-openvswitch") pod "cc22221d-0c02-4e8c-8314-c2e6d9290b5e" (UID: "cc22221d-0c02-4e8c-8314-c2e6d9290b5e"). InnerVolumeSpecName "etc-openvswitch". PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 10 10:58:58 crc kubenswrapper[4780]: I1210 10:58:58.170786 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/cc22221d-0c02-4e8c-8314-c2e6d9290b5e-host-run-ovn-kubernetes" (OuterVolumeSpecName: "host-run-ovn-kubernetes") pod "cc22221d-0c02-4e8c-8314-c2e6d9290b5e" (UID: "cc22221d-0c02-4e8c-8314-c2e6d9290b5e"). InnerVolumeSpecName "host-run-ovn-kubernetes". PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 10 10:58:58 crc kubenswrapper[4780]: I1210 10:58:58.173843 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/cc22221d-0c02-4e8c-8314-c2e6d9290b5e-ovnkube-script-lib" (OuterVolumeSpecName: "ovnkube-script-lib") pod "cc22221d-0c02-4e8c-8314-c2e6d9290b5e" (UID: "cc22221d-0c02-4e8c-8314-c2e6d9290b5e"). InnerVolumeSpecName "ovnkube-script-lib". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 10:58:58 crc kubenswrapper[4780]: I1210 10:58:58.179841 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/cc22221d-0c02-4e8c-8314-c2e6d9290b5e-ovnkube-config" (OuterVolumeSpecName: "ovnkube-config") pod "cc22221d-0c02-4e8c-8314-c2e6d9290b5e" (UID: "cc22221d-0c02-4e8c-8314-c2e6d9290b5e"). InnerVolumeSpecName "ovnkube-config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 10:58:58 crc kubenswrapper[4780]: I1210 10:58:58.273370 4780 reconciler_common.go:293] "Volume detached for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/cc22221d-0c02-4e8c-8314-c2e6d9290b5e-host-slash\") on node \"crc\" DevicePath \"\"" Dec 10 10:58:58 crc kubenswrapper[4780]: I1210 10:58:58.273442 4780 reconciler_common.go:293] "Volume detached for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/cc22221d-0c02-4e8c-8314-c2e6d9290b5e-node-log\") on node \"crc\" DevicePath \"\"" Dec 10 10:58:58 crc kubenswrapper[4780]: I1210 10:58:58.273465 4780 reconciler_common.go:293] "Volume detached for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/cc22221d-0c02-4e8c-8314-c2e6d9290b5e-host-var-lib-cni-networks-ovn-kubernetes\") on node \"crc\" DevicePath \"\"" Dec 10 10:58:58 crc kubenswrapper[4780]: I1210 10:58:58.273483 4780 reconciler_common.go:293] "Volume detached for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/cc22221d-0c02-4e8c-8314-c2e6d9290b5e-systemd-units\") on node \"crc\" DevicePath \"\"" Dec 10 10:58:58 crc kubenswrapper[4780]: I1210 10:58:58.273504 4780 reconciler_common.go:293] "Volume detached for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/cc22221d-0c02-4e8c-8314-c2e6d9290b5e-host-run-netns\") on node \"crc\" DevicePath \"\"" Dec 10 10:58:58 crc kubenswrapper[4780]: I1210 10:58:58.273523 4780 reconciler_common.go:293] "Volume detached for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/cc22221d-0c02-4e8c-8314-c2e6d9290b5e-var-lib-openvswitch\") on node \"crc\" DevicePath \"\"" Dec 10 10:58:58 crc kubenswrapper[4780]: I1210 10:58:58.273549 4780 reconciler_common.go:293] "Volume detached for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/cc22221d-0c02-4e8c-8314-c2e6d9290b5e-etc-openvswitch\") on node \"crc\" DevicePath \"\"" Dec 10 10:58:58 crc kubenswrapper[4780]: I1210 10:58:58.273566 4780 reconciler_common.go:293] "Volume detached for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/cc22221d-0c02-4e8c-8314-c2e6d9290b5e-host-run-ovn-kubernetes\") on node \"crc\" DevicePath \"\"" Dec 10 10:58:58 crc kubenswrapper[4780]: I1210 10:58:58.273595 4780 reconciler_common.go:293] "Volume detached for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/cc22221d-0c02-4e8c-8314-c2e6d9290b5e-run-openvswitch\") on node \"crc\" DevicePath \"\"" Dec 10 10:58:58 crc kubenswrapper[4780]: I1210 10:58:58.273629 4780 reconciler_common.go:293] "Volume detached for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/cc22221d-0c02-4e8c-8314-c2e6d9290b5e-ovnkube-script-lib\") on node \"crc\" DevicePath \"\"" Dec 10 10:58:58 crc kubenswrapper[4780]: I1210 10:58:58.273661 4780 reconciler_common.go:293] "Volume detached for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/cc22221d-0c02-4e8c-8314-c2e6d9290b5e-ovnkube-config\") on node \"crc\" DevicePath \"\"" Dec 10 10:58:58 crc kubenswrapper[4780]: I1210 10:58:58.273688 4780 reconciler_common.go:293] "Volume detached for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/cc22221d-0c02-4e8c-8314-c2e6d9290b5e-env-overrides\") on node \"crc\" DevicePath \"\"" Dec 10 10:58:58 crc kubenswrapper[4780]: I1210 10:58:58.273713 4780 reconciler_common.go:293] "Volume detached for volume \"host-cni-bin\" (UniqueName: 
\"kubernetes.io/host-path/cc22221d-0c02-4e8c-8314-c2e6d9290b5e-host-cni-bin\") on node \"crc\" DevicePath \"\"" Dec 10 10:58:58 crc kubenswrapper[4780]: I1210 10:58:58.273737 4780 reconciler_common.go:293] "Volume detached for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/cc22221d-0c02-4e8c-8314-c2e6d9290b5e-log-socket\") on node \"crc\" DevicePath \"\"" Dec 10 10:58:58 crc kubenswrapper[4780]: I1210 10:58:58.273758 4780 reconciler_common.go:293] "Volume detached for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/cc22221d-0c02-4e8c-8314-c2e6d9290b5e-run-ovn\") on node \"crc\" DevicePath \"\"" Dec 10 10:58:58 crc kubenswrapper[4780]: I1210 10:58:58.273782 4780 reconciler_common.go:293] "Volume detached for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/cc22221d-0c02-4e8c-8314-c2e6d9290b5e-host-cni-netd\") on node \"crc\" DevicePath \"\"" Dec 10 10:58:58 crc kubenswrapper[4780]: I1210 10:58:58.300247 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cc22221d-0c02-4e8c-8314-c2e6d9290b5e-ovn-node-metrics-cert" (OuterVolumeSpecName: "ovn-node-metrics-cert") pod "cc22221d-0c02-4e8c-8314-c2e6d9290b5e" (UID: "cc22221d-0c02-4e8c-8314-c2e6d9290b5e"). InnerVolumeSpecName "ovn-node-metrics-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 10:58:58 crc kubenswrapper[4780]: I1210 10:58:58.301491 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cc22221d-0c02-4e8c-8314-c2e6d9290b5e-kube-api-access-whmzl" (OuterVolumeSpecName: "kube-api-access-whmzl") pod "cc22221d-0c02-4e8c-8314-c2e6d9290b5e" (UID: "cc22221d-0c02-4e8c-8314-c2e6d9290b5e"). InnerVolumeSpecName "kube-api-access-whmzl". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 10:58:58 crc kubenswrapper[4780]: I1210 10:58:58.304726 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/cc22221d-0c02-4e8c-8314-c2e6d9290b5e-run-systemd" (OuterVolumeSpecName: "run-systemd") pod "cc22221d-0c02-4e8c-8314-c2e6d9290b5e" (UID: "cc22221d-0c02-4e8c-8314-c2e6d9290b5e"). InnerVolumeSpecName "run-systemd". PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 10 10:58:58 crc kubenswrapper[4780]: E1210 10:58:58.312714 4780 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-7767cf9dd9-bbrkh_openshift-operators_db680a43-b9fa-45d6-b751-f4467cfe5065_0(a681f563433bc653a8c856e41d8f57b80b06726bac2ba9cf32bf577c4843bd10): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Dec 10 10:58:58 crc kubenswrapper[4780]: E1210 10:58:58.312782 4780 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-7767cf9dd9-bbrkh_openshift-operators_db680a43-b9fa-45d6-b751-f4467cfe5065_0(a681f563433bc653a8c856e41d8f57b80b06726bac2ba9cf32bf577c4843bd10): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-operators/obo-prometheus-operator-admission-webhook-7767cf9dd9-bbrkh" Dec 10 10:58:58 crc kubenswrapper[4780]: E1210 10:58:58.312811 4780 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-7767cf9dd9-bbrkh_openshift-operators_db680a43-b9fa-45d6-b751-f4467cfe5065_0(a681f563433bc653a8c856e41d8f57b80b06726bac2ba9cf32bf577c4843bd10): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/obo-prometheus-operator-admission-webhook-7767cf9dd9-bbrkh" Dec 10 10:58:58 crc kubenswrapper[4780]: E1210 10:58:58.312861 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"obo-prometheus-operator-admission-webhook-7767cf9dd9-bbrkh_openshift-operators(db680a43-b9fa-45d6-b751-f4467cfe5065)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"obo-prometheus-operator-admission-webhook-7767cf9dd9-bbrkh_openshift-operators(db680a43-b9fa-45d6-b751-f4467cfe5065)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-7767cf9dd9-bbrkh_openshift-operators_db680a43-b9fa-45d6-b751-f4467cfe5065_0(a681f563433bc653a8c856e41d8f57b80b06726bac2ba9cf32bf577c4843bd10): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\"" pod="openshift-operators/obo-prometheus-operator-admission-webhook-7767cf9dd9-bbrkh" podUID="db680a43-b9fa-45d6-b751-f4467cfe5065" Dec 10 10:58:58 crc kubenswrapper[4780]: E1210 10:58:58.334522 4780 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_observability-operator-d8bb48f5d-kmhks_openshift-operators_780b464e-7e56-49ba-b0af-fc0731e1290d_0(6a521f621f09fbce26e1c37e8e960e63fc4aecb9eee9aa32b7a97e2027646845): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Dec 10 10:58:58 crc kubenswrapper[4780]: E1210 10:58:58.334602 4780 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_observability-operator-d8bb48f5d-kmhks_openshift-operators_780b464e-7e56-49ba-b0af-fc0731e1290d_0(6a521f621f09fbce26e1c37e8e960e63fc4aecb9eee9aa32b7a97e2027646845): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/observability-operator-d8bb48f5d-kmhks" Dec 10 10:58:58 crc kubenswrapper[4780]: E1210 10:58:58.334626 4780 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_observability-operator-d8bb48f5d-kmhks_openshift-operators_780b464e-7e56-49ba-b0af-fc0731e1290d_0(6a521f621f09fbce26e1c37e8e960e63fc4aecb9eee9aa32b7a97e2027646845): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-operators/observability-operator-d8bb48f5d-kmhks" Dec 10 10:58:58 crc kubenswrapper[4780]: E1210 10:58:58.335072 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"observability-operator-d8bb48f5d-kmhks_openshift-operators(780b464e-7e56-49ba-b0af-fc0731e1290d)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"observability-operator-d8bb48f5d-kmhks_openshift-operators(780b464e-7e56-49ba-b0af-fc0731e1290d)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_observability-operator-d8bb48f5d-kmhks_openshift-operators_780b464e-7e56-49ba-b0af-fc0731e1290d_0(6a521f621f09fbce26e1c37e8e960e63fc4aecb9eee9aa32b7a97e2027646845): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\"" pod="openshift-operators/observability-operator-d8bb48f5d-kmhks" podUID="780b464e-7e56-49ba-b0af-fc0731e1290d" Dec 10 10:58:58 crc kubenswrapper[4780]: E1210 10:58:58.344403 4780 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-7767cf9dd9-fcj9f_openshift-operators_42449c87-f0c6-4433-92cb-f89e51cb5a14_0(e49373b59adb2d40901794621c78db0337eea3edcc54a974b8c01e456177b9d8): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Dec 10 10:58:58 crc kubenswrapper[4780]: E1210 10:58:58.344497 4780 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-7767cf9dd9-fcj9f_openshift-operators_42449c87-f0c6-4433-92cb-f89e51cb5a14_0(e49373b59adb2d40901794621c78db0337eea3edcc54a974b8c01e456177b9d8): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/obo-prometheus-operator-admission-webhook-7767cf9dd9-fcj9f" Dec 10 10:58:58 crc kubenswrapper[4780]: E1210 10:58:58.344524 4780 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-7767cf9dd9-fcj9f_openshift-operators_42449c87-f0c6-4433-92cb-f89e51cb5a14_0(e49373b59adb2d40901794621c78db0337eea3edcc54a974b8c01e456177b9d8): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/obo-prometheus-operator-admission-webhook-7767cf9dd9-fcj9f" Dec 10 10:58:58 crc kubenswrapper[4780]: E1210 10:58:58.344609 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"obo-prometheus-operator-admission-webhook-7767cf9dd9-fcj9f_openshift-operators(42449c87-f0c6-4433-92cb-f89e51cb5a14)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"obo-prometheus-operator-admission-webhook-7767cf9dd9-fcj9f_openshift-operators(42449c87-f0c6-4433-92cb-f89e51cb5a14)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-7767cf9dd9-fcj9f_openshift-operators_42449c87-f0c6-4433-92cb-f89e51cb5a14_0(e49373b59adb2d40901794621c78db0337eea3edcc54a974b8c01e456177b9d8): no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\"" pod="openshift-operators/obo-prometheus-operator-admission-webhook-7767cf9dd9-fcj9f" podUID="42449c87-f0c6-4433-92cb-f89e51cb5a14" Dec 10 10:58:58 crc kubenswrapper[4780]: E1210 10:58:58.350205 4780 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-668cf9dfbb-fqxm5_openshift-operators_fd80e9af-20c4-4aaa-9f38-4f46c3b610fb_0(b607f8ac470719a4c7cd4bbfe9c6b3473297701e8c936125a167c0a4af37392d): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Dec 10 10:58:58 crc kubenswrapper[4780]: E1210 10:58:58.350302 4780 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-668cf9dfbb-fqxm5_openshift-operators_fd80e9af-20c4-4aaa-9f38-4f46c3b610fb_0(b607f8ac470719a4c7cd4bbfe9c6b3473297701e8c936125a167c0a4af37392d): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-fqxm5" Dec 10 10:58:58 crc kubenswrapper[4780]: E1210 10:58:58.350349 4780 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-668cf9dfbb-fqxm5_openshift-operators_fd80e9af-20c4-4aaa-9f38-4f46c3b610fb_0(b607f8ac470719a4c7cd4bbfe9c6b3473297701e8c936125a167c0a4af37392d): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-fqxm5" Dec 10 10:58:58 crc kubenswrapper[4780]: E1210 10:58:58.350463 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"obo-prometheus-operator-668cf9dfbb-fqxm5_openshift-operators(fd80e9af-20c4-4aaa-9f38-4f46c3b610fb)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"obo-prometheus-operator-668cf9dfbb-fqxm5_openshift-operators(fd80e9af-20c4-4aaa-9f38-4f46c3b610fb)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-668cf9dfbb-fqxm5_openshift-operators_fd80e9af-20c4-4aaa-9f38-4f46c3b610fb_0(b607f8ac470719a4c7cd4bbfe9c6b3473297701e8c936125a167c0a4af37392d): no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\"" pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-fqxm5" podUID="fd80e9af-20c4-4aaa-9f38-4f46c3b610fb" Dec 10 10:58:58 crc kubenswrapper[4780]: I1210 10:58:58.375959 4780 reconciler_common.go:293] "Volume detached for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/cc22221d-0c02-4e8c-8314-c2e6d9290b5e-ovn-node-metrics-cert\") on node \"crc\" DevicePath \"\"" Dec 10 10:58:58 crc kubenswrapper[4780]: I1210 10:58:58.376014 4780 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-whmzl\" (UniqueName: \"kubernetes.io/projected/cc22221d-0c02-4e8c-8314-c2e6d9290b5e-kube-api-access-whmzl\") on node \"crc\" DevicePath \"\"" Dec 10 10:58:58 crc kubenswrapper[4780]: I1210 10:58:58.376068 4780 reconciler_common.go:293] "Volume detached for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/cc22221d-0c02-4e8c-8314-c2e6d9290b5e-run-systemd\") on node \"crc\" DevicePath \"\"" Dec 10 10:58:58 crc kubenswrapper[4780]: E1210 10:58:58.454362 4780 log.go:32] "RemoveContainer from runtime service failed" err="rpc error: code = Unknown desc = failed to delete container k8s_kube-multus_multus-8cwb7_openshift-multus_deadb49b-61b8-435f-8168-d7bd3c01b5ad_1 in pod sandbox 2828d1ecfd6b4790649fd340bcbbd6e8aacc6cf483dc00dce4adc5d8d14ef5be from index: no such id: 'caa5c9ed8d7c77d4af7b9797ad6117d06def5e3ab3082d2fd61de5f1d0902a88'" containerID="caa5c9ed8d7c77d4af7b9797ad6117d06def5e3ab3082d2fd61de5f1d0902a88" Dec 10 10:58:58 crc kubenswrapper[4780]: E1210 10:58:58.454576 4780 kuberuntime_gc.go:150] "Failed to remove container" err="rpc error: code = Unknown desc = failed to delete container k8s_kube-multus_multus-8cwb7_openshift-multus_deadb49b-61b8-435f-8168-d7bd3c01b5ad_1 in pod sandbox 2828d1ecfd6b4790649fd340bcbbd6e8aacc6cf483dc00dce4adc5d8d14ef5be from index: no such id: 'caa5c9ed8d7c77d4af7b9797ad6117d06def5e3ab3082d2fd61de5f1d0902a88'" containerID="caa5c9ed8d7c77d4af7b9797ad6117d06def5e3ab3082d2fd61de5f1d0902a88" Dec 10 10:58:58 crc kubenswrapper[4780]: I1210 10:58:58.529861 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-zq4nc"] Dec 10 10:58:58 crc kubenswrapper[4780]: E1210 10:58:58.537342 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cc22221d-0c02-4e8c-8314-c2e6d9290b5e" containerName="nbdb" Dec 10 10:58:58 crc kubenswrapper[4780]: I1210 10:58:58.537392 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="cc22221d-0c02-4e8c-8314-c2e6d9290b5e" containerName="nbdb" Dec 10 10:58:58 crc kubenswrapper[4780]: E1210 10:58:58.537404 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cc22221d-0c02-4e8c-8314-c2e6d9290b5e" containerName="ovnkube-controller" Dec 10 10:58:58 crc kubenswrapper[4780]: I1210 10:58:58.537413 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="cc22221d-0c02-4e8c-8314-c2e6d9290b5e" containerName="ovnkube-controller" Dec 10 10:58:58 crc kubenswrapper[4780]: E1210 10:58:58.537425 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cc22221d-0c02-4e8c-8314-c2e6d9290b5e" containerName="kube-rbac-proxy-node" Dec 10 10:58:58 crc kubenswrapper[4780]: I1210 10:58:58.537434 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="cc22221d-0c02-4e8c-8314-c2e6d9290b5e" containerName="kube-rbac-proxy-node" Dec 10 10:58:58 crc kubenswrapper[4780]: E1210 10:58:58.537449 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cc22221d-0c02-4e8c-8314-c2e6d9290b5e" 
containerName="kube-rbac-proxy-ovn-metrics" Dec 10 10:58:58 crc kubenswrapper[4780]: I1210 10:58:58.537457 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="cc22221d-0c02-4e8c-8314-c2e6d9290b5e" containerName="kube-rbac-proxy-ovn-metrics" Dec 10 10:58:58 crc kubenswrapper[4780]: E1210 10:58:58.537465 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cc22221d-0c02-4e8c-8314-c2e6d9290b5e" containerName="ovnkube-controller" Dec 10 10:58:58 crc kubenswrapper[4780]: I1210 10:58:58.537473 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="cc22221d-0c02-4e8c-8314-c2e6d9290b5e" containerName="ovnkube-controller" Dec 10 10:58:58 crc kubenswrapper[4780]: E1210 10:58:58.537489 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cc22221d-0c02-4e8c-8314-c2e6d9290b5e" containerName="northd" Dec 10 10:58:58 crc kubenswrapper[4780]: I1210 10:58:58.537496 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="cc22221d-0c02-4e8c-8314-c2e6d9290b5e" containerName="northd" Dec 10 10:58:58 crc kubenswrapper[4780]: E1210 10:58:58.537510 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cc22221d-0c02-4e8c-8314-c2e6d9290b5e" containerName="ovn-acl-logging" Dec 10 10:58:58 crc kubenswrapper[4780]: I1210 10:58:58.537517 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="cc22221d-0c02-4e8c-8314-c2e6d9290b5e" containerName="ovn-acl-logging" Dec 10 10:58:58 crc kubenswrapper[4780]: E1210 10:58:58.537527 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cc22221d-0c02-4e8c-8314-c2e6d9290b5e" containerName="ovnkube-controller" Dec 10 10:58:58 crc kubenswrapper[4780]: I1210 10:58:58.537537 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="cc22221d-0c02-4e8c-8314-c2e6d9290b5e" containerName="ovnkube-controller" Dec 10 10:58:58 crc kubenswrapper[4780]: E1210 10:58:58.537545 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cc22221d-0c02-4e8c-8314-c2e6d9290b5e" containerName="ovn-controller" Dec 10 10:58:58 crc kubenswrapper[4780]: I1210 10:58:58.537552 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="cc22221d-0c02-4e8c-8314-c2e6d9290b5e" containerName="ovn-controller" Dec 10 10:58:58 crc kubenswrapper[4780]: E1210 10:58:58.537564 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cc22221d-0c02-4e8c-8314-c2e6d9290b5e" containerName="sbdb" Dec 10 10:58:58 crc kubenswrapper[4780]: I1210 10:58:58.537571 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="cc22221d-0c02-4e8c-8314-c2e6d9290b5e" containerName="sbdb" Dec 10 10:58:58 crc kubenswrapper[4780]: E1210 10:58:58.537587 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cc22221d-0c02-4e8c-8314-c2e6d9290b5e" containerName="kubecfg-setup" Dec 10 10:58:58 crc kubenswrapper[4780]: I1210 10:58:58.537594 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="cc22221d-0c02-4e8c-8314-c2e6d9290b5e" containerName="kubecfg-setup" Dec 10 10:58:58 crc kubenswrapper[4780]: E1210 10:58:58.537629 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cc22221d-0c02-4e8c-8314-c2e6d9290b5e" containerName="ovnkube-controller" Dec 10 10:58:58 crc kubenswrapper[4780]: I1210 10:58:58.537636 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="cc22221d-0c02-4e8c-8314-c2e6d9290b5e" containerName="ovnkube-controller" Dec 10 10:58:58 crc kubenswrapper[4780]: I1210 10:58:58.537788 4780 memory_manager.go:354] "RemoveStaleState removing state" 
podUID="cc22221d-0c02-4e8c-8314-c2e6d9290b5e" containerName="ovnkube-controller" Dec 10 10:58:58 crc kubenswrapper[4780]: I1210 10:58:58.537801 4780 memory_manager.go:354] "RemoveStaleState removing state" podUID="cc22221d-0c02-4e8c-8314-c2e6d9290b5e" containerName="ovnkube-controller" Dec 10 10:58:58 crc kubenswrapper[4780]: I1210 10:58:58.537882 4780 memory_manager.go:354] "RemoveStaleState removing state" podUID="cc22221d-0c02-4e8c-8314-c2e6d9290b5e" containerName="ovn-controller" Dec 10 10:58:58 crc kubenswrapper[4780]: I1210 10:58:58.537899 4780 memory_manager.go:354] "RemoveStaleState removing state" podUID="cc22221d-0c02-4e8c-8314-c2e6d9290b5e" containerName="ovnkube-controller" Dec 10 10:58:58 crc kubenswrapper[4780]: I1210 10:58:58.537910 4780 memory_manager.go:354] "RemoveStaleState removing state" podUID="cc22221d-0c02-4e8c-8314-c2e6d9290b5e" containerName="kube-rbac-proxy-node" Dec 10 10:58:58 crc kubenswrapper[4780]: I1210 10:58:58.537939 4780 memory_manager.go:354] "RemoveStaleState removing state" podUID="cc22221d-0c02-4e8c-8314-c2e6d9290b5e" containerName="sbdb" Dec 10 10:58:58 crc kubenswrapper[4780]: I1210 10:58:58.537951 4780 memory_manager.go:354] "RemoveStaleState removing state" podUID="cc22221d-0c02-4e8c-8314-c2e6d9290b5e" containerName="northd" Dec 10 10:58:58 crc kubenswrapper[4780]: I1210 10:58:58.537961 4780 memory_manager.go:354] "RemoveStaleState removing state" podUID="cc22221d-0c02-4e8c-8314-c2e6d9290b5e" containerName="kube-rbac-proxy-ovn-metrics" Dec 10 10:58:58 crc kubenswrapper[4780]: I1210 10:58:58.537973 4780 memory_manager.go:354] "RemoveStaleState removing state" podUID="cc22221d-0c02-4e8c-8314-c2e6d9290b5e" containerName="ovn-acl-logging" Dec 10 10:58:58 crc kubenswrapper[4780]: I1210 10:58:58.537990 4780 memory_manager.go:354] "RemoveStaleState removing state" podUID="cc22221d-0c02-4e8c-8314-c2e6d9290b5e" containerName="nbdb" Dec 10 10:58:58 crc kubenswrapper[4780]: E1210 10:58:58.538140 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cc22221d-0c02-4e8c-8314-c2e6d9290b5e" containerName="ovnkube-controller" Dec 10 10:58:58 crc kubenswrapper[4780]: I1210 10:58:58.538150 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="cc22221d-0c02-4e8c-8314-c2e6d9290b5e" containerName="ovnkube-controller" Dec 10 10:58:58 crc kubenswrapper[4780]: I1210 10:58:58.538298 4780 memory_manager.go:354] "RemoveStaleState removing state" podUID="cc22221d-0c02-4e8c-8314-c2e6d9290b5e" containerName="ovnkube-controller" Dec 10 10:58:58 crc kubenswrapper[4780]: I1210 10:58:58.538313 4780 memory_manager.go:354] "RemoveStaleState removing state" podUID="cc22221d-0c02-4e8c-8314-c2e6d9290b5e" containerName="ovnkube-controller" Dec 10 10:58:58 crc kubenswrapper[4780]: I1210 10:58:58.540969 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-zq4nc" Dec 10 10:58:58 crc kubenswrapper[4780]: I1210 10:58:58.982259 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/e04b7e82-708f-44b6-81b7-0a9be2dc017b-host-run-netns\") pod \"ovnkube-node-zq4nc\" (UID: \"e04b7e82-708f-44b6-81b7-0a9be2dc017b\") " pod="openshift-ovn-kubernetes/ovnkube-node-zq4nc" Dec 10 10:58:58 crc kubenswrapper[4780]: I1210 10:58:58.982365 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fqk7z\" (UniqueName: \"kubernetes.io/projected/e04b7e82-708f-44b6-81b7-0a9be2dc017b-kube-api-access-fqk7z\") pod \"ovnkube-node-zq4nc\" (UID: \"e04b7e82-708f-44b6-81b7-0a9be2dc017b\") " pod="openshift-ovn-kubernetes/ovnkube-node-zq4nc" Dec 10 10:58:58 crc kubenswrapper[4780]: I1210 10:58:58.982406 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/e04b7e82-708f-44b6-81b7-0a9be2dc017b-node-log\") pod \"ovnkube-node-zq4nc\" (UID: \"e04b7e82-708f-44b6-81b7-0a9be2dc017b\") " pod="openshift-ovn-kubernetes/ovnkube-node-zq4nc" Dec 10 10:58:58 crc kubenswrapper[4780]: I1210 10:58:58.982430 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/e04b7e82-708f-44b6-81b7-0a9be2dc017b-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-zq4nc\" (UID: \"e04b7e82-708f-44b6-81b7-0a9be2dc017b\") " pod="openshift-ovn-kubernetes/ovnkube-node-zq4nc" Dec 10 10:58:58 crc kubenswrapper[4780]: I1210 10:58:58.982449 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/e04b7e82-708f-44b6-81b7-0a9be2dc017b-var-lib-openvswitch\") pod \"ovnkube-node-zq4nc\" (UID: \"e04b7e82-708f-44b6-81b7-0a9be2dc017b\") " pod="openshift-ovn-kubernetes/ovnkube-node-zq4nc" Dec 10 10:58:58 crc kubenswrapper[4780]: I1210 10:58:58.982465 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/e04b7e82-708f-44b6-81b7-0a9be2dc017b-host-kubelet\") pod \"ovnkube-node-zq4nc\" (UID: \"e04b7e82-708f-44b6-81b7-0a9be2dc017b\") " pod="openshift-ovn-kubernetes/ovnkube-node-zq4nc" Dec 10 10:58:58 crc kubenswrapper[4780]: I1210 10:58:58.982483 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/e04b7e82-708f-44b6-81b7-0a9be2dc017b-run-ovn\") pod \"ovnkube-node-zq4nc\" (UID: \"e04b7e82-708f-44b6-81b7-0a9be2dc017b\") " pod="openshift-ovn-kubernetes/ovnkube-node-zq4nc" Dec 10 10:58:58 crc kubenswrapper[4780]: I1210 10:58:58.982515 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/e04b7e82-708f-44b6-81b7-0a9be2dc017b-ovnkube-config\") pod \"ovnkube-node-zq4nc\" (UID: \"e04b7e82-708f-44b6-81b7-0a9be2dc017b\") " pod="openshift-ovn-kubernetes/ovnkube-node-zq4nc" Dec 10 10:58:58 crc kubenswrapper[4780]: I1210 10:58:58.982534 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: 
\"kubernetes.io/secret/e04b7e82-708f-44b6-81b7-0a9be2dc017b-ovn-node-metrics-cert\") pod \"ovnkube-node-zq4nc\" (UID: \"e04b7e82-708f-44b6-81b7-0a9be2dc017b\") " pod="openshift-ovn-kubernetes/ovnkube-node-zq4nc" Dec 10 10:58:58 crc kubenswrapper[4780]: I1210 10:58:58.982559 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/e04b7e82-708f-44b6-81b7-0a9be2dc017b-run-systemd\") pod \"ovnkube-node-zq4nc\" (UID: \"e04b7e82-708f-44b6-81b7-0a9be2dc017b\") " pod="openshift-ovn-kubernetes/ovnkube-node-zq4nc" Dec 10 10:58:58 crc kubenswrapper[4780]: I1210 10:58:58.982618 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/e04b7e82-708f-44b6-81b7-0a9be2dc017b-host-cni-netd\") pod \"ovnkube-node-zq4nc\" (UID: \"e04b7e82-708f-44b6-81b7-0a9be2dc017b\") " pod="openshift-ovn-kubernetes/ovnkube-node-zq4nc" Dec 10 10:58:58 crc kubenswrapper[4780]: I1210 10:58:58.982640 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/e04b7e82-708f-44b6-81b7-0a9be2dc017b-etc-openvswitch\") pod \"ovnkube-node-zq4nc\" (UID: \"e04b7e82-708f-44b6-81b7-0a9be2dc017b\") " pod="openshift-ovn-kubernetes/ovnkube-node-zq4nc" Dec 10 10:58:58 crc kubenswrapper[4780]: I1210 10:58:58.982690 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/e04b7e82-708f-44b6-81b7-0a9be2dc017b-run-openvswitch\") pod \"ovnkube-node-zq4nc\" (UID: \"e04b7e82-708f-44b6-81b7-0a9be2dc017b\") " pod="openshift-ovn-kubernetes/ovnkube-node-zq4nc" Dec 10 10:58:58 crc kubenswrapper[4780]: I1210 10:58:58.982708 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/e04b7e82-708f-44b6-81b7-0a9be2dc017b-log-socket\") pod \"ovnkube-node-zq4nc\" (UID: \"e04b7e82-708f-44b6-81b7-0a9be2dc017b\") " pod="openshift-ovn-kubernetes/ovnkube-node-zq4nc" Dec 10 10:58:58 crc kubenswrapper[4780]: I1210 10:58:58.982726 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/e04b7e82-708f-44b6-81b7-0a9be2dc017b-ovnkube-script-lib\") pod \"ovnkube-node-zq4nc\" (UID: \"e04b7e82-708f-44b6-81b7-0a9be2dc017b\") " pod="openshift-ovn-kubernetes/ovnkube-node-zq4nc" Dec 10 10:58:58 crc kubenswrapper[4780]: I1210 10:58:58.984128 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/e04b7e82-708f-44b6-81b7-0a9be2dc017b-host-run-ovn-kubernetes\") pod \"ovnkube-node-zq4nc\" (UID: \"e04b7e82-708f-44b6-81b7-0a9be2dc017b\") " pod="openshift-ovn-kubernetes/ovnkube-node-zq4nc" Dec 10 10:58:58 crc kubenswrapper[4780]: I1210 10:58:58.984185 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/e04b7e82-708f-44b6-81b7-0a9be2dc017b-host-slash\") pod \"ovnkube-node-zq4nc\" (UID: \"e04b7e82-708f-44b6-81b7-0a9be2dc017b\") " pod="openshift-ovn-kubernetes/ovnkube-node-zq4nc" Dec 10 10:58:58 crc kubenswrapper[4780]: I1210 10:58:58.984207 4780 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/e04b7e82-708f-44b6-81b7-0a9be2dc017b-host-cni-bin\") pod \"ovnkube-node-zq4nc\" (UID: \"e04b7e82-708f-44b6-81b7-0a9be2dc017b\") " pod="openshift-ovn-kubernetes/ovnkube-node-zq4nc" Dec 10 10:58:58 crc kubenswrapper[4780]: I1210 10:58:58.984236 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/e04b7e82-708f-44b6-81b7-0a9be2dc017b-systemd-units\") pod \"ovnkube-node-zq4nc\" (UID: \"e04b7e82-708f-44b6-81b7-0a9be2dc017b\") " pod="openshift-ovn-kubernetes/ovnkube-node-zq4nc" Dec 10 10:58:58 crc kubenswrapper[4780]: I1210 10:58:58.984273 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/e04b7e82-708f-44b6-81b7-0a9be2dc017b-env-overrides\") pod \"ovnkube-node-zq4nc\" (UID: \"e04b7e82-708f-44b6-81b7-0a9be2dc017b\") " pod="openshift-ovn-kubernetes/ovnkube-node-zq4nc" Dec 10 10:58:59 crc kubenswrapper[4780]: I1210 10:58:59.071889 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-fpl55" event={"ID":"cc22221d-0c02-4e8c-8314-c2e6d9290b5e","Type":"ContainerDied","Data":"0ae266c98886075cfee6fbd79b266be7ef745f38c6d1283c5150f29e3aea474c"} Dec 10 10:58:59 crc kubenswrapper[4780]: I1210 10:58:59.072551 4780 scope.go:117] "RemoveContainer" containerID="3b436c5f5fde27d37bb053b20d1909a42902f8da7cd35713ac9cced9d4f93d30" Dec 10 10:58:59 crc kubenswrapper[4780]: I1210 10:58:59.072356 4780 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-fpl55" Dec 10 10:58:59 crc kubenswrapper[4780]: I1210 10:58:59.080473 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-8cwb7_deadb49b-61b8-435f-8168-d7bd3c01b5ad/kube-multus/2.log" Dec 10 10:58:59 crc kubenswrapper[4780]: I1210 10:58:59.085331 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/e04b7e82-708f-44b6-81b7-0a9be2dc017b-host-cni-bin\") pod \"ovnkube-node-zq4nc\" (UID: \"e04b7e82-708f-44b6-81b7-0a9be2dc017b\") " pod="openshift-ovn-kubernetes/ovnkube-node-zq4nc" Dec 10 10:58:59 crc kubenswrapper[4780]: I1210 10:58:59.085381 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/e04b7e82-708f-44b6-81b7-0a9be2dc017b-host-slash\") pod \"ovnkube-node-zq4nc\" (UID: \"e04b7e82-708f-44b6-81b7-0a9be2dc017b\") " pod="openshift-ovn-kubernetes/ovnkube-node-zq4nc" Dec 10 10:58:59 crc kubenswrapper[4780]: I1210 10:58:59.085412 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/e04b7e82-708f-44b6-81b7-0a9be2dc017b-systemd-units\") pod \"ovnkube-node-zq4nc\" (UID: \"e04b7e82-708f-44b6-81b7-0a9be2dc017b\") " pod="openshift-ovn-kubernetes/ovnkube-node-zq4nc" Dec 10 10:58:59 crc kubenswrapper[4780]: I1210 10:58:59.085457 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/e04b7e82-708f-44b6-81b7-0a9be2dc017b-env-overrides\") pod \"ovnkube-node-zq4nc\" (UID: \"e04b7e82-708f-44b6-81b7-0a9be2dc017b\") " pod="openshift-ovn-kubernetes/ovnkube-node-zq4nc" Dec 10 10:58:59 crc 
kubenswrapper[4780]: I1210 10:58:59.085486 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/e04b7e82-708f-44b6-81b7-0a9be2dc017b-host-run-netns\") pod \"ovnkube-node-zq4nc\" (UID: \"e04b7e82-708f-44b6-81b7-0a9be2dc017b\") " pod="openshift-ovn-kubernetes/ovnkube-node-zq4nc" Dec 10 10:58:59 crc kubenswrapper[4780]: I1210 10:58:59.085544 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fqk7z\" (UniqueName: \"kubernetes.io/projected/e04b7e82-708f-44b6-81b7-0a9be2dc017b-kube-api-access-fqk7z\") pod \"ovnkube-node-zq4nc\" (UID: \"e04b7e82-708f-44b6-81b7-0a9be2dc017b\") " pod="openshift-ovn-kubernetes/ovnkube-node-zq4nc" Dec 10 10:58:59 crc kubenswrapper[4780]: I1210 10:58:59.085598 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/e04b7e82-708f-44b6-81b7-0a9be2dc017b-node-log\") pod \"ovnkube-node-zq4nc\" (UID: \"e04b7e82-708f-44b6-81b7-0a9be2dc017b\") " pod="openshift-ovn-kubernetes/ovnkube-node-zq4nc" Dec 10 10:58:59 crc kubenswrapper[4780]: I1210 10:58:59.085629 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/e04b7e82-708f-44b6-81b7-0a9be2dc017b-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-zq4nc\" (UID: \"e04b7e82-708f-44b6-81b7-0a9be2dc017b\") " pod="openshift-ovn-kubernetes/ovnkube-node-zq4nc" Dec 10 10:58:59 crc kubenswrapper[4780]: I1210 10:58:59.085655 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/e04b7e82-708f-44b6-81b7-0a9be2dc017b-var-lib-openvswitch\") pod \"ovnkube-node-zq4nc\" (UID: \"e04b7e82-708f-44b6-81b7-0a9be2dc017b\") " pod="openshift-ovn-kubernetes/ovnkube-node-zq4nc" Dec 10 10:58:59 crc kubenswrapper[4780]: I1210 10:58:59.085678 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/e04b7e82-708f-44b6-81b7-0a9be2dc017b-host-kubelet\") pod \"ovnkube-node-zq4nc\" (UID: \"e04b7e82-708f-44b6-81b7-0a9be2dc017b\") " pod="openshift-ovn-kubernetes/ovnkube-node-zq4nc" Dec 10 10:58:59 crc kubenswrapper[4780]: I1210 10:58:59.085707 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/e04b7e82-708f-44b6-81b7-0a9be2dc017b-run-ovn\") pod \"ovnkube-node-zq4nc\" (UID: \"e04b7e82-708f-44b6-81b7-0a9be2dc017b\") " pod="openshift-ovn-kubernetes/ovnkube-node-zq4nc" Dec 10 10:58:59 crc kubenswrapper[4780]: I1210 10:58:59.085746 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/e04b7e82-708f-44b6-81b7-0a9be2dc017b-ovn-node-metrics-cert\") pod \"ovnkube-node-zq4nc\" (UID: \"e04b7e82-708f-44b6-81b7-0a9be2dc017b\") " pod="openshift-ovn-kubernetes/ovnkube-node-zq4nc" Dec 10 10:58:59 crc kubenswrapper[4780]: I1210 10:58:59.085773 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/e04b7e82-708f-44b6-81b7-0a9be2dc017b-ovnkube-config\") pod \"ovnkube-node-zq4nc\" (UID: \"e04b7e82-708f-44b6-81b7-0a9be2dc017b\") " pod="openshift-ovn-kubernetes/ovnkube-node-zq4nc" Dec 10 10:58:59 crc kubenswrapper[4780]: I1210 
10:58:59.085816 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/e04b7e82-708f-44b6-81b7-0a9be2dc017b-run-systemd\") pod \"ovnkube-node-zq4nc\" (UID: \"e04b7e82-708f-44b6-81b7-0a9be2dc017b\") " pod="openshift-ovn-kubernetes/ovnkube-node-zq4nc" Dec 10 10:58:59 crc kubenswrapper[4780]: I1210 10:58:59.085863 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/e04b7e82-708f-44b6-81b7-0a9be2dc017b-host-cni-netd\") pod \"ovnkube-node-zq4nc\" (UID: \"e04b7e82-708f-44b6-81b7-0a9be2dc017b\") " pod="openshift-ovn-kubernetes/ovnkube-node-zq4nc" Dec 10 10:58:59 crc kubenswrapper[4780]: I1210 10:58:59.085890 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/e04b7e82-708f-44b6-81b7-0a9be2dc017b-log-socket\") pod \"ovnkube-node-zq4nc\" (UID: \"e04b7e82-708f-44b6-81b7-0a9be2dc017b\") " pod="openshift-ovn-kubernetes/ovnkube-node-zq4nc" Dec 10 10:58:59 crc kubenswrapper[4780]: I1210 10:58:59.085914 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/e04b7e82-708f-44b6-81b7-0a9be2dc017b-ovnkube-script-lib\") pod \"ovnkube-node-zq4nc\" (UID: \"e04b7e82-708f-44b6-81b7-0a9be2dc017b\") " pod="openshift-ovn-kubernetes/ovnkube-node-zq4nc" Dec 10 10:58:59 crc kubenswrapper[4780]: I1210 10:58:59.086019 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/e04b7e82-708f-44b6-81b7-0a9be2dc017b-etc-openvswitch\") pod \"ovnkube-node-zq4nc\" (UID: \"e04b7e82-708f-44b6-81b7-0a9be2dc017b\") " pod="openshift-ovn-kubernetes/ovnkube-node-zq4nc" Dec 10 10:58:59 crc kubenswrapper[4780]: I1210 10:58:59.086118 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/e04b7e82-708f-44b6-81b7-0a9be2dc017b-run-openvswitch\") pod \"ovnkube-node-zq4nc\" (UID: \"e04b7e82-708f-44b6-81b7-0a9be2dc017b\") " pod="openshift-ovn-kubernetes/ovnkube-node-zq4nc" Dec 10 10:58:59 crc kubenswrapper[4780]: I1210 10:58:59.086157 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/e04b7e82-708f-44b6-81b7-0a9be2dc017b-host-run-ovn-kubernetes\") pod \"ovnkube-node-zq4nc\" (UID: \"e04b7e82-708f-44b6-81b7-0a9be2dc017b\") " pod="openshift-ovn-kubernetes/ovnkube-node-zq4nc" Dec 10 10:58:59 crc kubenswrapper[4780]: I1210 10:58:59.086244 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/e04b7e82-708f-44b6-81b7-0a9be2dc017b-host-run-ovn-kubernetes\") pod \"ovnkube-node-zq4nc\" (UID: \"e04b7e82-708f-44b6-81b7-0a9be2dc017b\") " pod="openshift-ovn-kubernetes/ovnkube-node-zq4nc" Dec 10 10:58:59 crc kubenswrapper[4780]: I1210 10:58:59.086248 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/e04b7e82-708f-44b6-81b7-0a9be2dc017b-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-zq4nc\" (UID: \"e04b7e82-708f-44b6-81b7-0a9be2dc017b\") " pod="openshift-ovn-kubernetes/ovnkube-node-zq4nc" Dec 10 10:58:59 crc kubenswrapper[4780]: I1210 10:58:59.086283 4780 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/e04b7e82-708f-44b6-81b7-0a9be2dc017b-host-kubelet\") pod \"ovnkube-node-zq4nc\" (UID: \"e04b7e82-708f-44b6-81b7-0a9be2dc017b\") " pod="openshift-ovn-kubernetes/ovnkube-node-zq4nc" Dec 10 10:58:59 crc kubenswrapper[4780]: I1210 10:58:59.086243 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/e04b7e82-708f-44b6-81b7-0a9be2dc017b-run-systemd\") pod \"ovnkube-node-zq4nc\" (UID: \"e04b7e82-708f-44b6-81b7-0a9be2dc017b\") " pod="openshift-ovn-kubernetes/ovnkube-node-zq4nc" Dec 10 10:58:59 crc kubenswrapper[4780]: I1210 10:58:59.086322 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/e04b7e82-708f-44b6-81b7-0a9be2dc017b-run-ovn\") pod \"ovnkube-node-zq4nc\" (UID: \"e04b7e82-708f-44b6-81b7-0a9be2dc017b\") " pod="openshift-ovn-kubernetes/ovnkube-node-zq4nc" Dec 10 10:58:59 crc kubenswrapper[4780]: I1210 10:58:59.086352 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/e04b7e82-708f-44b6-81b7-0a9be2dc017b-host-cni-bin\") pod \"ovnkube-node-zq4nc\" (UID: \"e04b7e82-708f-44b6-81b7-0a9be2dc017b\") " pod="openshift-ovn-kubernetes/ovnkube-node-zq4nc" Dec 10 10:58:59 crc kubenswrapper[4780]: I1210 10:58:59.086399 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/e04b7e82-708f-44b6-81b7-0a9be2dc017b-host-slash\") pod \"ovnkube-node-zq4nc\" (UID: \"e04b7e82-708f-44b6-81b7-0a9be2dc017b\") " pod="openshift-ovn-kubernetes/ovnkube-node-zq4nc" Dec 10 10:58:59 crc kubenswrapper[4780]: I1210 10:58:59.086265 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/e04b7e82-708f-44b6-81b7-0a9be2dc017b-var-lib-openvswitch\") pod \"ovnkube-node-zq4nc\" (UID: \"e04b7e82-708f-44b6-81b7-0a9be2dc017b\") " pod="openshift-ovn-kubernetes/ovnkube-node-zq4nc" Dec 10 10:58:59 crc kubenswrapper[4780]: I1210 10:58:59.087781 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/e04b7e82-708f-44b6-81b7-0a9be2dc017b-node-log\") pod \"ovnkube-node-zq4nc\" (UID: \"e04b7e82-708f-44b6-81b7-0a9be2dc017b\") " pod="openshift-ovn-kubernetes/ovnkube-node-zq4nc" Dec 10 10:58:59 crc kubenswrapper[4780]: I1210 10:58:59.088004 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/e04b7e82-708f-44b6-81b7-0a9be2dc017b-host-cni-netd\") pod \"ovnkube-node-zq4nc\" (UID: \"e04b7e82-708f-44b6-81b7-0a9be2dc017b\") " pod="openshift-ovn-kubernetes/ovnkube-node-zq4nc" Dec 10 10:58:59 crc kubenswrapper[4780]: I1210 10:58:59.088223 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/e04b7e82-708f-44b6-81b7-0a9be2dc017b-log-socket\") pod \"ovnkube-node-zq4nc\" (UID: \"e04b7e82-708f-44b6-81b7-0a9be2dc017b\") " pod="openshift-ovn-kubernetes/ovnkube-node-zq4nc" Dec 10 10:58:59 crc kubenswrapper[4780]: I1210 10:58:59.088367 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/e04b7e82-708f-44b6-81b7-0a9be2dc017b-etc-openvswitch\") pod \"ovnkube-node-zq4nc\" (UID: 
\"e04b7e82-708f-44b6-81b7-0a9be2dc017b\") " pod="openshift-ovn-kubernetes/ovnkube-node-zq4nc" Dec 10 10:58:59 crc kubenswrapper[4780]: I1210 10:58:59.088479 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/e04b7e82-708f-44b6-81b7-0a9be2dc017b-run-openvswitch\") pod \"ovnkube-node-zq4nc\" (UID: \"e04b7e82-708f-44b6-81b7-0a9be2dc017b\") " pod="openshift-ovn-kubernetes/ovnkube-node-zq4nc" Dec 10 10:58:59 crc kubenswrapper[4780]: I1210 10:58:59.089184 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/e04b7e82-708f-44b6-81b7-0a9be2dc017b-systemd-units\") pod \"ovnkube-node-zq4nc\" (UID: \"e04b7e82-708f-44b6-81b7-0a9be2dc017b\") " pod="openshift-ovn-kubernetes/ovnkube-node-zq4nc" Dec 10 10:58:59 crc kubenswrapper[4780]: I1210 10:58:59.091016 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/e04b7e82-708f-44b6-81b7-0a9be2dc017b-env-overrides\") pod \"ovnkube-node-zq4nc\" (UID: \"e04b7e82-708f-44b6-81b7-0a9be2dc017b\") " pod="openshift-ovn-kubernetes/ovnkube-node-zq4nc" Dec 10 10:58:59 crc kubenswrapper[4780]: I1210 10:58:59.094125 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/e04b7e82-708f-44b6-81b7-0a9be2dc017b-host-run-netns\") pod \"ovnkube-node-zq4nc\" (UID: \"e04b7e82-708f-44b6-81b7-0a9be2dc017b\") " pod="openshift-ovn-kubernetes/ovnkube-node-zq4nc" Dec 10 10:58:59 crc kubenswrapper[4780]: I1210 10:58:59.101403 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/e04b7e82-708f-44b6-81b7-0a9be2dc017b-ovnkube-config\") pod \"ovnkube-node-zq4nc\" (UID: \"e04b7e82-708f-44b6-81b7-0a9be2dc017b\") " pod="openshift-ovn-kubernetes/ovnkube-node-zq4nc" Dec 10 10:58:59 crc kubenswrapper[4780]: I1210 10:58:59.101583 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/e04b7e82-708f-44b6-81b7-0a9be2dc017b-ovnkube-script-lib\") pod \"ovnkube-node-zq4nc\" (UID: \"e04b7e82-708f-44b6-81b7-0a9be2dc017b\") " pod="openshift-ovn-kubernetes/ovnkube-node-zq4nc" Dec 10 10:58:59 crc kubenswrapper[4780]: I1210 10:58:59.108862 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/e04b7e82-708f-44b6-81b7-0a9be2dc017b-ovn-node-metrics-cert\") pod \"ovnkube-node-zq4nc\" (UID: \"e04b7e82-708f-44b6-81b7-0a9be2dc017b\") " pod="openshift-ovn-kubernetes/ovnkube-node-zq4nc" Dec 10 10:58:59 crc kubenswrapper[4780]: I1210 10:58:59.109112 4780 scope.go:117] "RemoveContainer" containerID="df663a4e7ac68c8967db1167ea07b83862e9d52aa13b3f10e51934bb18e7397e" Dec 10 10:58:59 crc kubenswrapper[4780]: I1210 10:58:59.146341 4780 scope.go:117] "RemoveContainer" containerID="cafa00280d671b48ed3628917793412a20b9d3be93a80d9a7775003081aea1ab" Dec 10 10:58:59 crc kubenswrapper[4780]: I1210 10:58:59.147360 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fqk7z\" (UniqueName: \"kubernetes.io/projected/e04b7e82-708f-44b6-81b7-0a9be2dc017b-kube-api-access-fqk7z\") pod \"ovnkube-node-zq4nc\" (UID: \"e04b7e82-708f-44b6-81b7-0a9be2dc017b\") " pod="openshift-ovn-kubernetes/ovnkube-node-zq4nc" Dec 10 10:58:59 crc kubenswrapper[4780]: I1210 10:58:59.184012 
4780 scope.go:117] "RemoveContainer" containerID="dbc444116b8592021f7d6ba0387d473521cba7c09ca78384be81f688af54554f" Dec 10 10:58:59 crc kubenswrapper[4780]: I1210 10:58:59.198564 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-fpl55"] Dec 10 10:58:59 crc kubenswrapper[4780]: I1210 10:58:59.220938 4780 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-fpl55"] Dec 10 10:58:59 crc kubenswrapper[4780]: I1210 10:58:59.229570 4780 scope.go:117] "RemoveContainer" containerID="ce7c3983123be2f92944322c2cf554e67cf80a51981415d9872bfc2db9f1ed3a" Dec 10 10:58:59 crc kubenswrapper[4780]: I1210 10:58:59.256292 4780 scope.go:117] "RemoveContainer" containerID="ef0cc518de07a141be124545f2e547c630329ff507b0e0e71e4dee9b4be5d89a" Dec 10 10:58:59 crc kubenswrapper[4780]: I1210 10:58:59.275884 4780 scope.go:117] "RemoveContainer" containerID="782bcb7eb8341b73ff1c32c5af00aaefa2a513e5d1672f9f7df13cd05467131c" Dec 10 10:58:59 crc kubenswrapper[4780]: I1210 10:58:59.297468 4780 scope.go:117] "RemoveContainer" containerID="c39c3d56584d0939bfa9be938be09db5c93ddf50e0a56cacaa6e30c7b79475a1" Dec 10 10:58:59 crc kubenswrapper[4780]: I1210 10:58:59.325832 4780 scope.go:117] "RemoveContainer" containerID="ada0c2d5e74e1805777a330c39a3c1c0fb677f876387207a83c79bd7ed06775a" Dec 10 10:58:59 crc kubenswrapper[4780]: I1210 10:58:59.428541 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-zq4nc" Dec 10 10:58:59 crc kubenswrapper[4780]: W1210 10:58:59.461675 4780 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pode04b7e82_708f_44b6_81b7_0a9be2dc017b.slice/crio-825fd4a9748ea43cff57fbc2f9f4334605ca88ea47de152921a4bb0f07125c03 WatchSource:0}: Error finding container 825fd4a9748ea43cff57fbc2f9f4334605ca88ea47de152921a4bb0f07125c03: Status 404 returned error can't find the container with id 825fd4a9748ea43cff57fbc2f9f4334605ca88ea47de152921a4bb0f07125c03 Dec 10 10:59:00 crc kubenswrapper[4780]: I1210 10:58:59.973004 4780 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cc22221d-0c02-4e8c-8314-c2e6d9290b5e" path="/var/lib/kubelet/pods/cc22221d-0c02-4e8c-8314-c2e6d9290b5e/volumes" Dec 10 10:59:00 crc kubenswrapper[4780]: I1210 10:59:00.479956 4780 generic.go:334] "Generic (PLEG): container finished" podID="e04b7e82-708f-44b6-81b7-0a9be2dc017b" containerID="5f9839a1fcfa16b3318ca23d5d10591f15b0e124b4f4d2c62c5574b7f05b54c2" exitCode=0 Dec 10 10:59:00 crc kubenswrapper[4780]: I1210 10:59:00.480077 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-zq4nc" event={"ID":"e04b7e82-708f-44b6-81b7-0a9be2dc017b","Type":"ContainerDied","Data":"5f9839a1fcfa16b3318ca23d5d10591f15b0e124b4f4d2c62c5574b7f05b54c2"} Dec 10 10:59:00 crc kubenswrapper[4780]: I1210 10:59:00.480127 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-zq4nc" event={"ID":"e04b7e82-708f-44b6-81b7-0a9be2dc017b","Type":"ContainerStarted","Data":"825fd4a9748ea43cff57fbc2f9f4334605ca88ea47de152921a4bb0f07125c03"} Dec 10 10:59:00 crc kubenswrapper[4780]: I1210 10:59:00.526077 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-8cwb7_deadb49b-61b8-435f-8168-d7bd3c01b5ad/kube-multus/2.log" Dec 10 10:59:00 crc kubenswrapper[4780]: I1210 10:59:00.526148 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-multus/multus-8cwb7" event={"ID":"deadb49b-61b8-435f-8168-d7bd3c01b5ad","Type":"ContainerStarted","Data":"bfe2ea5863322d7fc3412836a708a03bbea0988b73ff84aba3e6c58a371b288b"} Dec 10 10:59:01 crc kubenswrapper[4780]: I1210 10:59:01.607975 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-zq4nc" event={"ID":"e04b7e82-708f-44b6-81b7-0a9be2dc017b","Type":"ContainerStarted","Data":"9d419adae78cdbe67d8f82cc37254eec824c387ac495137f1080684edeb61313"} Dec 10 10:59:02 crc kubenswrapper[4780]: I1210 10:59:02.691500 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-zq4nc" event={"ID":"e04b7e82-708f-44b6-81b7-0a9be2dc017b","Type":"ContainerStarted","Data":"fa7ced2c612728c221daeccf50086a7ef7d668f06ca92a6d849a7edca804ec9e"} Dec 10 10:59:02 crc kubenswrapper[4780]: I1210 10:59:02.691571 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-zq4nc" event={"ID":"e04b7e82-708f-44b6-81b7-0a9be2dc017b","Type":"ContainerStarted","Data":"07caf7e3f924f67020db81141a3cbd04fe118bc69b973ee6e7e668b73d015124"} Dec 10 10:59:02 crc kubenswrapper[4780]: I1210 10:59:02.691588 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-zq4nc" event={"ID":"e04b7e82-708f-44b6-81b7-0a9be2dc017b","Type":"ContainerStarted","Data":"c796931386ac60ecee4c05b3738c7f0dd269c0102e93c0bbb5ab6145e9e85b78"} Dec 10 10:59:02 crc kubenswrapper[4780]: I1210 10:59:02.691598 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-zq4nc" event={"ID":"e04b7e82-708f-44b6-81b7-0a9be2dc017b","Type":"ContainerStarted","Data":"6016472ecabb9c9a02786b6cba60cec8e501d252b5ebb27fc5f2dc5dfc1fb0ba"} Dec 10 10:59:03 crc kubenswrapper[4780]: I1210 10:59:03.702444 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-zq4nc" event={"ID":"e04b7e82-708f-44b6-81b7-0a9be2dc017b","Type":"ContainerStarted","Data":"bb59e34459631927704e7b362da36410a04e5bd4da22443dc68b81a78747e0a5"} Dec 10 10:59:06 crc kubenswrapper[4780]: I1210 10:59:06.729108 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-zq4nc" event={"ID":"e04b7e82-708f-44b6-81b7-0a9be2dc017b","Type":"ContainerStarted","Data":"93ed4e1049f145642ca624fc825510ae8d4a8fd61aebea868b15416e5d770696"} Dec 10 10:59:09 crc kubenswrapper[4780]: I1210 10:59:09.958244 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-fqxm5" Dec 10 10:59:09 crc kubenswrapper[4780]: I1210 10:59:09.958323 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-7767cf9dd9-fcj9f" Dec 10 10:59:09 crc kubenswrapper[4780]: I1210 10:59:09.959504 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-fqxm5" Dec 10 10:59:09 crc kubenswrapper[4780]: I1210 10:59:09.959835 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-7767cf9dd9-fcj9f" Dec 10 10:59:10 crc kubenswrapper[4780]: E1210 10:59:10.032899 4780 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-668cf9dfbb-fqxm5_openshift-operators_fd80e9af-20c4-4aaa-9f38-4f46c3b610fb_0(926a36b8b6de1f812aee2b5839ba548207c7728dde35e241e9c19fc4e3451448): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Dec 10 10:59:10 crc kubenswrapper[4780]: E1210 10:59:10.033025 4780 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-668cf9dfbb-fqxm5_openshift-operators_fd80e9af-20c4-4aaa-9f38-4f46c3b610fb_0(926a36b8b6de1f812aee2b5839ba548207c7728dde35e241e9c19fc4e3451448): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-fqxm5" Dec 10 10:59:10 crc kubenswrapper[4780]: E1210 10:59:10.033056 4780 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-668cf9dfbb-fqxm5_openshift-operators_fd80e9af-20c4-4aaa-9f38-4f46c3b610fb_0(926a36b8b6de1f812aee2b5839ba548207c7728dde35e241e9c19fc4e3451448): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-fqxm5" Dec 10 10:59:10 crc kubenswrapper[4780]: E1210 10:59:10.033122 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"obo-prometheus-operator-668cf9dfbb-fqxm5_openshift-operators(fd80e9af-20c4-4aaa-9f38-4f46c3b610fb)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"obo-prometheus-operator-668cf9dfbb-fqxm5_openshift-operators(fd80e9af-20c4-4aaa-9f38-4f46c3b610fb)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-668cf9dfbb-fqxm5_openshift-operators_fd80e9af-20c4-4aaa-9f38-4f46c3b610fb_0(926a36b8b6de1f812aee2b5839ba548207c7728dde35e241e9c19fc4e3451448): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\"" pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-fqxm5" podUID="fd80e9af-20c4-4aaa-9f38-4f46c3b610fb" Dec 10 10:59:10 crc kubenswrapper[4780]: E1210 10:59:10.042579 4780 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-7767cf9dd9-fcj9f_openshift-operators_42449c87-f0c6-4433-92cb-f89e51cb5a14_0(29469515c3f8dea629a4cfa7102cdacbd132b5a29c44ede79e738ea48452f796): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Dec 10 10:59:10 crc kubenswrapper[4780]: E1210 10:59:10.042692 4780 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-7767cf9dd9-fcj9f_openshift-operators_42449c87-f0c6-4433-92cb-f89e51cb5a14_0(29469515c3f8dea629a4cfa7102cdacbd132b5a29c44ede79e738ea48452f796): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-operators/obo-prometheus-operator-admission-webhook-7767cf9dd9-fcj9f" Dec 10 10:59:10 crc kubenswrapper[4780]: E1210 10:59:10.042837 4780 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-7767cf9dd9-fcj9f_openshift-operators_42449c87-f0c6-4433-92cb-f89e51cb5a14_0(29469515c3f8dea629a4cfa7102cdacbd132b5a29c44ede79e738ea48452f796): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/obo-prometheus-operator-admission-webhook-7767cf9dd9-fcj9f" Dec 10 10:59:10 crc kubenswrapper[4780]: E1210 10:59:10.043251 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"obo-prometheus-operator-admission-webhook-7767cf9dd9-fcj9f_openshift-operators(42449c87-f0c6-4433-92cb-f89e51cb5a14)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"obo-prometheus-operator-admission-webhook-7767cf9dd9-fcj9f_openshift-operators(42449c87-f0c6-4433-92cb-f89e51cb5a14)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-7767cf9dd9-fcj9f_openshift-operators_42449c87-f0c6-4433-92cb-f89e51cb5a14_0(29469515c3f8dea629a4cfa7102cdacbd132b5a29c44ede79e738ea48452f796): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\"" pod="openshift-operators/obo-prometheus-operator-admission-webhook-7767cf9dd9-fcj9f" podUID="42449c87-f0c6-4433-92cb-f89e51cb5a14" Dec 10 10:59:10 crc kubenswrapper[4780]: I1210 10:59:10.958055 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-7767cf9dd9-bbrkh" Dec 10 10:59:10 crc kubenswrapper[4780]: I1210 10:59:10.959502 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-7767cf9dd9-bbrkh" Dec 10 10:59:11 crc kubenswrapper[4780]: E1210 10:59:11.249277 4780 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-7767cf9dd9-bbrkh_openshift-operators_db680a43-b9fa-45d6-b751-f4467cfe5065_0(a124dcae265f20b4f23686716ac89bc2f36f45a420c5d2840895a3acc83c95b6): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Dec 10 10:59:11 crc kubenswrapper[4780]: E1210 10:59:11.249381 4780 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-7767cf9dd9-bbrkh_openshift-operators_db680a43-b9fa-45d6-b751-f4467cfe5065_0(a124dcae265f20b4f23686716ac89bc2f36f45a420c5d2840895a3acc83c95b6): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-operators/obo-prometheus-operator-admission-webhook-7767cf9dd9-bbrkh" Dec 10 10:59:11 crc kubenswrapper[4780]: E1210 10:59:11.249468 4780 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-7767cf9dd9-bbrkh_openshift-operators_db680a43-b9fa-45d6-b751-f4467cfe5065_0(a124dcae265f20b4f23686716ac89bc2f36f45a420c5d2840895a3acc83c95b6): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/obo-prometheus-operator-admission-webhook-7767cf9dd9-bbrkh" Dec 10 10:59:11 crc kubenswrapper[4780]: E1210 10:59:11.249529 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"obo-prometheus-operator-admission-webhook-7767cf9dd9-bbrkh_openshift-operators(db680a43-b9fa-45d6-b751-f4467cfe5065)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"obo-prometheus-operator-admission-webhook-7767cf9dd9-bbrkh_openshift-operators(db680a43-b9fa-45d6-b751-f4467cfe5065)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-7767cf9dd9-bbrkh_openshift-operators_db680a43-b9fa-45d6-b751-f4467cfe5065_0(a124dcae265f20b4f23686716ac89bc2f36f45a420c5d2840895a3acc83c95b6): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\"" pod="openshift-operators/obo-prometheus-operator-admission-webhook-7767cf9dd9-bbrkh" podUID="db680a43-b9fa-45d6-b751-f4467cfe5065" Dec 10 10:59:11 crc kubenswrapper[4780]: I1210 10:59:11.821379 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-zq4nc" event={"ID":"e04b7e82-708f-44b6-81b7-0a9be2dc017b","Type":"ContainerStarted","Data":"d9875817336b359f7f6811df1d1f192d94600c3aee89c12226449b36568d7adc"} Dec 10 10:59:11 crc kubenswrapper[4780]: I1210 10:59:11.958713 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/perses-operator-5446b9c989-m5pj4" Dec 10 10:59:11 crc kubenswrapper[4780]: I1210 10:59:11.959393 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/perses-operator-5446b9c989-m5pj4" Dec 10 10:59:11 crc kubenswrapper[4780]: E1210 10:59:11.998306 4780 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_perses-operator-5446b9c989-m5pj4_openshift-operators_fb9d5eb8-6ba0-4dea-8226-a3e362924f16_0(7b482e0f1a8eafcd9d892148b63cf6aa8a9d93e4b8a16f6e2d48a8eb85e6e448): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Dec 10 10:59:11 crc kubenswrapper[4780]: E1210 10:59:11.998491 4780 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_perses-operator-5446b9c989-m5pj4_openshift-operators_fb9d5eb8-6ba0-4dea-8226-a3e362924f16_0(7b482e0f1a8eafcd9d892148b63cf6aa8a9d93e4b8a16f6e2d48a8eb85e6e448): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-operators/perses-operator-5446b9c989-m5pj4" Dec 10 10:59:11 crc kubenswrapper[4780]: E1210 10:59:11.998545 4780 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_perses-operator-5446b9c989-m5pj4_openshift-operators_fb9d5eb8-6ba0-4dea-8226-a3e362924f16_0(7b482e0f1a8eafcd9d892148b63cf6aa8a9d93e4b8a16f6e2d48a8eb85e6e448): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/perses-operator-5446b9c989-m5pj4" Dec 10 10:59:11 crc kubenswrapper[4780]: E1210 10:59:11.998648 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"perses-operator-5446b9c989-m5pj4_openshift-operators(fb9d5eb8-6ba0-4dea-8226-a3e362924f16)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"perses-operator-5446b9c989-m5pj4_openshift-operators(fb9d5eb8-6ba0-4dea-8226-a3e362924f16)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_perses-operator-5446b9c989-m5pj4_openshift-operators_fb9d5eb8-6ba0-4dea-8226-a3e362924f16_0(7b482e0f1a8eafcd9d892148b63cf6aa8a9d93e4b8a16f6e2d48a8eb85e6e448): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\"" pod="openshift-operators/perses-operator-5446b9c989-m5pj4" podUID="fb9d5eb8-6ba0-4dea-8226-a3e362924f16" Dec 10 10:59:12 crc kubenswrapper[4780]: I1210 10:59:12.979263 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/observability-operator-d8bb48f5d-kmhks" Dec 10 10:59:12 crc kubenswrapper[4780]: I1210 10:59:12.980537 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/observability-operator-d8bb48f5d-kmhks" Dec 10 10:59:12 crc kubenswrapper[4780]: I1210 10:59:12.982395 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-zq4nc" Dec 10 10:59:12 crc kubenswrapper[4780]: I1210 10:59:12.982774 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-zq4nc" Dec 10 10:59:12 crc kubenswrapper[4780]: I1210 10:59:12.982800 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-zq4nc" Dec 10 10:59:13 crc kubenswrapper[4780]: I1210 10:59:13.053158 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-zq4nc" Dec 10 10:59:13 crc kubenswrapper[4780]: I1210 10:59:13.063326 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-zq4nc" Dec 10 10:59:13 crc kubenswrapper[4780]: E1210 10:59:13.069113 4780 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_observability-operator-d8bb48f5d-kmhks_openshift-operators_780b464e-7e56-49ba-b0af-fc0731e1290d_0(6695add366300adae6d00f75b3bc1aa7285cc62ae558d50194a57c370337e01b): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
Dec 10 10:59:13 crc kubenswrapper[4780]: E1210 10:59:13.069237 4780 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_observability-operator-d8bb48f5d-kmhks_openshift-operators_780b464e-7e56-49ba-b0af-fc0731e1290d_0(6695add366300adae6d00f75b3bc1aa7285cc62ae558d50194a57c370337e01b): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/observability-operator-d8bb48f5d-kmhks" Dec 10 10:59:13 crc kubenswrapper[4780]: E1210 10:59:13.069266 4780 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_observability-operator-d8bb48f5d-kmhks_openshift-operators_780b464e-7e56-49ba-b0af-fc0731e1290d_0(6695add366300adae6d00f75b3bc1aa7285cc62ae558d50194a57c370337e01b): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/observability-operator-d8bb48f5d-kmhks" Dec 10 10:59:13 crc kubenswrapper[4780]: E1210 10:59:13.069318 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"observability-operator-d8bb48f5d-kmhks_openshift-operators(780b464e-7e56-49ba-b0af-fc0731e1290d)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"observability-operator-d8bb48f5d-kmhks_openshift-operators(780b464e-7e56-49ba-b0af-fc0731e1290d)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_observability-operator-d8bb48f5d-kmhks_openshift-operators_780b464e-7e56-49ba-b0af-fc0731e1290d_0(6695add366300adae6d00f75b3bc1aa7285cc62ae558d50194a57c370337e01b): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\"" pod="openshift-operators/observability-operator-d8bb48f5d-kmhks" podUID="780b464e-7e56-49ba-b0af-fc0731e1290d" Dec 10 10:59:13 crc kubenswrapper[4780]: I1210 10:59:13.105649 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ovn-kubernetes/ovnkube-node-zq4nc" podStartSLOduration=15.105594994 podStartE2EDuration="15.105594994s" podCreationTimestamp="2025-12-10 10:58:58 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 10:59:13.09403158 +0000 UTC m=+857.947425023" watchObservedRunningTime="2025-12-10 10:59:13.105594994 +0000 UTC m=+857.958988437" Dec 10 10:59:13 crc kubenswrapper[4780]: I1210 10:59:13.992225 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-mkxpc" event={"ID":"f85d01b9-be91-4ff9-bf9d-886fa995d582","Type":"ContainerStarted","Data":"861af87530718e309fb66541cfd6f906c2798a7b77a77431f77b672922773a9a"} Dec 10 10:59:15 crc kubenswrapper[4780]: I1210 10:59:15.157149 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/observability-operator-d8bb48f5d-kmhks"] Dec 10 10:59:15 crc kubenswrapper[4780]: I1210 10:59:15.157666 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/observability-operator-d8bb48f5d-kmhks" Dec 10 10:59:15 crc kubenswrapper[4780]: I1210 10:59:15.159170 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operators/observability-operator-d8bb48f5d-kmhks" Dec 10 10:59:15 crc kubenswrapper[4780]: I1210 10:59:15.171029 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/obo-prometheus-operator-668cf9dfbb-fqxm5"] Dec 10 10:59:15 crc kubenswrapper[4780]: I1210 10:59:15.171238 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-fqxm5" Dec 10 10:59:15 crc kubenswrapper[4780]: I1210 10:59:15.172029 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-fqxm5" Dec 10 10:59:15 crc kubenswrapper[4780]: I1210 10:59:15.203518 4780 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-ovn-kubernetes/ovnkube-node-zq4nc" podUID="e04b7e82-708f-44b6-81b7-0a9be2dc017b" containerName="ovnkube-controller" probeResult="failure" output="" Dec 10 10:59:15 crc kubenswrapper[4780]: I1210 10:59:15.210875 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/obo-prometheus-operator-admission-webhook-7767cf9dd9-bbrkh"] Dec 10 10:59:15 crc kubenswrapper[4780]: I1210 10:59:15.211142 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-7767cf9dd9-bbrkh" Dec 10 10:59:15 crc kubenswrapper[4780]: I1210 10:59:15.212096 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-7767cf9dd9-bbrkh" Dec 10 10:59:15 crc kubenswrapper[4780]: I1210 10:59:15.216303 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/perses-operator-5446b9c989-m5pj4"] Dec 10 10:59:15 crc kubenswrapper[4780]: I1210 10:59:15.216499 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/perses-operator-5446b9c989-m5pj4" Dec 10 10:59:15 crc kubenswrapper[4780]: I1210 10:59:15.217227 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/perses-operator-5446b9c989-m5pj4" Dec 10 10:59:15 crc kubenswrapper[4780]: I1210 10:59:15.220845 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/obo-prometheus-operator-admission-webhook-7767cf9dd9-fcj9f"] Dec 10 10:59:15 crc kubenswrapper[4780]: I1210 10:59:15.221055 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-7767cf9dd9-fcj9f" Dec 10 10:59:15 crc kubenswrapper[4780]: I1210 10:59:15.221903 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-7767cf9dd9-fcj9f" Dec 10 10:59:15 crc kubenswrapper[4780]: E1210 10:59:15.541857 4780 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-668cf9dfbb-fqxm5_openshift-operators_fd80e9af-20c4-4aaa-9f38-4f46c3b610fb_0(9c0a71fce049ce6d0db25d5f232450f3c329c49e5e6a3d6d36dd6348c8ad61cf): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
Dec 10 10:59:15 crc kubenswrapper[4780]: E1210 10:59:15.541963 4780 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-668cf9dfbb-fqxm5_openshift-operators_fd80e9af-20c4-4aaa-9f38-4f46c3b610fb_0(9c0a71fce049ce6d0db25d5f232450f3c329c49e5e6a3d6d36dd6348c8ad61cf): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-fqxm5" Dec 10 10:59:15 crc kubenswrapper[4780]: E1210 10:59:15.541991 4780 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-668cf9dfbb-fqxm5_openshift-operators_fd80e9af-20c4-4aaa-9f38-4f46c3b610fb_0(9c0a71fce049ce6d0db25d5f232450f3c329c49e5e6a3d6d36dd6348c8ad61cf): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-fqxm5" Dec 10 10:59:15 crc kubenswrapper[4780]: E1210 10:59:15.542057 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"obo-prometheus-operator-668cf9dfbb-fqxm5_openshift-operators(fd80e9af-20c4-4aaa-9f38-4f46c3b610fb)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"obo-prometheus-operator-668cf9dfbb-fqxm5_openshift-operators(fd80e9af-20c4-4aaa-9f38-4f46c3b610fb)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-668cf9dfbb-fqxm5_openshift-operators_fd80e9af-20c4-4aaa-9f38-4f46c3b610fb_0(9c0a71fce049ce6d0db25d5f232450f3c329c49e5e6a3d6d36dd6348c8ad61cf): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\"" pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-fqxm5" podUID="fd80e9af-20c4-4aaa-9f38-4f46c3b610fb" Dec 10 10:59:15 crc kubenswrapper[4780]: E1210 10:59:15.552256 4780 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-7767cf9dd9-bbrkh_openshift-operators_db680a43-b9fa-45d6-b751-f4467cfe5065_0(d927f31f7f2b62051c53c945cff014c3e5861a49d55750bbad0f4cd6b289e409): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Dec 10 10:59:15 crc kubenswrapper[4780]: E1210 10:59:15.552381 4780 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-7767cf9dd9-bbrkh_openshift-operators_db680a43-b9fa-45d6-b751-f4467cfe5065_0(d927f31f7f2b62051c53c945cff014c3e5861a49d55750bbad0f4cd6b289e409): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/obo-prometheus-operator-admission-webhook-7767cf9dd9-bbrkh" Dec 10 10:59:15 crc kubenswrapper[4780]: E1210 10:59:15.552427 4780 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-7767cf9dd9-bbrkh_openshift-operators_db680a43-b9fa-45d6-b751-f4467cfe5065_0(d927f31f7f2b62051c53c945cff014c3e5861a49d55750bbad0f4cd6b289e409): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-operators/obo-prometheus-operator-admission-webhook-7767cf9dd9-bbrkh" Dec 10 10:59:15 crc kubenswrapper[4780]: E1210 10:59:15.552511 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"obo-prometheus-operator-admission-webhook-7767cf9dd9-bbrkh_openshift-operators(db680a43-b9fa-45d6-b751-f4467cfe5065)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"obo-prometheus-operator-admission-webhook-7767cf9dd9-bbrkh_openshift-operators(db680a43-b9fa-45d6-b751-f4467cfe5065)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-7767cf9dd9-bbrkh_openshift-operators_db680a43-b9fa-45d6-b751-f4467cfe5065_0(d927f31f7f2b62051c53c945cff014c3e5861a49d55750bbad0f4cd6b289e409): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\"" pod="openshift-operators/obo-prometheus-operator-admission-webhook-7767cf9dd9-bbrkh" podUID="db680a43-b9fa-45d6-b751-f4467cfe5065" Dec 10 10:59:15 crc kubenswrapper[4780]: E1210 10:59:15.568381 4780 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_perses-operator-5446b9c989-m5pj4_openshift-operators_fb9d5eb8-6ba0-4dea-8226-a3e362924f16_0(3fdf5a5616171b006aed8decf877b9cb8e14c5a4f5ad8d3094976143934babfb): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Dec 10 10:59:15 crc kubenswrapper[4780]: E1210 10:59:15.568475 4780 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_perses-operator-5446b9c989-m5pj4_openshift-operators_fb9d5eb8-6ba0-4dea-8226-a3e362924f16_0(3fdf5a5616171b006aed8decf877b9cb8e14c5a4f5ad8d3094976143934babfb): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/perses-operator-5446b9c989-m5pj4" Dec 10 10:59:15 crc kubenswrapper[4780]: E1210 10:59:15.568504 4780 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_perses-operator-5446b9c989-m5pj4_openshift-operators_fb9d5eb8-6ba0-4dea-8226-a3e362924f16_0(3fdf5a5616171b006aed8decf877b9cb8e14c5a4f5ad8d3094976143934babfb): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/perses-operator-5446b9c989-m5pj4" Dec 10 10:59:15 crc kubenswrapper[4780]: E1210 10:59:15.568559 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"perses-operator-5446b9c989-m5pj4_openshift-operators(fb9d5eb8-6ba0-4dea-8226-a3e362924f16)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"perses-operator-5446b9c989-m5pj4_openshift-operators(fb9d5eb8-6ba0-4dea-8226-a3e362924f16)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_perses-operator-5446b9c989-m5pj4_openshift-operators_fb9d5eb8-6ba0-4dea-8226-a3e362924f16_0(3fdf5a5616171b006aed8decf877b9cb8e14c5a4f5ad8d3094976143934babfb): no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\"" pod="openshift-operators/perses-operator-5446b9c989-m5pj4" podUID="fb9d5eb8-6ba0-4dea-8226-a3e362924f16" Dec 10 10:59:15 crc kubenswrapper[4780]: E1210 10:59:15.579010 4780 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-7767cf9dd9-fcj9f_openshift-operators_42449c87-f0c6-4433-92cb-f89e51cb5a14_0(b2356e909e3ff7456de6a577a5a65084acb8ea897f6ecfd76f99d4c7f0958923): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Dec 10 10:59:15 crc kubenswrapper[4780]: E1210 10:59:15.579104 4780 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-7767cf9dd9-fcj9f_openshift-operators_42449c87-f0c6-4433-92cb-f89e51cb5a14_0(b2356e909e3ff7456de6a577a5a65084acb8ea897f6ecfd76f99d4c7f0958923): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/obo-prometheus-operator-admission-webhook-7767cf9dd9-fcj9f" Dec 10 10:59:15 crc kubenswrapper[4780]: E1210 10:59:15.579134 4780 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-7767cf9dd9-fcj9f_openshift-operators_42449c87-f0c6-4433-92cb-f89e51cb5a14_0(b2356e909e3ff7456de6a577a5a65084acb8ea897f6ecfd76f99d4c7f0958923): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/obo-prometheus-operator-admission-webhook-7767cf9dd9-fcj9f" Dec 10 10:59:15 crc kubenswrapper[4780]: E1210 10:59:15.579190 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"obo-prometheus-operator-admission-webhook-7767cf9dd9-fcj9f_openshift-operators(42449c87-f0c6-4433-92cb-f89e51cb5a14)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"obo-prometheus-operator-admission-webhook-7767cf9dd9-fcj9f_openshift-operators(42449c87-f0c6-4433-92cb-f89e51cb5a14)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-7767cf9dd9-fcj9f_openshift-operators_42449c87-f0c6-4433-92cb-f89e51cb5a14_0(b2356e909e3ff7456de6a577a5a65084acb8ea897f6ecfd76f99d4c7f0958923): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\"" pod="openshift-operators/obo-prometheus-operator-admission-webhook-7767cf9dd9-fcj9f" podUID="42449c87-f0c6-4433-92cb-f89e51cb5a14" Dec 10 10:59:15 crc kubenswrapper[4780]: E1210 10:59:15.601244 4780 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_observability-operator-d8bb48f5d-kmhks_openshift-operators_780b464e-7e56-49ba-b0af-fc0731e1290d_0(85c7f10a45d68f47066c72a1b39a444546bf3272dda00fae53ed25c7f1c26d54): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
Dec 10 10:59:15 crc kubenswrapper[4780]: E1210 10:59:15.601361 4780 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_observability-operator-d8bb48f5d-kmhks_openshift-operators_780b464e-7e56-49ba-b0af-fc0731e1290d_0(85c7f10a45d68f47066c72a1b39a444546bf3272dda00fae53ed25c7f1c26d54): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/observability-operator-d8bb48f5d-kmhks" Dec 10 10:59:15 crc kubenswrapper[4780]: E1210 10:59:15.601390 4780 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_observability-operator-d8bb48f5d-kmhks_openshift-operators_780b464e-7e56-49ba-b0af-fc0731e1290d_0(85c7f10a45d68f47066c72a1b39a444546bf3272dda00fae53ed25c7f1c26d54): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/observability-operator-d8bb48f5d-kmhks" Dec 10 10:59:15 crc kubenswrapper[4780]: E1210 10:59:15.601451 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"observability-operator-d8bb48f5d-kmhks_openshift-operators(780b464e-7e56-49ba-b0af-fc0731e1290d)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"observability-operator-d8bb48f5d-kmhks_openshift-operators(780b464e-7e56-49ba-b0af-fc0731e1290d)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_observability-operator-d8bb48f5d-kmhks_openshift-operators_780b464e-7e56-49ba-b0af-fc0731e1290d_0(85c7f10a45d68f47066c72a1b39a444546bf3272dda00fae53ed25c7f1c26d54): no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\"" pod="openshift-operators/observability-operator-d8bb48f5d-kmhks" podUID="780b464e-7e56-49ba-b0af-fc0731e1290d" Dec 10 10:59:19 crc kubenswrapper[4780]: I1210 10:59:19.059316 4780 generic.go:334] "Generic (PLEG): container finished" podID="f85d01b9-be91-4ff9-bf9d-886fa995d582" containerID="861af87530718e309fb66541cfd6f906c2798a7b77a77431f77b672922773a9a" exitCode=0 Dec 10 10:59:19 crc kubenswrapper[4780]: I1210 10:59:19.060035 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-mkxpc" event={"ID":"f85d01b9-be91-4ff9-bf9d-886fa995d582","Type":"ContainerDied","Data":"861af87530718e309fb66541cfd6f906c2798a7b77a77431f77b672922773a9a"} Dec 10 10:59:22 crc kubenswrapper[4780]: I1210 10:59:22.085114 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-mkxpc" event={"ID":"f85d01b9-be91-4ff9-bf9d-886fa995d582","Type":"ContainerStarted","Data":"b9f8c2678356bf71f6564ed6174ecf2a348c571e2cbe82baf5416ae322ebb4c3"} Dec 10 10:59:22 crc kubenswrapper[4780]: I1210 10:59:22.116895 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-mkxpc" podStartSLOduration=2.256980729 podStartE2EDuration="51.116861499s" podCreationTimestamp="2025-12-10 10:58:31 +0000 UTC" firstStartedPulling="2025-12-10 10:58:32.917346203 +0000 UTC m=+817.770739646" lastFinishedPulling="2025-12-10 10:59:21.777226973 +0000 UTC m=+866.630620416" observedRunningTime="2025-12-10 10:59:22.110767524 +0000 UTC m=+866.964160987" watchObservedRunningTime="2025-12-10 10:59:22.116861499 +0000 UTC m=+866.970254942" Dec 10 10:59:26 crc kubenswrapper[4780]: I1210 10:59:26.957943 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/observability-operator-d8bb48f5d-kmhks" Dec 10 10:59:26 crc kubenswrapper[4780]: I1210 10:59:26.958812 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-7767cf9dd9-fcj9f" Dec 10 10:59:26 crc kubenswrapper[4780]: I1210 10:59:26.959218 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-7767cf9dd9-fcj9f" Dec 10 10:59:26 crc kubenswrapper[4780]: I1210 10:59:26.959605 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operators/observability-operator-d8bb48f5d-kmhks" Dec 10 10:59:27 crc kubenswrapper[4780]: I1210 10:59:27.475380 4780 patch_prober.go:28] interesting pod/machine-config-daemon-xhdr5 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 10 10:59:27 crc kubenswrapper[4780]: I1210 10:59:27.475827 4780 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" podUID="6bf1dca1-b191-4796-b326-baac53e84045" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 10 10:59:27 crc kubenswrapper[4780]: I1210 10:59:27.493890 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/observability-operator-d8bb48f5d-kmhks"] Dec 10 10:59:27 crc kubenswrapper[4780]: I1210 10:59:27.568631 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/obo-prometheus-operator-admission-webhook-7767cf9dd9-fcj9f"] Dec 10 10:59:27 crc kubenswrapper[4780]: I1210 10:59:27.958379 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/perses-operator-5446b9c989-m5pj4" Dec 10 10:59:27 crc kubenswrapper[4780]: I1210 10:59:27.959707 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/perses-operator-5446b9c989-m5pj4" Dec 10 10:59:28 crc kubenswrapper[4780]: I1210 10:59:28.136502 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/observability-operator-d8bb48f5d-kmhks" event={"ID":"780b464e-7e56-49ba-b0af-fc0731e1290d","Type":"ContainerStarted","Data":"f0e878baa75b7422fe661448550390a1b78dd9d67b3eeb2aafa3f7c901e5057b"} Dec 10 10:59:28 crc kubenswrapper[4780]: I1210 10:59:28.138229 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/obo-prometheus-operator-admission-webhook-7767cf9dd9-fcj9f" event={"ID":"42449c87-f0c6-4433-92cb-f89e51cb5a14","Type":"ContainerStarted","Data":"22d623d3e25b70469d27f5b9fea3f9fd82a8035f9f2a38465f71076edecb58d7"} Dec 10 10:59:28 crc kubenswrapper[4780]: I1210 10:59:28.202153 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/perses-operator-5446b9c989-m5pj4"] Dec 10 10:59:29 crc kubenswrapper[4780]: I1210 10:59:29.146331 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/perses-operator-5446b9c989-m5pj4" event={"ID":"fb9d5eb8-6ba0-4dea-8226-a3e362924f16","Type":"ContainerStarted","Data":"f6684dd7dc28619d7e16083aa6587a07e2ecd6e49bb4feda910a71e9944afe35"} Dec 10 10:59:29 crc kubenswrapper[4780]: I1210 10:59:29.458768 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-zq4nc" Dec 10 10:59:29 crc kubenswrapper[4780]: I1210 10:59:29.960327 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-7767cf9dd9-bbrkh" Dec 10 10:59:29 crc kubenswrapper[4780]: I1210 10:59:29.960371 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-fqxm5" Dec 10 10:59:29 crc kubenswrapper[4780]: I1210 10:59:29.960886 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-7767cf9dd9-bbrkh" Dec 10 10:59:29 crc kubenswrapper[4780]: I1210 10:59:29.960889 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-fqxm5" Dec 10 10:59:30 crc kubenswrapper[4780]: I1210 10:59:30.885245 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/obo-prometheus-operator-admission-webhook-7767cf9dd9-bbrkh"] Dec 10 10:59:30 crc kubenswrapper[4780]: I1210 10:59:30.891967 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/obo-prometheus-operator-668cf9dfbb-fqxm5"] Dec 10 10:59:30 crc kubenswrapper[4780]: W1210 10:59:30.914445 4780 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podfd80e9af_20c4_4aaa_9f38_4f46c3b610fb.slice/crio-e0c4f4e87a97f4a2366ecf90e51939e60e2c40e37e94c9859ed7d3731b2eb8c2 WatchSource:0}: Error finding container e0c4f4e87a97f4a2366ecf90e51939e60e2c40e37e94c9859ed7d3731b2eb8c2: Status 404 returned error can't find the container with id e0c4f4e87a97f4a2366ecf90e51939e60e2c40e37e94c9859ed7d3731b2eb8c2 Dec 10 10:59:31 crc kubenswrapper[4780]: I1210 10:59:31.240041 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-fqxm5" event={"ID":"fd80e9af-20c4-4aaa-9f38-4f46c3b610fb","Type":"ContainerStarted","Data":"e0c4f4e87a97f4a2366ecf90e51939e60e2c40e37e94c9859ed7d3731b2eb8c2"} Dec 10 10:59:31 crc kubenswrapper[4780]: I1210 10:59:31.242808 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/obo-prometheus-operator-admission-webhook-7767cf9dd9-bbrkh" event={"ID":"db680a43-b9fa-45d6-b751-f4467cfe5065","Type":"ContainerStarted","Data":"82aad126a9e8a55919e4a0971815672d19eb742527f2d640d10152063439af44"} Dec 10 10:59:31 crc kubenswrapper[4780]: I1210 10:59:31.687289 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-mkxpc" Dec 10 10:59:31 crc kubenswrapper[4780]: I1210 10:59:31.688214 4780 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-mkxpc" Dec 10 10:59:31 crc kubenswrapper[4780]: I1210 10:59:31.931587 4780 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-mkxpc" Dec 10 10:59:32 crc kubenswrapper[4780]: I1210 10:59:32.327631 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-mkxpc" Dec 10 10:59:32 crc kubenswrapper[4780]: I1210 10:59:32.915352 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-mkxpc"] Dec 10 10:59:33 crc kubenswrapper[4780]: I1210 10:59:33.085503 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-crkcj"] Dec 10 10:59:33 crc kubenswrapper[4780]: I1210 10:59:33.086023 4780 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-crkcj" podUID="2dc3d032-b524-4c67-9bc1-b8d8f3554b3b" containerName="registry-server" containerID="cri-o://22a321f7dc9f6ebd49601178d44c76d6a2685274b7948e25210bc39725bd8e8b" gracePeriod=2 Dec 10 10:59:34 crc kubenswrapper[4780]: I1210 10:59:34.296360 4780 generic.go:334] "Generic (PLEG): container finished" 
podID="2dc3d032-b524-4c67-9bc1-b8d8f3554b3b" containerID="22a321f7dc9f6ebd49601178d44c76d6a2685274b7948e25210bc39725bd8e8b" exitCode=0 Dec 10 10:59:34 crc kubenswrapper[4780]: I1210 10:59:34.296546 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-crkcj" event={"ID":"2dc3d032-b524-4c67-9bc1-b8d8f3554b3b","Type":"ContainerDied","Data":"22a321f7dc9f6ebd49601178d44c76d6a2685274b7948e25210bc39725bd8e8b"} Dec 10 10:59:41 crc kubenswrapper[4780]: E1210 10:59:41.608475 4780 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 22a321f7dc9f6ebd49601178d44c76d6a2685274b7948e25210bc39725bd8e8b is running failed: container process not found" containerID="22a321f7dc9f6ebd49601178d44c76d6a2685274b7948e25210bc39725bd8e8b" cmd=["grpc_health_probe","-addr=:50051"] Dec 10 10:59:41 crc kubenswrapper[4780]: E1210 10:59:41.609979 4780 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 22a321f7dc9f6ebd49601178d44c76d6a2685274b7948e25210bc39725bd8e8b is running failed: container process not found" containerID="22a321f7dc9f6ebd49601178d44c76d6a2685274b7948e25210bc39725bd8e8b" cmd=["grpc_health_probe","-addr=:50051"] Dec 10 10:59:41 crc kubenswrapper[4780]: E1210 10:59:41.610426 4780 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 22a321f7dc9f6ebd49601178d44c76d6a2685274b7948e25210bc39725bd8e8b is running failed: container process not found" containerID="22a321f7dc9f6ebd49601178d44c76d6a2685274b7948e25210bc39725bd8e8b" cmd=["grpc_health_probe","-addr=:50051"] Dec 10 10:59:41 crc kubenswrapper[4780]: E1210 10:59:41.610474 4780 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 22a321f7dc9f6ebd49601178d44c76d6a2685274b7948e25210bc39725bd8e8b is running failed: container process not found" probeType="Readiness" pod="openshift-marketplace/redhat-operators-crkcj" podUID="2dc3d032-b524-4c67-9bc1-b8d8f3554b3b" containerName="registry-server" Dec 10 10:59:44 crc kubenswrapper[4780]: I1210 10:59:44.163749 4780 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-crkcj" Dec 10 10:59:44 crc kubenswrapper[4780]: I1210 10:59:44.310551 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2dc3d032-b524-4c67-9bc1-b8d8f3554b3b-utilities\") pod \"2dc3d032-b524-4c67-9bc1-b8d8f3554b3b\" (UID: \"2dc3d032-b524-4c67-9bc1-b8d8f3554b3b\") " Dec 10 10:59:44 crc kubenswrapper[4780]: I1210 10:59:44.310960 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2dc3d032-b524-4c67-9bc1-b8d8f3554b3b-catalog-content\") pod \"2dc3d032-b524-4c67-9bc1-b8d8f3554b3b\" (UID: \"2dc3d032-b524-4c67-9bc1-b8d8f3554b3b\") " Dec 10 10:59:44 crc kubenswrapper[4780]: I1210 10:59:44.311007 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-v6srh\" (UniqueName: \"kubernetes.io/projected/2dc3d032-b524-4c67-9bc1-b8d8f3554b3b-kube-api-access-v6srh\") pod \"2dc3d032-b524-4c67-9bc1-b8d8f3554b3b\" (UID: \"2dc3d032-b524-4c67-9bc1-b8d8f3554b3b\") " Dec 10 10:59:44 crc kubenswrapper[4780]: I1210 10:59:44.311988 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2dc3d032-b524-4c67-9bc1-b8d8f3554b3b-utilities" (OuterVolumeSpecName: "utilities") pod "2dc3d032-b524-4c67-9bc1-b8d8f3554b3b" (UID: "2dc3d032-b524-4c67-9bc1-b8d8f3554b3b"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 10:59:44 crc kubenswrapper[4780]: I1210 10:59:44.331502 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2dc3d032-b524-4c67-9bc1-b8d8f3554b3b-kube-api-access-v6srh" (OuterVolumeSpecName: "kube-api-access-v6srh") pod "2dc3d032-b524-4c67-9bc1-b8d8f3554b3b" (UID: "2dc3d032-b524-4c67-9bc1-b8d8f3554b3b"). InnerVolumeSpecName "kube-api-access-v6srh". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 10:59:44 crc kubenswrapper[4780]: I1210 10:59:44.412405 4780 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2dc3d032-b524-4c67-9bc1-b8d8f3554b3b-utilities\") on node \"crc\" DevicePath \"\"" Dec 10 10:59:44 crc kubenswrapper[4780]: I1210 10:59:44.412446 4780 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-v6srh\" (UniqueName: \"kubernetes.io/projected/2dc3d032-b524-4c67-9bc1-b8d8f3554b3b-kube-api-access-v6srh\") on node \"crc\" DevicePath \"\"" Dec 10 10:59:44 crc kubenswrapper[4780]: I1210 10:59:44.455630 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2dc3d032-b524-4c67-9bc1-b8d8f3554b3b-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "2dc3d032-b524-4c67-9bc1-b8d8f3554b3b" (UID: "2dc3d032-b524-4c67-9bc1-b8d8f3554b3b"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 10:59:44 crc kubenswrapper[4780]: I1210 10:59:44.514100 4780 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2dc3d032-b524-4c67-9bc1-b8d8f3554b3b-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 10 10:59:44 crc kubenswrapper[4780]: I1210 10:59:44.821657 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-crkcj" event={"ID":"2dc3d032-b524-4c67-9bc1-b8d8f3554b3b","Type":"ContainerDied","Data":"526c4d23adec4705afef1fe14f067df25fbe3867393a04ce86efe1e47478c651"} Dec 10 10:59:44 crc kubenswrapper[4780]: I1210 10:59:44.821726 4780 scope.go:117] "RemoveContainer" containerID="22a321f7dc9f6ebd49601178d44c76d6a2685274b7948e25210bc39725bd8e8b" Dec 10 10:59:44 crc kubenswrapper[4780]: I1210 10:59:44.821769 4780 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-crkcj" Dec 10 10:59:44 crc kubenswrapper[4780]: I1210 10:59:44.863977 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-crkcj"] Dec 10 10:59:44 crc kubenswrapper[4780]: I1210 10:59:44.870150 4780 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-crkcj"] Dec 10 10:59:44 crc kubenswrapper[4780]: I1210 10:59:44.915410 4780 scope.go:117] "RemoveContainer" containerID="e4108e2c1f932b13e2fb7de8d846511af4941f9311cc2a0c449fbfdf1e5dc690" Dec 10 10:59:45 crc kubenswrapper[4780]: I1210 10:59:45.968102 4780 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2dc3d032-b524-4c67-9bc1-b8d8f3554b3b" path="/var/lib/kubelet/pods/2dc3d032-b524-4c67-9bc1-b8d8f3554b3b/volumes" Dec 10 10:59:48 crc kubenswrapper[4780]: E1210 10:59:48.171791 4780 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/cluster-observability-operator/obo-prometheus-operator-admission-webhook-rhel9@sha256:43d33f0125e6b990f4a972ac4e952a065d7e72dc1690c6c836963b7341734aec" Dec 10 10:59:48 crc kubenswrapper[4780]: E1210 10:59:48.172665 4780 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:prometheus-operator-admission-webhook,Image:registry.redhat.io/cluster-observability-operator/obo-prometheus-operator-admission-webhook-rhel9@sha256:43d33f0125e6b990f4a972ac4e952a065d7e72dc1690c6c836963b7341734aec,Command:[],Args:[--web.enable-tls=true --web.cert-file=/tmp/k8s-webhook-server/serving-certs/tls.crt --web.key-file=/tmp/k8s-webhook-server/serving-certs/tls.key],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:https,HostPort:0,ContainerPort:8443,Protocol:TCP,HostIP:,},},Env:[]EnvVar{EnvVar{Name:OPERATOR_CONDITION_NAME,Value:cluster-observability-operator.v1.3.0,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{200 -3} {} 200m DecimalSI},memory: {{209715200 0} {} BinarySI},},Requests:ResourceList{cpu: {{50 -3} {} 50m DecimalSI},memory: {{52428800 0} {} 50Mi 
BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:apiservice-cert,ReadOnly:false,MountPath:/apiserver.local.config/certificates,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:webhook-cert,ReadOnly:false,MountPath:/tmp/k8s-webhook-server/serving-certs,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:*true,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod obo-prometheus-operator-admission-webhook-7767cf9dd9-fcj9f_openshift-operators(42449c87-f0c6-4433-92cb-f89e51cb5a14): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Dec 10 10:59:48 crc kubenswrapper[4780]: E1210 10:59:48.174137 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"prometheus-operator-admission-webhook\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-operators/obo-prometheus-operator-admission-webhook-7767cf9dd9-fcj9f" podUID="42449c87-f0c6-4433-92cb-f89e51cb5a14" Dec 10 10:59:48 crc kubenswrapper[4780]: E1210 10:59:48.857570 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"prometheus-operator-admission-webhook\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/cluster-observability-operator/obo-prometheus-operator-admission-webhook-rhel9@sha256:43d33f0125e6b990f4a972ac4e952a065d7e72dc1690c6c836963b7341734aec\\\"\"" pod="openshift-operators/obo-prometheus-operator-admission-webhook-7767cf9dd9-fcj9f" podUID="42449c87-f0c6-4433-92cb-f89e51cb5a14" Dec 10 10:59:49 crc kubenswrapper[4780]: E1210 10:59:49.937263 4780 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/cluster-observability-operator/perses-rhel9-operator@sha256:9aec4c328ec43e40481e06ca5808deead74b75c0aacb90e9e72966c3fa14f385" Dec 10 10:59:49 crc kubenswrapper[4780]: E1210 10:59:49.937579 4780 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:perses-operator,Image:registry.redhat.io/cluster-observability-operator/perses-rhel9-operator@sha256:9aec4c328ec43e40481e06ca5808deead74b75c0aacb90e9e72966c3fa14f385,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:OPERATOR_CONDITION_NAME,Value:cluster-observability-operator.v1.3.0,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{100 -3} {} 100m DecimalSI},memory: {{134217728 0} {} 
BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:openshift-service-ca,ReadOnly:true,MountPath:/ca,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-brhd8,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000350000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod perses-operator-5446b9c989-m5pj4_openshift-operators(fb9d5eb8-6ba0-4dea-8226-a3e362924f16): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Dec 10 10:59:49 crc kubenswrapper[4780]: E1210 10:59:49.938849 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"perses-operator\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-operators/perses-operator-5446b9c989-m5pj4" podUID="fb9d5eb8-6ba0-4dea-8226-a3e362924f16" Dec 10 10:59:49 crc kubenswrapper[4780]: I1210 10:59:49.943640 4780 scope.go:117] "RemoveContainer" containerID="9de6c01a8076df366571763c1f53ead6c965defb7ca5c26326348176a6772e61" Dec 10 10:59:50 crc kubenswrapper[4780]: I1210 10:59:50.871141 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-fqxm5" event={"ID":"fd80e9af-20c4-4aaa-9f38-4f46c3b610fb","Type":"ContainerStarted","Data":"a8ed0443aa5a240db7f5a1ff5c89ac897c9c278b5ff1d0cd0afc9ae7d08eef08"} Dec 10 10:59:50 crc kubenswrapper[4780]: I1210 10:59:50.873603 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/observability-operator-d8bb48f5d-kmhks" event={"ID":"780b464e-7e56-49ba-b0af-fc0731e1290d","Type":"ContainerStarted","Data":"a909d305f0794857cda8be5a4982af9a2c85d56823458640d528ec8945673ebb"} Dec 10 10:59:50 crc kubenswrapper[4780]: I1210 10:59:50.873837 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operators/observability-operator-d8bb48f5d-kmhks" Dec 10 10:59:50 crc kubenswrapper[4780]: I1210 10:59:50.876222 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/obo-prometheus-operator-admission-webhook-7767cf9dd9-bbrkh" 
event={"ID":"db680a43-b9fa-45d6-b751-f4467cfe5065","Type":"ContainerStarted","Data":"fc741450bca0d5cb280f360373e2831326c984c33d4f5194f089892b924dc439"} Dec 10 10:59:50 crc kubenswrapper[4780]: E1210 10:59:50.877997 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"perses-operator\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/cluster-observability-operator/perses-rhel9-operator@sha256:9aec4c328ec43e40481e06ca5808deead74b75c0aacb90e9e72966c3fa14f385\\\"\"" pod="openshift-operators/perses-operator-5446b9c989-m5pj4" podUID="fb9d5eb8-6ba0-4dea-8226-a3e362924f16" Dec 10 10:59:50 crc kubenswrapper[4780]: I1210 10:59:50.881720 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operators/observability-operator-d8bb48f5d-kmhks" Dec 10 10:59:50 crc kubenswrapper[4780]: I1210 10:59:50.896025 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-fqxm5" podStartSLOduration=40.870783571 podStartE2EDuration="59.895971746s" podCreationTimestamp="2025-12-10 10:58:51 +0000 UTC" firstStartedPulling="2025-12-10 10:59:30.92133308 +0000 UTC m=+875.774726523" lastFinishedPulling="2025-12-10 10:59:49.946521255 +0000 UTC m=+894.799914698" observedRunningTime="2025-12-10 10:59:50.889680626 +0000 UTC m=+895.743074069" watchObservedRunningTime="2025-12-10 10:59:50.895971746 +0000 UTC m=+895.749365189" Dec 10 10:59:50 crc kubenswrapper[4780]: I1210 10:59:50.943740 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operators/obo-prometheus-operator-admission-webhook-7767cf9dd9-bbrkh" podStartSLOduration=40.903853021 podStartE2EDuration="59.943703229s" podCreationTimestamp="2025-12-10 10:58:51 +0000 UTC" firstStartedPulling="2025-12-10 10:59:30.908436702 +0000 UTC m=+875.761830155" lastFinishedPulling="2025-12-10 10:59:49.94828692 +0000 UTC m=+894.801680363" observedRunningTime="2025-12-10 10:59:50.942593411 +0000 UTC m=+895.795986854" watchObservedRunningTime="2025-12-10 10:59:50.943703229 +0000 UTC m=+895.797096672" Dec 10 10:59:50 crc kubenswrapper[4780]: I1210 10:59:50.988696 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operators/observability-operator-d8bb48f5d-kmhks" podStartSLOduration=37.499172341 podStartE2EDuration="59.988663582s" podCreationTimestamp="2025-12-10 10:58:51 +0000 UTC" firstStartedPulling="2025-12-10 10:59:27.50838471 +0000 UTC m=+872.361778153" lastFinishedPulling="2025-12-10 10:59:49.997875961 +0000 UTC m=+894.851269394" observedRunningTime="2025-12-10 10:59:50.984025365 +0000 UTC m=+895.837418818" watchObservedRunningTime="2025-12-10 10:59:50.988663582 +0000 UTC m=+895.842057025" Dec 10 10:59:57 crc kubenswrapper[4780]: I1210 10:59:57.475625 4780 patch_prober.go:28] interesting pod/machine-config-daemon-xhdr5 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 10 10:59:57 crc kubenswrapper[4780]: I1210 10:59:57.476378 4780 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" podUID="6bf1dca1-b191-4796-b326-baac53e84045" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 10 10:59:57 
crc kubenswrapper[4780]: I1210 10:59:57.476442 4780 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" Dec 10 10:59:57 crc kubenswrapper[4780]: I1210 10:59:57.477213 4780 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"3fbfe685041c9fd303141118710f14f576d10f3417446140048debbbb20a3ef0"} pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Dec 10 10:59:57 crc kubenswrapper[4780]: I1210 10:59:57.477390 4780 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" podUID="6bf1dca1-b191-4796-b326-baac53e84045" containerName="machine-config-daemon" containerID="cri-o://3fbfe685041c9fd303141118710f14f576d10f3417446140048debbbb20a3ef0" gracePeriod=600 Dec 10 10:59:57 crc kubenswrapper[4780]: I1210 10:59:57.715958 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["cert-manager/cert-manager-cainjector-7f985d654d-qrnfx"] Dec 10 10:59:57 crc kubenswrapper[4780]: E1210 10:59:57.716317 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2dc3d032-b524-4c67-9bc1-b8d8f3554b3b" containerName="registry-server" Dec 10 10:59:57 crc kubenswrapper[4780]: I1210 10:59:57.716343 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="2dc3d032-b524-4c67-9bc1-b8d8f3554b3b" containerName="registry-server" Dec 10 10:59:57 crc kubenswrapper[4780]: E1210 10:59:57.716373 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2dc3d032-b524-4c67-9bc1-b8d8f3554b3b" containerName="extract-content" Dec 10 10:59:57 crc kubenswrapper[4780]: I1210 10:59:57.716379 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="2dc3d032-b524-4c67-9bc1-b8d8f3554b3b" containerName="extract-content" Dec 10 10:59:57 crc kubenswrapper[4780]: E1210 10:59:57.716391 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2dc3d032-b524-4c67-9bc1-b8d8f3554b3b" containerName="extract-utilities" Dec 10 10:59:57 crc kubenswrapper[4780]: I1210 10:59:57.716398 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="2dc3d032-b524-4c67-9bc1-b8d8f3554b3b" containerName="extract-utilities" Dec 10 10:59:57 crc kubenswrapper[4780]: I1210 10:59:57.716558 4780 memory_manager.go:354] "RemoveStaleState removing state" podUID="2dc3d032-b524-4c67-9bc1-b8d8f3554b3b" containerName="registry-server" Dec 10 10:59:57 crc kubenswrapper[4780]: I1210 10:59:57.717171 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="cert-manager/cert-manager-cainjector-7f985d654d-qrnfx" Dec 10 10:59:57 crc kubenswrapper[4780]: I1210 10:59:57.719188 4780 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-cainjector-dockercfg-b5gjs" Dec 10 10:59:57 crc kubenswrapper[4780]: I1210 10:59:57.720123 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"cert-manager"/"openshift-service-ca.crt" Dec 10 10:59:57 crc kubenswrapper[4780]: I1210 10:59:57.720479 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"cert-manager"/"kube-root-ca.crt" Dec 10 10:59:57 crc kubenswrapper[4780]: I1210 10:59:57.745040 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["cert-manager/cert-manager-5b446d88c5-gzx8b"] Dec 10 10:59:57 crc kubenswrapper[4780]: I1210 10:59:57.747242 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-5b446d88c5-gzx8b" Dec 10 10:59:57 crc kubenswrapper[4780]: I1210 10:59:57.747543 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qc4sp\" (UniqueName: \"kubernetes.io/projected/25874150-d93c-4f21-a259-e4993d52a783-kube-api-access-qc4sp\") pod \"cert-manager-cainjector-7f985d654d-qrnfx\" (UID: \"25874150-d93c-4f21-a259-e4993d52a783\") " pod="cert-manager/cert-manager-cainjector-7f985d654d-qrnfx" Dec 10 10:59:57 crc kubenswrapper[4780]: I1210 10:59:57.747634 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xqk2f\" (UniqueName: \"kubernetes.io/projected/1d0900bc-ab2b-46a8-b940-4e9e8fd9dd81-kube-api-access-xqk2f\") pod \"cert-manager-5b446d88c5-gzx8b\" (UID: \"1d0900bc-ab2b-46a8-b940-4e9e8fd9dd81\") " pod="cert-manager/cert-manager-5b446d88c5-gzx8b" Dec 10 10:59:57 crc kubenswrapper[4780]: I1210 10:59:57.750235 4780 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-dockercfg-2rc7j" Dec 10 10:59:57 crc kubenswrapper[4780]: I1210 10:59:57.766224 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-cainjector-7f985d654d-qrnfx"] Dec 10 10:59:57 crc kubenswrapper[4780]: I1210 10:59:57.778791 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-5b446d88c5-gzx8b"] Dec 10 10:59:57 crc kubenswrapper[4780]: I1210 10:59:57.796476 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["cert-manager/cert-manager-webhook-5655c58dd6-gv6zp"] Dec 10 10:59:57 crc kubenswrapper[4780]: I1210 10:59:57.798285 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="cert-manager/cert-manager-webhook-5655c58dd6-gv6zp" Dec 10 10:59:57 crc kubenswrapper[4780]: I1210 10:59:57.804079 4780 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-webhook-dockercfg-9jx2w" Dec 10 10:59:57 crc kubenswrapper[4780]: I1210 10:59:57.842702 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-webhook-5655c58dd6-gv6zp"] Dec 10 10:59:57 crc kubenswrapper[4780]: I1210 10:59:57.850681 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qc4sp\" (UniqueName: \"kubernetes.io/projected/25874150-d93c-4f21-a259-e4993d52a783-kube-api-access-qc4sp\") pod \"cert-manager-cainjector-7f985d654d-qrnfx\" (UID: \"25874150-d93c-4f21-a259-e4993d52a783\") " pod="cert-manager/cert-manager-cainjector-7f985d654d-qrnfx" Dec 10 10:59:57 crc kubenswrapper[4780]: I1210 10:59:57.851047 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4sfbf\" (UniqueName: \"kubernetes.io/projected/52db28df-3ad3-4c20-9c42-168bb32f6c08-kube-api-access-4sfbf\") pod \"cert-manager-webhook-5655c58dd6-gv6zp\" (UID: \"52db28df-3ad3-4c20-9c42-168bb32f6c08\") " pod="cert-manager/cert-manager-webhook-5655c58dd6-gv6zp" Dec 10 10:59:57 crc kubenswrapper[4780]: I1210 10:59:57.851136 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xqk2f\" (UniqueName: \"kubernetes.io/projected/1d0900bc-ab2b-46a8-b940-4e9e8fd9dd81-kube-api-access-xqk2f\") pod \"cert-manager-5b446d88c5-gzx8b\" (UID: \"1d0900bc-ab2b-46a8-b940-4e9e8fd9dd81\") " pod="cert-manager/cert-manager-5b446d88c5-gzx8b" Dec 10 10:59:57 crc kubenswrapper[4780]: I1210 10:59:57.890431 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xqk2f\" (UniqueName: \"kubernetes.io/projected/1d0900bc-ab2b-46a8-b940-4e9e8fd9dd81-kube-api-access-xqk2f\") pod \"cert-manager-5b446d88c5-gzx8b\" (UID: \"1d0900bc-ab2b-46a8-b940-4e9e8fd9dd81\") " pod="cert-manager/cert-manager-5b446d88c5-gzx8b" Dec 10 10:59:57 crc kubenswrapper[4780]: I1210 10:59:57.896393 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qc4sp\" (UniqueName: \"kubernetes.io/projected/25874150-d93c-4f21-a259-e4993d52a783-kube-api-access-qc4sp\") pod \"cert-manager-cainjector-7f985d654d-qrnfx\" (UID: \"25874150-d93c-4f21-a259-e4993d52a783\") " pod="cert-manager/cert-manager-cainjector-7f985d654d-qrnfx" Dec 10 10:59:57 crc kubenswrapper[4780]: I1210 10:59:57.952499 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4sfbf\" (UniqueName: \"kubernetes.io/projected/52db28df-3ad3-4c20-9c42-168bb32f6c08-kube-api-access-4sfbf\") pod \"cert-manager-webhook-5655c58dd6-gv6zp\" (UID: \"52db28df-3ad3-4c20-9c42-168bb32f6c08\") " pod="cert-manager/cert-manager-webhook-5655c58dd6-gv6zp" Dec 10 10:59:57 crc kubenswrapper[4780]: I1210 10:59:57.977548 4780 generic.go:334] "Generic (PLEG): container finished" podID="6bf1dca1-b191-4796-b326-baac53e84045" containerID="3fbfe685041c9fd303141118710f14f576d10f3417446140048debbbb20a3ef0" exitCode=0 Dec 10 10:59:57 crc kubenswrapper[4780]: I1210 10:59:57.977611 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" 
event={"ID":"6bf1dca1-b191-4796-b326-baac53e84045","Type":"ContainerDied","Data":"3fbfe685041c9fd303141118710f14f576d10f3417446140048debbbb20a3ef0"} Dec 10 10:59:57 crc kubenswrapper[4780]: I1210 10:59:57.977651 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" event={"ID":"6bf1dca1-b191-4796-b326-baac53e84045","Type":"ContainerStarted","Data":"ed771cb9f33bfa44077ddff43b64d4340b6f781baf12fbbaaac2b0023588cc1c"} Dec 10 10:59:57 crc kubenswrapper[4780]: I1210 10:59:57.977736 4780 scope.go:117] "RemoveContainer" containerID="36d00436bbc9ae1da1897b8b9f2c3475af18239ee7063fe90cc695128e282bd3" Dec 10 10:59:57 crc kubenswrapper[4780]: I1210 10:59:57.980308 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4sfbf\" (UniqueName: \"kubernetes.io/projected/52db28df-3ad3-4c20-9c42-168bb32f6c08-kube-api-access-4sfbf\") pod \"cert-manager-webhook-5655c58dd6-gv6zp\" (UID: \"52db28df-3ad3-4c20-9c42-168bb32f6c08\") " pod="cert-manager/cert-manager-webhook-5655c58dd6-gv6zp" Dec 10 10:59:58 crc kubenswrapper[4780]: I1210 10:59:58.044227 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-cainjector-7f985d654d-qrnfx" Dec 10 10:59:58 crc kubenswrapper[4780]: I1210 10:59:58.065703 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-5b446d88c5-gzx8b" Dec 10 10:59:58 crc kubenswrapper[4780]: I1210 10:59:58.137466 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-webhook-5655c58dd6-gv6zp" Dec 10 10:59:58 crc kubenswrapper[4780]: I1210 10:59:58.450371 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-cainjector-7f985d654d-qrnfx"] Dec 10 10:59:58 crc kubenswrapper[4780]: W1210 10:59:58.454049 4780 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod25874150_d93c_4f21_a259_e4993d52a783.slice/crio-9be86c85bc565000e3d776f1e1a4d6e96d6cfa993f93bf40378b4fcd7eead03b WatchSource:0}: Error finding container 9be86c85bc565000e3d776f1e1a4d6e96d6cfa993f93bf40378b4fcd7eead03b: Status 404 returned error can't find the container with id 9be86c85bc565000e3d776f1e1a4d6e96d6cfa993f93bf40378b4fcd7eead03b Dec 10 10:59:58 crc kubenswrapper[4780]: I1210 10:59:58.619419 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-5b446d88c5-gzx8b"] Dec 10 10:59:58 crc kubenswrapper[4780]: I1210 10:59:58.640887 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-webhook-5655c58dd6-gv6zp"] Dec 10 10:59:58 crc kubenswrapper[4780]: I1210 10:59:58.986445 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-cainjector-7f985d654d-qrnfx" event={"ID":"25874150-d93c-4f21-a259-e4993d52a783","Type":"ContainerStarted","Data":"9be86c85bc565000e3d776f1e1a4d6e96d6cfa993f93bf40378b4fcd7eead03b"} Dec 10 10:59:58 crc kubenswrapper[4780]: I1210 10:59:58.988064 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-webhook-5655c58dd6-gv6zp" event={"ID":"52db28df-3ad3-4c20-9c42-168bb32f6c08","Type":"ContainerStarted","Data":"55bad74933bcbb9e7310d3186171849fb270e58c2f01b063c02b75edb8970c75"} Dec 10 10:59:58 crc kubenswrapper[4780]: I1210 10:59:58.989423 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-5b446d88c5-gzx8b" 
event={"ID":"1d0900bc-ab2b-46a8-b940-4e9e8fd9dd81","Type":"ContainerStarted","Data":"61b4f9d37741c0c15978aa7f61f9a8d7f2d71cb10cfca10c6dcce8db08da2e85"} Dec 10 11:00:00 crc kubenswrapper[4780]: I1210 11:00:00.163139 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29422740-nsfxm"] Dec 10 11:00:00 crc kubenswrapper[4780]: I1210 11:00:00.164985 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29422740-nsfxm" Dec 10 11:00:00 crc kubenswrapper[4780]: I1210 11:00:00.168726 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Dec 10 11:00:00 crc kubenswrapper[4780]: I1210 11:00:00.169344 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Dec 10 11:00:00 crc kubenswrapper[4780]: I1210 11:00:00.172987 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29422740-nsfxm"] Dec 10 11:00:00 crc kubenswrapper[4780]: I1210 11:00:00.214732 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/0255c5ce-87c5-455f-a1e4-bca36b91e355-config-volume\") pod \"collect-profiles-29422740-nsfxm\" (UID: \"0255c5ce-87c5-455f-a1e4-bca36b91e355\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29422740-nsfxm" Dec 10 11:00:00 crc kubenswrapper[4780]: I1210 11:00:00.214827 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/0255c5ce-87c5-455f-a1e4-bca36b91e355-secret-volume\") pod \"collect-profiles-29422740-nsfxm\" (UID: \"0255c5ce-87c5-455f-a1e4-bca36b91e355\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29422740-nsfxm" Dec 10 11:00:00 crc kubenswrapper[4780]: I1210 11:00:00.214938 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hx9nv\" (UniqueName: \"kubernetes.io/projected/0255c5ce-87c5-455f-a1e4-bca36b91e355-kube-api-access-hx9nv\") pod \"collect-profiles-29422740-nsfxm\" (UID: \"0255c5ce-87c5-455f-a1e4-bca36b91e355\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29422740-nsfxm" Dec 10 11:00:00 crc kubenswrapper[4780]: I1210 11:00:00.316969 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/0255c5ce-87c5-455f-a1e4-bca36b91e355-config-volume\") pod \"collect-profiles-29422740-nsfxm\" (UID: \"0255c5ce-87c5-455f-a1e4-bca36b91e355\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29422740-nsfxm" Dec 10 11:00:00 crc kubenswrapper[4780]: I1210 11:00:00.317054 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/0255c5ce-87c5-455f-a1e4-bca36b91e355-secret-volume\") pod \"collect-profiles-29422740-nsfxm\" (UID: \"0255c5ce-87c5-455f-a1e4-bca36b91e355\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29422740-nsfxm" Dec 10 11:00:00 crc kubenswrapper[4780]: I1210 11:00:00.317129 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hx9nv\" (UniqueName: 
\"kubernetes.io/projected/0255c5ce-87c5-455f-a1e4-bca36b91e355-kube-api-access-hx9nv\") pod \"collect-profiles-29422740-nsfxm\" (UID: \"0255c5ce-87c5-455f-a1e4-bca36b91e355\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29422740-nsfxm" Dec 10 11:00:00 crc kubenswrapper[4780]: I1210 11:00:00.320464 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/0255c5ce-87c5-455f-a1e4-bca36b91e355-config-volume\") pod \"collect-profiles-29422740-nsfxm\" (UID: \"0255c5ce-87c5-455f-a1e4-bca36b91e355\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29422740-nsfxm" Dec 10 11:00:00 crc kubenswrapper[4780]: I1210 11:00:00.327381 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/0255c5ce-87c5-455f-a1e4-bca36b91e355-secret-volume\") pod \"collect-profiles-29422740-nsfxm\" (UID: \"0255c5ce-87c5-455f-a1e4-bca36b91e355\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29422740-nsfxm" Dec 10 11:00:00 crc kubenswrapper[4780]: I1210 11:00:00.339003 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hx9nv\" (UniqueName: \"kubernetes.io/projected/0255c5ce-87c5-455f-a1e4-bca36b91e355-kube-api-access-hx9nv\") pod \"collect-profiles-29422740-nsfxm\" (UID: \"0255c5ce-87c5-455f-a1e4-bca36b91e355\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29422740-nsfxm" Dec 10 11:00:00 crc kubenswrapper[4780]: I1210 11:00:00.503653 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29422740-nsfxm" Dec 10 11:00:02 crc kubenswrapper[4780]: I1210 11:00:02.390997 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29422740-nsfxm"] Dec 10 11:00:03 crc kubenswrapper[4780]: W1210 11:00:03.235579 4780 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod0255c5ce_87c5_455f_a1e4_bca36b91e355.slice/crio-b7b549a06ce2648e9db2d134a2e4853f1827bb8f429ea9d428e0ae05c81c5ab3 WatchSource:0}: Error finding container b7b549a06ce2648e9db2d134a2e4853f1827bb8f429ea9d428e0ae05c81c5ab3: Status 404 returned error can't find the container with id b7b549a06ce2648e9db2d134a2e4853f1827bb8f429ea9d428e0ae05c81c5ab3 Dec 10 11:00:03 crc kubenswrapper[4780]: I1210 11:00:03.382116 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29422740-nsfxm" event={"ID":"0255c5ce-87c5-455f-a1e4-bca36b91e355","Type":"ContainerStarted","Data":"b7b549a06ce2648e9db2d134a2e4853f1827bb8f429ea9d428e0ae05c81c5ab3"} Dec 10 11:00:07 crc kubenswrapper[4780]: I1210 11:00:07.432619 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29422740-nsfxm" event={"ID":"0255c5ce-87c5-455f-a1e4-bca36b91e355","Type":"ContainerStarted","Data":"0d630ef72fff6d41ee532c8e3abd532bf18aee81bfa73c14df75b031fd43d865"} Dec 10 11:00:07 crc kubenswrapper[4780]: I1210 11:00:07.440162 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/obo-prometheus-operator-admission-webhook-7767cf9dd9-fcj9f" event={"ID":"42449c87-f0c6-4433-92cb-f89e51cb5a14","Type":"ContainerStarted","Data":"93b000fa860e2a5b52da2e7f127eca887222e05929c9c1941a1064c076539eef"} Dec 10 11:00:07 crc kubenswrapper[4780]: I1210 
11:00:07.461721 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29422740-nsfxm" podStartSLOduration=7.461687486 podStartE2EDuration="7.461687486s" podCreationTimestamp="2025-12-10 11:00:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 11:00:07.455396946 +0000 UTC m=+912.308790399" watchObservedRunningTime="2025-12-10 11:00:07.461687486 +0000 UTC m=+912.315080929" Dec 10 11:00:07 crc kubenswrapper[4780]: I1210 11:00:07.496235 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operators/obo-prometheus-operator-admission-webhook-7767cf9dd9-fcj9f" podStartSLOduration=-9223371960.358572 podStartE2EDuration="1m16.496203833s" podCreationTimestamp="2025-12-10 10:58:51 +0000 UTC" firstStartedPulling="2025-12-10 10:59:27.577326673 +0000 UTC m=+872.430720116" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 11:00:07.48309853 +0000 UTC m=+912.336491983" watchObservedRunningTime="2025-12-10 11:00:07.496203833 +0000 UTC m=+912.349597276" Dec 10 11:00:08 crc kubenswrapper[4780]: I1210 11:00:08.454501 4780 generic.go:334] "Generic (PLEG): container finished" podID="0255c5ce-87c5-455f-a1e4-bca36b91e355" containerID="0d630ef72fff6d41ee532c8e3abd532bf18aee81bfa73c14df75b031fd43d865" exitCode=0 Dec 10 11:00:08 crc kubenswrapper[4780]: I1210 11:00:08.454556 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29422740-nsfxm" event={"ID":"0255c5ce-87c5-455f-a1e4-bca36b91e355","Type":"ContainerDied","Data":"0d630ef72fff6d41ee532c8e3abd532bf18aee81bfa73c14df75b031fd43d865"} Dec 10 11:00:09 crc kubenswrapper[4780]: I1210 11:00:09.504264 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-webhook-5655c58dd6-gv6zp" event={"ID":"52db28df-3ad3-4c20-9c42-168bb32f6c08","Type":"ContainerStarted","Data":"e188d30b9f3199693533f598d15afcebeac5b1d61aca4c79814601d4bc37959a"} Dec 10 11:00:09 crc kubenswrapper[4780]: I1210 11:00:09.506202 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="cert-manager/cert-manager-webhook-5655c58dd6-gv6zp" Dec 10 11:00:09 crc kubenswrapper[4780]: I1210 11:00:09.532706 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/perses-operator-5446b9c989-m5pj4" event={"ID":"fb9d5eb8-6ba0-4dea-8226-a3e362924f16","Type":"ContainerStarted","Data":"36b17b3364aea2a4ff2af8913eaed47dab9e3d7538793569ce25d997658df03b"} Dec 10 11:00:09 crc kubenswrapper[4780]: I1210 11:00:09.533880 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operators/perses-operator-5446b9c989-m5pj4" Dec 10 11:00:09 crc kubenswrapper[4780]: I1210 11:00:09.544408 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager/cert-manager-webhook-5655c58dd6-gv6zp" podStartSLOduration=2.449471483 podStartE2EDuration="12.544379752s" podCreationTimestamp="2025-12-10 10:59:57 +0000 UTC" firstStartedPulling="2025-12-10 10:59:58.658089031 +0000 UTC m=+903.511482464" lastFinishedPulling="2025-12-10 11:00:08.75299729 +0000 UTC m=+913.606390733" observedRunningTime="2025-12-10 11:00:09.542785672 +0000 UTC m=+914.396179115" watchObservedRunningTime="2025-12-10 11:00:09.544379752 +0000 UTC m=+914.397773195" Dec 10 11:00:09 crc kubenswrapper[4780]: I1210 11:00:09.549018 4780 kubelet.go:2453] 
"SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-cainjector-7f985d654d-qrnfx" event={"ID":"25874150-d93c-4f21-a259-e4993d52a783","Type":"ContainerStarted","Data":"c9bf66fa0de5e0f107fb79469e557d805a01d6d758f7308b219bfafd339ae79d"} Dec 10 11:00:09 crc kubenswrapper[4780]: I1210 11:00:09.650529 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operators/perses-operator-5446b9c989-m5pj4" podStartSLOduration=37.131856828 podStartE2EDuration="1m17.65050373s" podCreationTimestamp="2025-12-10 10:58:52 +0000 UTC" firstStartedPulling="2025-12-10 10:59:28.227191966 +0000 UTC m=+873.080585409" lastFinishedPulling="2025-12-10 11:00:08.745838868 +0000 UTC m=+913.599232311" observedRunningTime="2025-12-10 11:00:09.576324814 +0000 UTC m=+914.429718257" watchObservedRunningTime="2025-12-10 11:00:09.65050373 +0000 UTC m=+914.503897173" Dec 10 11:00:09 crc kubenswrapper[4780]: I1210 11:00:09.650684 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager/cert-manager-cainjector-7f985d654d-qrnfx" podStartSLOduration=2.372400264 podStartE2EDuration="12.650678655s" podCreationTimestamp="2025-12-10 10:59:57 +0000 UTC" firstStartedPulling="2025-12-10 10:59:58.456587108 +0000 UTC m=+903.309980541" lastFinishedPulling="2025-12-10 11:00:08.734865489 +0000 UTC m=+913.588258932" observedRunningTime="2025-12-10 11:00:09.647627137 +0000 UTC m=+914.501020600" watchObservedRunningTime="2025-12-10 11:00:09.650678655 +0000 UTC m=+914.504072098" Dec 10 11:00:10 crc kubenswrapper[4780]: I1210 11:00:10.284054 4780 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29422740-nsfxm" Dec 10 11:00:10 crc kubenswrapper[4780]: I1210 11:00:10.440087 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/0255c5ce-87c5-455f-a1e4-bca36b91e355-secret-volume\") pod \"0255c5ce-87c5-455f-a1e4-bca36b91e355\" (UID: \"0255c5ce-87c5-455f-a1e4-bca36b91e355\") " Dec 10 11:00:10 crc kubenswrapper[4780]: I1210 11:00:10.440152 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hx9nv\" (UniqueName: \"kubernetes.io/projected/0255c5ce-87c5-455f-a1e4-bca36b91e355-kube-api-access-hx9nv\") pod \"0255c5ce-87c5-455f-a1e4-bca36b91e355\" (UID: \"0255c5ce-87c5-455f-a1e4-bca36b91e355\") " Dec 10 11:00:10 crc kubenswrapper[4780]: I1210 11:00:10.440308 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/0255c5ce-87c5-455f-a1e4-bca36b91e355-config-volume\") pod \"0255c5ce-87c5-455f-a1e4-bca36b91e355\" (UID: \"0255c5ce-87c5-455f-a1e4-bca36b91e355\") " Dec 10 11:00:10 crc kubenswrapper[4780]: I1210 11:00:10.441443 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0255c5ce-87c5-455f-a1e4-bca36b91e355-config-volume" (OuterVolumeSpecName: "config-volume") pod "0255c5ce-87c5-455f-a1e4-bca36b91e355" (UID: "0255c5ce-87c5-455f-a1e4-bca36b91e355"). InnerVolumeSpecName "config-volume". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 11:00:10 crc kubenswrapper[4780]: I1210 11:00:10.530022 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0255c5ce-87c5-455f-a1e4-bca36b91e355-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "0255c5ce-87c5-455f-a1e4-bca36b91e355" (UID: "0255c5ce-87c5-455f-a1e4-bca36b91e355"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:00:10 crc kubenswrapper[4780]: I1210 11:00:10.530043 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0255c5ce-87c5-455f-a1e4-bca36b91e355-kube-api-access-hx9nv" (OuterVolumeSpecName: "kube-api-access-hx9nv") pod "0255c5ce-87c5-455f-a1e4-bca36b91e355" (UID: "0255c5ce-87c5-455f-a1e4-bca36b91e355"). InnerVolumeSpecName "kube-api-access-hx9nv". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:00:10 crc kubenswrapper[4780]: I1210 11:00:10.542719 4780 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/0255c5ce-87c5-455f-a1e4-bca36b91e355-config-volume\") on node \"crc\" DevicePath \"\"" Dec 10 11:00:10 crc kubenswrapper[4780]: I1210 11:00:10.542796 4780 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/0255c5ce-87c5-455f-a1e4-bca36b91e355-secret-volume\") on node \"crc\" DevicePath \"\"" Dec 10 11:00:10 crc kubenswrapper[4780]: I1210 11:00:10.542811 4780 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hx9nv\" (UniqueName: \"kubernetes.io/projected/0255c5ce-87c5-455f-a1e4-bca36b91e355-kube-api-access-hx9nv\") on node \"crc\" DevicePath \"\"" Dec 10 11:00:10 crc kubenswrapper[4780]: I1210 11:00:10.565478 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29422740-nsfxm" event={"ID":"0255c5ce-87c5-455f-a1e4-bca36b91e355","Type":"ContainerDied","Data":"b7b549a06ce2648e9db2d134a2e4853f1827bb8f429ea9d428e0ae05c81c5ab3"} Dec 10 11:00:10 crc kubenswrapper[4780]: I1210 11:00:10.565611 4780 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b7b549a06ce2648e9db2d134a2e4853f1827bb8f429ea9d428e0ae05c81c5ab3" Dec 10 11:00:10 crc kubenswrapper[4780]: I1210 11:00:10.565703 4780 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29422740-nsfxm" Dec 10 11:00:11 crc kubenswrapper[4780]: I1210 11:00:11.574558 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-5b446d88c5-gzx8b" event={"ID":"1d0900bc-ab2b-46a8-b940-4e9e8fd9dd81","Type":"ContainerStarted","Data":"9fd9a20d7c27a1bf9b9d022f565af9e688391b1c0f6627aff5c231a316c06c2a"} Dec 10 11:00:11 crc kubenswrapper[4780]: I1210 11:00:11.596580 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager/cert-manager-5b446d88c5-gzx8b" podStartSLOduration=2.937432539 podStartE2EDuration="14.59655133s" podCreationTimestamp="2025-12-10 10:59:57 +0000 UTC" firstStartedPulling="2025-12-10 10:59:58.634343787 +0000 UTC m=+903.487737240" lastFinishedPulling="2025-12-10 11:00:10.293462588 +0000 UTC m=+915.146856031" observedRunningTime="2025-12-10 11:00:11.591193483 +0000 UTC m=+916.444586936" watchObservedRunningTime="2025-12-10 11:00:11.59655133 +0000 UTC m=+916.449944773" Dec 10 11:00:18 crc kubenswrapper[4780]: I1210 11:00:18.141809 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="cert-manager/cert-manager-webhook-5655c58dd6-gv6zp" Dec 10 11:00:22 crc kubenswrapper[4780]: I1210 11:00:22.421011 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operators/perses-operator-5446b9c989-m5pj4" Dec 10 11:00:42 crc kubenswrapper[4780]: I1210 11:00:42.591472 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463fd7sck"] Dec 10 11:00:42 crc kubenswrapper[4780]: E1210 11:00:42.592537 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0255c5ce-87c5-455f-a1e4-bca36b91e355" containerName="collect-profiles" Dec 10 11:00:42 crc kubenswrapper[4780]: I1210 11:00:42.592555 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="0255c5ce-87c5-455f-a1e4-bca36b91e355" containerName="collect-profiles" Dec 10 11:00:42 crc kubenswrapper[4780]: I1210 11:00:42.592765 4780 memory_manager.go:354] "RemoveStaleState removing state" podUID="0255c5ce-87c5-455f-a1e4-bca36b91e355" containerName="collect-profiles" Dec 10 11:00:42 crc kubenswrapper[4780]: I1210 11:00:42.594146 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463fd7sck" Dec 10 11:00:42 crc kubenswrapper[4780]: I1210 11:00:42.596481 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"default-dockercfg-vmwhc" Dec 10 11:00:42 crc kubenswrapper[4780]: I1210 11:00:42.603076 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463fd7sck"] Dec 10 11:00:42 crc kubenswrapper[4780]: I1210 11:00:42.749178 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6mtjj\" (UniqueName: \"kubernetes.io/projected/35c9ca81-c52f-42cc-be90-e863fd7c6bc1-kube-api-access-6mtjj\") pod \"a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463fd7sck\" (UID: \"35c9ca81-c52f-42cc-be90-e863fd7c6bc1\") " pod="openshift-marketplace/a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463fd7sck" Dec 10 11:00:42 crc kubenswrapper[4780]: I1210 11:00:42.749278 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/35c9ca81-c52f-42cc-be90-e863fd7c6bc1-bundle\") pod \"a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463fd7sck\" (UID: \"35c9ca81-c52f-42cc-be90-e863fd7c6bc1\") " pod="openshift-marketplace/a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463fd7sck" Dec 10 11:00:42 crc kubenswrapper[4780]: I1210 11:00:42.749327 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/35c9ca81-c52f-42cc-be90-e863fd7c6bc1-util\") pod \"a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463fd7sck\" (UID: \"35c9ca81-c52f-42cc-be90-e863fd7c6bc1\") " pod="openshift-marketplace/a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463fd7sck" Dec 10 11:00:42 crc kubenswrapper[4780]: I1210 11:00:42.796518 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8mgrc7"] Dec 10 11:00:42 crc kubenswrapper[4780]: I1210 11:00:42.814600 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8mgrc7" Dec 10 11:00:42 crc kubenswrapper[4780]: I1210 11:00:42.827512 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8mgrc7"] Dec 10 11:00:42 crc kubenswrapper[4780]: I1210 11:00:42.850341 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6mtjj\" (UniqueName: \"kubernetes.io/projected/35c9ca81-c52f-42cc-be90-e863fd7c6bc1-kube-api-access-6mtjj\") pod \"a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463fd7sck\" (UID: \"35c9ca81-c52f-42cc-be90-e863fd7c6bc1\") " pod="openshift-marketplace/a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463fd7sck" Dec 10 11:00:42 crc kubenswrapper[4780]: I1210 11:00:42.850405 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/35c9ca81-c52f-42cc-be90-e863fd7c6bc1-bundle\") pod \"a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463fd7sck\" (UID: \"35c9ca81-c52f-42cc-be90-e863fd7c6bc1\") " pod="openshift-marketplace/a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463fd7sck" Dec 10 11:00:42 crc kubenswrapper[4780]: I1210 11:00:42.850427 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/35c9ca81-c52f-42cc-be90-e863fd7c6bc1-util\") pod \"a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463fd7sck\" (UID: \"35c9ca81-c52f-42cc-be90-e863fd7c6bc1\") " pod="openshift-marketplace/a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463fd7sck" Dec 10 11:00:42 crc kubenswrapper[4780]: I1210 11:00:42.851053 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/35c9ca81-c52f-42cc-be90-e863fd7c6bc1-util\") pod \"a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463fd7sck\" (UID: \"35c9ca81-c52f-42cc-be90-e863fd7c6bc1\") " pod="openshift-marketplace/a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463fd7sck" Dec 10 11:00:42 crc kubenswrapper[4780]: I1210 11:00:42.851505 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/35c9ca81-c52f-42cc-be90-e863fd7c6bc1-bundle\") pod \"a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463fd7sck\" (UID: \"35c9ca81-c52f-42cc-be90-e863fd7c6bc1\") " pod="openshift-marketplace/a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463fd7sck" Dec 10 11:00:42 crc kubenswrapper[4780]: I1210 11:00:42.876781 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6mtjj\" (UniqueName: \"kubernetes.io/projected/35c9ca81-c52f-42cc-be90-e863fd7c6bc1-kube-api-access-6mtjj\") pod \"a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463fd7sck\" (UID: \"35c9ca81-c52f-42cc-be90-e863fd7c6bc1\") " pod="openshift-marketplace/a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463fd7sck" Dec 10 11:00:42 crc kubenswrapper[4780]: I1210 11:00:42.924713 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463fd7sck" Dec 10 11:00:42 crc kubenswrapper[4780]: I1210 11:00:42.952140 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9wrkv\" (UniqueName: \"kubernetes.io/projected/da7532fb-0c99-41ac-a1ba-49435b50929f-kube-api-access-9wrkv\") pod \"4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8mgrc7\" (UID: \"da7532fb-0c99-41ac-a1ba-49435b50929f\") " pod="openshift-marketplace/4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8mgrc7" Dec 10 11:00:42 crc kubenswrapper[4780]: I1210 11:00:42.952213 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/da7532fb-0c99-41ac-a1ba-49435b50929f-bundle\") pod \"4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8mgrc7\" (UID: \"da7532fb-0c99-41ac-a1ba-49435b50929f\") " pod="openshift-marketplace/4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8mgrc7" Dec 10 11:00:42 crc kubenswrapper[4780]: I1210 11:00:42.952252 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/da7532fb-0c99-41ac-a1ba-49435b50929f-util\") pod \"4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8mgrc7\" (UID: \"da7532fb-0c99-41ac-a1ba-49435b50929f\") " pod="openshift-marketplace/4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8mgrc7" Dec 10 11:00:43 crc kubenswrapper[4780]: I1210 11:00:43.053941 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/da7532fb-0c99-41ac-a1ba-49435b50929f-bundle\") pod \"4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8mgrc7\" (UID: \"da7532fb-0c99-41ac-a1ba-49435b50929f\") " pod="openshift-marketplace/4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8mgrc7" Dec 10 11:00:43 crc kubenswrapper[4780]: I1210 11:00:43.054072 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/da7532fb-0c99-41ac-a1ba-49435b50929f-util\") pod \"4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8mgrc7\" (UID: \"da7532fb-0c99-41ac-a1ba-49435b50929f\") " pod="openshift-marketplace/4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8mgrc7" Dec 10 11:00:43 crc kubenswrapper[4780]: I1210 11:00:43.054271 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9wrkv\" (UniqueName: \"kubernetes.io/projected/da7532fb-0c99-41ac-a1ba-49435b50929f-kube-api-access-9wrkv\") pod \"4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8mgrc7\" (UID: \"da7532fb-0c99-41ac-a1ba-49435b50929f\") " pod="openshift-marketplace/4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8mgrc7" Dec 10 11:00:43 crc kubenswrapper[4780]: I1210 11:00:43.054599 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/da7532fb-0c99-41ac-a1ba-49435b50929f-bundle\") pod \"4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8mgrc7\" (UID: \"da7532fb-0c99-41ac-a1ba-49435b50929f\") " pod="openshift-marketplace/4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8mgrc7" Dec 10 11:00:43 crc kubenswrapper[4780]: I1210 11:00:43.055141 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" 
(UniqueName: \"kubernetes.io/empty-dir/da7532fb-0c99-41ac-a1ba-49435b50929f-util\") pod \"4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8mgrc7\" (UID: \"da7532fb-0c99-41ac-a1ba-49435b50929f\") " pod="openshift-marketplace/4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8mgrc7" Dec 10 11:00:43 crc kubenswrapper[4780]: I1210 11:00:43.102613 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9wrkv\" (UniqueName: \"kubernetes.io/projected/da7532fb-0c99-41ac-a1ba-49435b50929f-kube-api-access-9wrkv\") pod \"4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8mgrc7\" (UID: \"da7532fb-0c99-41ac-a1ba-49435b50929f\") " pod="openshift-marketplace/4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8mgrc7" Dec 10 11:00:43 crc kubenswrapper[4780]: I1210 11:00:43.156037 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8mgrc7" Dec 10 11:00:43 crc kubenswrapper[4780]: I1210 11:00:43.445501 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463fd7sck"] Dec 10 11:00:43 crc kubenswrapper[4780]: I1210 11:00:43.484388 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463fd7sck" event={"ID":"35c9ca81-c52f-42cc-be90-e863fd7c6bc1","Type":"ContainerStarted","Data":"6380e90eb31bacec8283d542182c161bdde0799243dcefee85ad6fe0c1453172"} Dec 10 11:00:43 crc kubenswrapper[4780]: I1210 11:00:43.895308 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8mgrc7"] Dec 10 11:00:43 crc kubenswrapper[4780]: W1210 11:00:43.897857 4780 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podda7532fb_0c99_41ac_a1ba_49435b50929f.slice/crio-64ae9e0bdc13cc6234990f0ca341af0ea0417e7093797b04f42a729b49b1b90c WatchSource:0}: Error finding container 64ae9e0bdc13cc6234990f0ca341af0ea0417e7093797b04f42a729b49b1b90c: Status 404 returned error can't find the container with id 64ae9e0bdc13cc6234990f0ca341af0ea0417e7093797b04f42a729b49b1b90c Dec 10 11:00:44 crc kubenswrapper[4780]: I1210 11:00:44.496039 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8mgrc7" event={"ID":"da7532fb-0c99-41ac-a1ba-49435b50929f","Type":"ContainerStarted","Data":"64ae9e0bdc13cc6234990f0ca341af0ea0417e7093797b04f42a729b49b1b90c"} Dec 10 11:00:45 crc kubenswrapper[4780]: I1210 11:00:45.511378 4780 generic.go:334] "Generic (PLEG): container finished" podID="35c9ca81-c52f-42cc-be90-e863fd7c6bc1" containerID="6791ad8ad3bab5028e841c7b76468f7e576780aad46f3879e768663e8eecf8ab" exitCode=0 Dec 10 11:00:45 crc kubenswrapper[4780]: I1210 11:00:45.511508 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463fd7sck" event={"ID":"35c9ca81-c52f-42cc-be90-e863fd7c6bc1","Type":"ContainerDied","Data":"6791ad8ad3bab5028e841c7b76468f7e576780aad46f3879e768663e8eecf8ab"} Dec 10 11:00:45 crc kubenswrapper[4780]: I1210 11:00:45.515751 4780 generic.go:334] "Generic (PLEG): container finished" podID="da7532fb-0c99-41ac-a1ba-49435b50929f" containerID="817de75e7f9e74765d9d93c23114fb2515b039eab5706b314fa3c605bc6c27a8" 
exitCode=0 Dec 10 11:00:45 crc kubenswrapper[4780]: I1210 11:00:45.515820 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8mgrc7" event={"ID":"da7532fb-0c99-41ac-a1ba-49435b50929f","Type":"ContainerDied","Data":"817de75e7f9e74765d9d93c23114fb2515b039eab5706b314fa3c605bc6c27a8"} Dec 10 11:00:47 crc kubenswrapper[4780]: I1210 11:00:47.541589 4780 generic.go:334] "Generic (PLEG): container finished" podID="da7532fb-0c99-41ac-a1ba-49435b50929f" containerID="bcd52607d478d62efb9c0ba6113a75d0e4004f5914f3fd685611612c4657a3cb" exitCode=0 Dec 10 11:00:47 crc kubenswrapper[4780]: I1210 11:00:47.541678 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8mgrc7" event={"ID":"da7532fb-0c99-41ac-a1ba-49435b50929f","Type":"ContainerDied","Data":"bcd52607d478d62efb9c0ba6113a75d0e4004f5914f3fd685611612c4657a3cb"} Dec 10 11:00:48 crc kubenswrapper[4780]: I1210 11:00:48.551253 4780 generic.go:334] "Generic (PLEG): container finished" podID="35c9ca81-c52f-42cc-be90-e863fd7c6bc1" containerID="ea7858835c4dde5aed74ff0ec93782cc20ffad5ac8d8e8ac76cc785217c00926" exitCode=0 Dec 10 11:00:48 crc kubenswrapper[4780]: I1210 11:00:48.551488 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463fd7sck" event={"ID":"35c9ca81-c52f-42cc-be90-e863fd7c6bc1","Type":"ContainerDied","Data":"ea7858835c4dde5aed74ff0ec93782cc20ffad5ac8d8e8ac76cc785217c00926"} Dec 10 11:00:48 crc kubenswrapper[4780]: I1210 11:00:48.557996 4780 generic.go:334] "Generic (PLEG): container finished" podID="da7532fb-0c99-41ac-a1ba-49435b50929f" containerID="114bcbc506080acc8bf24ee2b7b144280fdce1ea5a5bc12317b90591a2e34a98" exitCode=0 Dec 10 11:00:48 crc kubenswrapper[4780]: I1210 11:00:48.558073 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8mgrc7" event={"ID":"da7532fb-0c99-41ac-a1ba-49435b50929f","Type":"ContainerDied","Data":"114bcbc506080acc8bf24ee2b7b144280fdce1ea5a5bc12317b90591a2e34a98"} Dec 10 11:00:49 crc kubenswrapper[4780]: I1210 11:00:49.569257 4780 generic.go:334] "Generic (PLEG): container finished" podID="35c9ca81-c52f-42cc-be90-e863fd7c6bc1" containerID="d34e5f4f2ef7432f85c619f7b52e6876a994253a158186d964a8ae0c22c919a7" exitCode=0 Dec 10 11:00:49 crc kubenswrapper[4780]: I1210 11:00:49.569352 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463fd7sck" event={"ID":"35c9ca81-c52f-42cc-be90-e863fd7c6bc1","Type":"ContainerDied","Data":"d34e5f4f2ef7432f85c619f7b52e6876a994253a158186d964a8ae0c22c919a7"} Dec 10 11:00:49 crc kubenswrapper[4780]: I1210 11:00:49.880140 4780 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8mgrc7" Dec 10 11:00:49 crc kubenswrapper[4780]: I1210 11:00:49.991601 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9wrkv\" (UniqueName: \"kubernetes.io/projected/da7532fb-0c99-41ac-a1ba-49435b50929f-kube-api-access-9wrkv\") pod \"da7532fb-0c99-41ac-a1ba-49435b50929f\" (UID: \"da7532fb-0c99-41ac-a1ba-49435b50929f\") " Dec 10 11:00:49 crc kubenswrapper[4780]: I1210 11:00:49.991701 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/da7532fb-0c99-41ac-a1ba-49435b50929f-util\") pod \"da7532fb-0c99-41ac-a1ba-49435b50929f\" (UID: \"da7532fb-0c99-41ac-a1ba-49435b50929f\") " Dec 10 11:00:49 crc kubenswrapper[4780]: I1210 11:00:49.991841 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/da7532fb-0c99-41ac-a1ba-49435b50929f-bundle\") pod \"da7532fb-0c99-41ac-a1ba-49435b50929f\" (UID: \"da7532fb-0c99-41ac-a1ba-49435b50929f\") " Dec 10 11:00:50 crc kubenswrapper[4780]: I1210 11:00:50.011314 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/da7532fb-0c99-41ac-a1ba-49435b50929f-bundle" (OuterVolumeSpecName: "bundle") pod "da7532fb-0c99-41ac-a1ba-49435b50929f" (UID: "da7532fb-0c99-41ac-a1ba-49435b50929f"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 11:00:50 crc kubenswrapper[4780]: I1210 11:00:50.013420 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/da7532fb-0c99-41ac-a1ba-49435b50929f-util" (OuterVolumeSpecName: "util") pod "da7532fb-0c99-41ac-a1ba-49435b50929f" (UID: "da7532fb-0c99-41ac-a1ba-49435b50929f"). InnerVolumeSpecName "util". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 11:00:50 crc kubenswrapper[4780]: I1210 11:00:50.015853 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/da7532fb-0c99-41ac-a1ba-49435b50929f-kube-api-access-9wrkv" (OuterVolumeSpecName: "kube-api-access-9wrkv") pod "da7532fb-0c99-41ac-a1ba-49435b50929f" (UID: "da7532fb-0c99-41ac-a1ba-49435b50929f"). InnerVolumeSpecName "kube-api-access-9wrkv". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:00:50 crc kubenswrapper[4780]: I1210 11:00:50.094233 4780 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/da7532fb-0c99-41ac-a1ba-49435b50929f-bundle\") on node \"crc\" DevicePath \"\"" Dec 10 11:00:50 crc kubenswrapper[4780]: I1210 11:00:50.094309 4780 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9wrkv\" (UniqueName: \"kubernetes.io/projected/da7532fb-0c99-41ac-a1ba-49435b50929f-kube-api-access-9wrkv\") on node \"crc\" DevicePath \"\"" Dec 10 11:00:50 crc kubenswrapper[4780]: I1210 11:00:50.094321 4780 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/da7532fb-0c99-41ac-a1ba-49435b50929f-util\") on node \"crc\" DevicePath \"\"" Dec 10 11:00:50 crc kubenswrapper[4780]: I1210 11:00:50.589402 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8mgrc7" event={"ID":"da7532fb-0c99-41ac-a1ba-49435b50929f","Type":"ContainerDied","Data":"64ae9e0bdc13cc6234990f0ca341af0ea0417e7093797b04f42a729b49b1b90c"} Dec 10 11:00:50 crc kubenswrapper[4780]: I1210 11:00:50.589463 4780 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="64ae9e0bdc13cc6234990f0ca341af0ea0417e7093797b04f42a729b49b1b90c" Dec 10 11:00:50 crc kubenswrapper[4780]: I1210 11:00:50.589468 4780 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8mgrc7" Dec 10 11:00:50 crc kubenswrapper[4780]: I1210 11:00:50.843188 4780 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463fd7sck" Dec 10 11:00:51 crc kubenswrapper[4780]: I1210 11:00:51.014876 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/35c9ca81-c52f-42cc-be90-e863fd7c6bc1-util\") pod \"35c9ca81-c52f-42cc-be90-e863fd7c6bc1\" (UID: \"35c9ca81-c52f-42cc-be90-e863fd7c6bc1\") " Dec 10 11:00:51 crc kubenswrapper[4780]: I1210 11:00:51.015035 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/35c9ca81-c52f-42cc-be90-e863fd7c6bc1-bundle\") pod \"35c9ca81-c52f-42cc-be90-e863fd7c6bc1\" (UID: \"35c9ca81-c52f-42cc-be90-e863fd7c6bc1\") " Dec 10 11:00:51 crc kubenswrapper[4780]: I1210 11:00:51.015090 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6mtjj\" (UniqueName: \"kubernetes.io/projected/35c9ca81-c52f-42cc-be90-e863fd7c6bc1-kube-api-access-6mtjj\") pod \"35c9ca81-c52f-42cc-be90-e863fd7c6bc1\" (UID: \"35c9ca81-c52f-42cc-be90-e863fd7c6bc1\") " Dec 10 11:00:51 crc kubenswrapper[4780]: I1210 11:00:51.018026 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/35c9ca81-c52f-42cc-be90-e863fd7c6bc1-bundle" (OuterVolumeSpecName: "bundle") pod "35c9ca81-c52f-42cc-be90-e863fd7c6bc1" (UID: "35c9ca81-c52f-42cc-be90-e863fd7c6bc1"). InnerVolumeSpecName "bundle". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 11:00:51 crc kubenswrapper[4780]: I1210 11:00:51.018664 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/35c9ca81-c52f-42cc-be90-e863fd7c6bc1-kube-api-access-6mtjj" (OuterVolumeSpecName: "kube-api-access-6mtjj") pod "35c9ca81-c52f-42cc-be90-e863fd7c6bc1" (UID: "35c9ca81-c52f-42cc-be90-e863fd7c6bc1"). InnerVolumeSpecName "kube-api-access-6mtjj". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:00:51 crc kubenswrapper[4780]: I1210 11:00:51.028981 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/35c9ca81-c52f-42cc-be90-e863fd7c6bc1-util" (OuterVolumeSpecName: "util") pod "35c9ca81-c52f-42cc-be90-e863fd7c6bc1" (UID: "35c9ca81-c52f-42cc-be90-e863fd7c6bc1"). InnerVolumeSpecName "util". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 11:00:51 crc kubenswrapper[4780]: I1210 11:00:51.117951 4780 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/35c9ca81-c52f-42cc-be90-e863fd7c6bc1-bundle\") on node \"crc\" DevicePath \"\"" Dec 10 11:00:51 crc kubenswrapper[4780]: I1210 11:00:51.118002 4780 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6mtjj\" (UniqueName: \"kubernetes.io/projected/35c9ca81-c52f-42cc-be90-e863fd7c6bc1-kube-api-access-6mtjj\") on node \"crc\" DevicePath \"\"" Dec 10 11:00:51 crc kubenswrapper[4780]: I1210 11:00:51.118013 4780 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/35c9ca81-c52f-42cc-be90-e863fd7c6bc1-util\") on node \"crc\" DevicePath \"\"" Dec 10 11:00:51 crc kubenswrapper[4780]: I1210 11:00:51.599503 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463fd7sck" event={"ID":"35c9ca81-c52f-42cc-be90-e863fd7c6bc1","Type":"ContainerDied","Data":"6380e90eb31bacec8283d542182c161bdde0799243dcefee85ad6fe0c1453172"} Dec 10 11:00:51 crc kubenswrapper[4780]: I1210 11:00:51.599942 4780 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="6380e90eb31bacec8283d542182c161bdde0799243dcefee85ad6fe0c1453172" Dec 10 11:00:51 crc kubenswrapper[4780]: I1210 11:00:51.599551 4780 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463fd7sck" Dec 10 11:00:58 crc kubenswrapper[4780]: I1210 11:00:58.634471 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operators-redhat/loki-operator-controller-manager-54599dc8c7-jsrb7"] Dec 10 11:00:58 crc kubenswrapper[4780]: E1210 11:00:58.635572 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="da7532fb-0c99-41ac-a1ba-49435b50929f" containerName="util" Dec 10 11:00:58 crc kubenswrapper[4780]: I1210 11:00:58.635601 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="da7532fb-0c99-41ac-a1ba-49435b50929f" containerName="util" Dec 10 11:00:58 crc kubenswrapper[4780]: E1210 11:00:58.635635 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="35c9ca81-c52f-42cc-be90-e863fd7c6bc1" containerName="pull" Dec 10 11:00:58 crc kubenswrapper[4780]: I1210 11:00:58.635642 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="35c9ca81-c52f-42cc-be90-e863fd7c6bc1" containerName="pull" Dec 10 11:00:58 crc kubenswrapper[4780]: E1210 11:00:58.635664 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="35c9ca81-c52f-42cc-be90-e863fd7c6bc1" containerName="extract" Dec 10 11:00:58 crc kubenswrapper[4780]: I1210 11:00:58.635671 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="35c9ca81-c52f-42cc-be90-e863fd7c6bc1" containerName="extract" Dec 10 11:00:58 crc kubenswrapper[4780]: E1210 11:00:58.635680 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="35c9ca81-c52f-42cc-be90-e863fd7c6bc1" containerName="util" Dec 10 11:00:58 crc kubenswrapper[4780]: I1210 11:00:58.635686 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="35c9ca81-c52f-42cc-be90-e863fd7c6bc1" containerName="util" Dec 10 11:00:58 crc kubenswrapper[4780]: E1210 11:00:58.635694 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="da7532fb-0c99-41ac-a1ba-49435b50929f" containerName="extract" Dec 10 11:00:58 crc kubenswrapper[4780]: I1210 11:00:58.635699 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="da7532fb-0c99-41ac-a1ba-49435b50929f" containerName="extract" Dec 10 11:00:58 crc kubenswrapper[4780]: E1210 11:00:58.635711 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="da7532fb-0c99-41ac-a1ba-49435b50929f" containerName="pull" Dec 10 11:00:58 crc kubenswrapper[4780]: I1210 11:00:58.635716 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="da7532fb-0c99-41ac-a1ba-49435b50929f" containerName="pull" Dec 10 11:00:58 crc kubenswrapper[4780]: I1210 11:00:58.635948 4780 memory_manager.go:354] "RemoveStaleState removing state" podUID="da7532fb-0c99-41ac-a1ba-49435b50929f" containerName="extract" Dec 10 11:00:58 crc kubenswrapper[4780]: I1210 11:00:58.635976 4780 memory_manager.go:354] "RemoveStaleState removing state" podUID="35c9ca81-c52f-42cc-be90-e863fd7c6bc1" containerName="extract" Dec 10 11:00:58 crc kubenswrapper[4780]: I1210 11:00:58.639779 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operators-redhat/loki-operator-controller-manager-54599dc8c7-jsrb7" Dec 10 11:00:58 crc kubenswrapper[4780]: I1210 11:00:58.647991 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators-redhat"/"loki-operator-controller-manager-service-cert" Dec 10 11:00:58 crc kubenswrapper[4780]: I1210 11:00:58.652521 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators-redhat"/"loki-operator-controller-manager-dockercfg-vps6n" Dec 10 11:00:58 crc kubenswrapper[4780]: I1210 11:00:58.653140 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operators-redhat"/"openshift-service-ca.crt" Dec 10 11:00:58 crc kubenswrapper[4780]: I1210 11:00:58.653510 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operators-redhat"/"loki-operator-manager-config" Dec 10 11:00:58 crc kubenswrapper[4780]: I1210 11:00:58.653814 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operators-redhat"/"kube-root-ca.crt" Dec 10 11:00:58 crc kubenswrapper[4780]: I1210 11:00:58.654083 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators-redhat"/"loki-operator-metrics" Dec 10 11:00:58 crc kubenswrapper[4780]: I1210 11:00:58.698610 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators-redhat/loki-operator-controller-manager-54599dc8c7-jsrb7"] Dec 10 11:00:58 crc kubenswrapper[4780]: I1210 11:00:58.767987 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/1c2ed53a-a11a-4eb0-b91e-cba4a0a510fa-apiservice-cert\") pod \"loki-operator-controller-manager-54599dc8c7-jsrb7\" (UID: \"1c2ed53a-a11a-4eb0-b91e-cba4a0a510fa\") " pod="openshift-operators-redhat/loki-operator-controller-manager-54599dc8c7-jsrb7" Dec 10 11:00:58 crc kubenswrapper[4780]: I1210 11:00:58.768072 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hz5wl\" (UniqueName: \"kubernetes.io/projected/1c2ed53a-a11a-4eb0-b91e-cba4a0a510fa-kube-api-access-hz5wl\") pod \"loki-operator-controller-manager-54599dc8c7-jsrb7\" (UID: \"1c2ed53a-a11a-4eb0-b91e-cba4a0a510fa\") " pod="openshift-operators-redhat/loki-operator-controller-manager-54599dc8c7-jsrb7" Dec 10 11:00:58 crc kubenswrapper[4780]: I1210 11:00:58.768095 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"manager-config\" (UniqueName: \"kubernetes.io/configmap/1c2ed53a-a11a-4eb0-b91e-cba4a0a510fa-manager-config\") pod \"loki-operator-controller-manager-54599dc8c7-jsrb7\" (UID: \"1c2ed53a-a11a-4eb0-b91e-cba4a0a510fa\") " pod="openshift-operators-redhat/loki-operator-controller-manager-54599dc8c7-jsrb7" Dec 10 11:00:58 crc kubenswrapper[4780]: I1210 11:00:58.768129 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"loki-operator-metrics-cert\" (UniqueName: \"kubernetes.io/secret/1c2ed53a-a11a-4eb0-b91e-cba4a0a510fa-loki-operator-metrics-cert\") pod \"loki-operator-controller-manager-54599dc8c7-jsrb7\" (UID: \"1c2ed53a-a11a-4eb0-b91e-cba4a0a510fa\") " pod="openshift-operators-redhat/loki-operator-controller-manager-54599dc8c7-jsrb7" Dec 10 11:00:58 crc kubenswrapper[4780]: I1210 11:00:58.768163 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" 
(UniqueName: \"kubernetes.io/secret/1c2ed53a-a11a-4eb0-b91e-cba4a0a510fa-webhook-cert\") pod \"loki-operator-controller-manager-54599dc8c7-jsrb7\" (UID: \"1c2ed53a-a11a-4eb0-b91e-cba4a0a510fa\") " pod="openshift-operators-redhat/loki-operator-controller-manager-54599dc8c7-jsrb7" Dec 10 11:00:58 crc kubenswrapper[4780]: I1210 11:00:58.870580 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hz5wl\" (UniqueName: \"kubernetes.io/projected/1c2ed53a-a11a-4eb0-b91e-cba4a0a510fa-kube-api-access-hz5wl\") pod \"loki-operator-controller-manager-54599dc8c7-jsrb7\" (UID: \"1c2ed53a-a11a-4eb0-b91e-cba4a0a510fa\") " pod="openshift-operators-redhat/loki-operator-controller-manager-54599dc8c7-jsrb7" Dec 10 11:00:58 crc kubenswrapper[4780]: I1210 11:00:58.870658 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"manager-config\" (UniqueName: \"kubernetes.io/configmap/1c2ed53a-a11a-4eb0-b91e-cba4a0a510fa-manager-config\") pod \"loki-operator-controller-manager-54599dc8c7-jsrb7\" (UID: \"1c2ed53a-a11a-4eb0-b91e-cba4a0a510fa\") " pod="openshift-operators-redhat/loki-operator-controller-manager-54599dc8c7-jsrb7" Dec 10 11:00:58 crc kubenswrapper[4780]: I1210 11:00:58.870697 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"loki-operator-metrics-cert\" (UniqueName: \"kubernetes.io/secret/1c2ed53a-a11a-4eb0-b91e-cba4a0a510fa-loki-operator-metrics-cert\") pod \"loki-operator-controller-manager-54599dc8c7-jsrb7\" (UID: \"1c2ed53a-a11a-4eb0-b91e-cba4a0a510fa\") " pod="openshift-operators-redhat/loki-operator-controller-manager-54599dc8c7-jsrb7" Dec 10 11:00:58 crc kubenswrapper[4780]: I1210 11:00:58.870738 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/1c2ed53a-a11a-4eb0-b91e-cba4a0a510fa-webhook-cert\") pod \"loki-operator-controller-manager-54599dc8c7-jsrb7\" (UID: \"1c2ed53a-a11a-4eb0-b91e-cba4a0a510fa\") " pod="openshift-operators-redhat/loki-operator-controller-manager-54599dc8c7-jsrb7" Dec 10 11:00:58 crc kubenswrapper[4780]: I1210 11:00:58.870837 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/1c2ed53a-a11a-4eb0-b91e-cba4a0a510fa-apiservice-cert\") pod \"loki-operator-controller-manager-54599dc8c7-jsrb7\" (UID: \"1c2ed53a-a11a-4eb0-b91e-cba4a0a510fa\") " pod="openshift-operators-redhat/loki-operator-controller-manager-54599dc8c7-jsrb7" Dec 10 11:00:58 crc kubenswrapper[4780]: I1210 11:00:58.872302 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"manager-config\" (UniqueName: \"kubernetes.io/configmap/1c2ed53a-a11a-4eb0-b91e-cba4a0a510fa-manager-config\") pod \"loki-operator-controller-manager-54599dc8c7-jsrb7\" (UID: \"1c2ed53a-a11a-4eb0-b91e-cba4a0a510fa\") " pod="openshift-operators-redhat/loki-operator-controller-manager-54599dc8c7-jsrb7" Dec 10 11:00:58 crc kubenswrapper[4780]: I1210 11:00:58.883726 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/1c2ed53a-a11a-4eb0-b91e-cba4a0a510fa-webhook-cert\") pod \"loki-operator-controller-manager-54599dc8c7-jsrb7\" (UID: \"1c2ed53a-a11a-4eb0-b91e-cba4a0a510fa\") " pod="openshift-operators-redhat/loki-operator-controller-manager-54599dc8c7-jsrb7" Dec 10 11:00:58 crc kubenswrapper[4780]: I1210 11:00:58.884360 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for 
volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/1c2ed53a-a11a-4eb0-b91e-cba4a0a510fa-apiservice-cert\") pod \"loki-operator-controller-manager-54599dc8c7-jsrb7\" (UID: \"1c2ed53a-a11a-4eb0-b91e-cba4a0a510fa\") " pod="openshift-operators-redhat/loki-operator-controller-manager-54599dc8c7-jsrb7" Dec 10 11:00:58 crc kubenswrapper[4780]: I1210 11:00:58.885384 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"loki-operator-metrics-cert\" (UniqueName: \"kubernetes.io/secret/1c2ed53a-a11a-4eb0-b91e-cba4a0a510fa-loki-operator-metrics-cert\") pod \"loki-operator-controller-manager-54599dc8c7-jsrb7\" (UID: \"1c2ed53a-a11a-4eb0-b91e-cba4a0a510fa\") " pod="openshift-operators-redhat/loki-operator-controller-manager-54599dc8c7-jsrb7" Dec 10 11:00:58 crc kubenswrapper[4780]: I1210 11:00:58.930530 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hz5wl\" (UniqueName: \"kubernetes.io/projected/1c2ed53a-a11a-4eb0-b91e-cba4a0a510fa-kube-api-access-hz5wl\") pod \"loki-operator-controller-manager-54599dc8c7-jsrb7\" (UID: \"1c2ed53a-a11a-4eb0-b91e-cba4a0a510fa\") " pod="openshift-operators-redhat/loki-operator-controller-manager-54599dc8c7-jsrb7" Dec 10 11:00:58 crc kubenswrapper[4780]: I1210 11:00:58.960848 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators-redhat/loki-operator-controller-manager-54599dc8c7-jsrb7" Dec 10 11:00:59 crc kubenswrapper[4780]: I1210 11:00:59.501063 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators-redhat/loki-operator-controller-manager-54599dc8c7-jsrb7"] Dec 10 11:00:59 crc kubenswrapper[4780]: W1210 11:00:59.517592 4780 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod1c2ed53a_a11a_4eb0_b91e_cba4a0a510fa.slice/crio-17ee4e27109da0a170710cbd6e9998278ed6b43f295a3f41b29cfeded955c3e8 WatchSource:0}: Error finding container 17ee4e27109da0a170710cbd6e9998278ed6b43f295a3f41b29cfeded955c3e8: Status 404 returned error can't find the container with id 17ee4e27109da0a170710cbd6e9998278ed6b43f295a3f41b29cfeded955c3e8 Dec 10 11:00:59 crc kubenswrapper[4780]: I1210 11:00:59.663314 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators-redhat/loki-operator-controller-manager-54599dc8c7-jsrb7" event={"ID":"1c2ed53a-a11a-4eb0-b91e-cba4a0a510fa","Type":"ContainerStarted","Data":"17ee4e27109da0a170710cbd6e9998278ed6b43f295a3f41b29cfeded955c3e8"} Dec 10 11:01:04 crc kubenswrapper[4780]: I1210 11:01:04.176848 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-logging/cluster-logging-operator-ff9846bd-mswnk"] Dec 10 11:01:04 crc kubenswrapper[4780]: I1210 11:01:04.179903 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-logging/cluster-logging-operator-ff9846bd-mswnk" Dec 10 11:01:04 crc kubenswrapper[4780]: I1210 11:01:04.185824 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-logging"/"openshift-service-ca.crt" Dec 10 11:01:04 crc kubenswrapper[4780]: I1210 11:01:04.186668 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-logging"/"kube-root-ca.crt" Dec 10 11:01:04 crc kubenswrapper[4780]: I1210 11:01:04.187993 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"cluster-logging-operator-dockercfg-p5777" Dec 10 11:01:04 crc kubenswrapper[4780]: I1210 11:01:04.201375 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/cluster-logging-operator-ff9846bd-mswnk"] Dec 10 11:01:04 crc kubenswrapper[4780]: I1210 11:01:04.276012 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9zsf9\" (UniqueName: \"kubernetes.io/projected/358d8d56-efec-4f25-be4c-9552b279f46f-kube-api-access-9zsf9\") pod \"cluster-logging-operator-ff9846bd-mswnk\" (UID: \"358d8d56-efec-4f25-be4c-9552b279f46f\") " pod="openshift-logging/cluster-logging-operator-ff9846bd-mswnk" Dec 10 11:01:04 crc kubenswrapper[4780]: I1210 11:01:04.377836 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9zsf9\" (UniqueName: \"kubernetes.io/projected/358d8d56-efec-4f25-be4c-9552b279f46f-kube-api-access-9zsf9\") pod \"cluster-logging-operator-ff9846bd-mswnk\" (UID: \"358d8d56-efec-4f25-be4c-9552b279f46f\") " pod="openshift-logging/cluster-logging-operator-ff9846bd-mswnk" Dec 10 11:01:04 crc kubenswrapper[4780]: I1210 11:01:04.409334 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9zsf9\" (UniqueName: \"kubernetes.io/projected/358d8d56-efec-4f25-be4c-9552b279f46f-kube-api-access-9zsf9\") pod \"cluster-logging-operator-ff9846bd-mswnk\" (UID: \"358d8d56-efec-4f25-be4c-9552b279f46f\") " pod="openshift-logging/cluster-logging-operator-ff9846bd-mswnk" Dec 10 11:01:04 crc kubenswrapper[4780]: I1210 11:01:04.529889 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-logging/cluster-logging-operator-ff9846bd-mswnk" Dec 10 11:01:11 crc kubenswrapper[4780]: I1210 11:01:11.576220 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/cluster-logging-operator-ff9846bd-mswnk"] Dec 10 11:01:12 crc kubenswrapper[4780]: I1210 11:01:12.010880 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators-redhat/loki-operator-controller-manager-54599dc8c7-jsrb7" event={"ID":"1c2ed53a-a11a-4eb0-b91e-cba4a0a510fa","Type":"ContainerStarted","Data":"d45da67c960cf9efab10af459bb7b05ff53665ee1bebdbe689df7d39c991c53c"} Dec 10 11:01:12 crc kubenswrapper[4780]: I1210 11:01:12.013428 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/cluster-logging-operator-ff9846bd-mswnk" event={"ID":"358d8d56-efec-4f25-be4c-9552b279f46f","Type":"ContainerStarted","Data":"728a91b813988af16684470882088692331e79c0ef70ee60f1f66fef5b94045a"} Dec 10 11:01:29 crc kubenswrapper[4780]: E1210 11:01:29.187700 4780 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/openshift-logging/cluster-logging-rhel9-operator@sha256:f15ebe396f96093861d528a3307a3e38ac2d4dff594f793c1e56011a3a909175" Dec 10 11:01:29 crc kubenswrapper[4780]: E1210 11:01:29.189340 4780 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:cluster-logging-operator,Image:registry.redhat.io/openshift-logging/cluster-logging-rhel9-operator@sha256:f15ebe396f96093861d528a3307a3e38ac2d4dff594f793c1e56011a3a909175,Command:[cluster-logging-operator],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:WATCH_NAMESPACE,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.annotations['olm.targetNamespaces'],},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},EnvVar{Name:POD_NAME,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.name,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},EnvVar{Name:OPERATOR_NAME,Value:cluster-logging-operator,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_VECTOR,Value:registry.redhat.io/openshift-logging/vector-rhel9@sha256:438aa27c0408214bc64d01b20f233d698fa48344aa2dd878bc090232f227c17c,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_LOG_FILE_METRIC_EXPORTER,Value:registry.redhat.io/openshift-logging/log-file-metric-exporter-rhel9@sha256:d4512c0a403fa3cddc646e566f5b6c77ac17c0558a08d8f99ffcdafaba9fba3a,ValueFrom:nil,},EnvVar{Name:OPERATOR_CONDITION_NAME,Value:cluster-logging.v6.2.6,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-9zsf9,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000690000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},T
erminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod cluster-logging-operator-ff9846bd-mswnk_openshift-logging(358d8d56-efec-4f25-be4c-9552b279f46f): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Dec 10 11:01:29 crc kubenswrapper[4780]: E1210 11:01:29.190755 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cluster-logging-operator\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-logging/cluster-logging-operator-ff9846bd-mswnk" podUID="358d8d56-efec-4f25-be4c-9552b279f46f" Dec 10 11:01:29 crc kubenswrapper[4780]: E1210 11:01:29.771126 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cluster-logging-operator\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/openshift-logging/cluster-logging-rhel9-operator@sha256:f15ebe396f96093861d528a3307a3e38ac2d4dff594f793c1e56011a3a909175\\\"\"" pod="openshift-logging/cluster-logging-operator-ff9846bd-mswnk" podUID="358d8d56-efec-4f25-be4c-9552b279f46f" Dec 10 11:01:30 crc kubenswrapper[4780]: E1210 11:01:30.417091 4780 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/openshift4/ose-kube-rbac-proxy-rhel9@sha256:145e9784b681ac7defa0a1547c03a6db9a587bf9be2820428eee84f58f8f1f24" Dec 10 11:01:30 crc kubenswrapper[4780]: E1210 11:01:30.417566 4780 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:kube-rbac-proxy,Image:registry.redhat.io/openshift4/ose-kube-rbac-proxy-rhel9@sha256:145e9784b681ac7defa0a1547c03a6db9a587bf9be2820428eee84f58f8f1f24,Command:[],Args:[--secure-listen-address=0.0.0.0:8443 --upstream=http://127.0.0.1:8080/ --logtostderr=true --tls-cert-file=/var/run/secrets/serving-cert/tls.crt --tls-private-key-file=/var/run/secrets/serving-cert/tls.key --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256,TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256,TLS_RSA_WITH_AES_128_GCM_SHA256,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_CBC_SHA256 --tls-min-version=VersionTLS12 
--v=0],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:https,HostPort:0,ContainerPort:8443,Protocol:TCP,HostIP:,},},Env:[]EnvVar{EnvVar{Name:OPERATOR_CONDITION_NAME,Value:loki-operator.v6.2.6,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:loki-operator-metrics-cert,ReadOnly:false,MountPath:/var/run/secrets/serving-cert,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:apiservice-cert,ReadOnly:false,MountPath:/apiserver.local.config/certificates,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:webhook-cert,ReadOnly:false,MountPath:/tmp/k8s-webhook-server/serving-certs,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-hz5wl,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000700000,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod loki-operator-controller-manager-54599dc8c7-jsrb7_openshift-operators-redhat(1c2ed53a-a11a-4eb0-b91e-cba4a0a510fa): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Dec 10 11:01:30 crc kubenswrapper[4780]: E1210 11:01:30.418824 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-rbac-proxy\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-operators-redhat/loki-operator-controller-manager-54599dc8c7-jsrb7" podUID="1c2ed53a-a11a-4eb0-b91e-cba4a0a510fa" Dec 10 11:01:30 crc kubenswrapper[4780]: I1210 11:01:30.776542 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operators-redhat/loki-operator-controller-manager-54599dc8c7-jsrb7" Dec 10 11:01:30 crc kubenswrapper[4780]: E1210 11:01:30.779798 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/openshift4/ose-kube-rbac-proxy-rhel9@sha256:145e9784b681ac7defa0a1547c03a6db9a587bf9be2820428eee84f58f8f1f24\\\"\"" pod="openshift-operators-redhat/loki-operator-controller-manager-54599dc8c7-jsrb7" podUID="1c2ed53a-a11a-4eb0-b91e-cba4a0a510fa" Dec 10 11:01:30 crc kubenswrapper[4780]: I1210 11:01:30.780889 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operators-redhat/loki-operator-controller-manager-54599dc8c7-jsrb7" Dec 10 11:01:31 crc kubenswrapper[4780]: E1210 11:01:31.785877 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image 
\\\"registry.redhat.io/openshift4/ose-kube-rbac-proxy-rhel9@sha256:145e9784b681ac7defa0a1547c03a6db9a587bf9be2820428eee84f58f8f1f24\\\"\"" pod="openshift-operators-redhat/loki-operator-controller-manager-54599dc8c7-jsrb7" podUID="1c2ed53a-a11a-4eb0-b91e-cba4a0a510fa" Dec 10 11:01:32 crc kubenswrapper[4780]: E1210 11:01:32.791375 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/openshift4/ose-kube-rbac-proxy-rhel9@sha256:145e9784b681ac7defa0a1547c03a6db9a587bf9be2820428eee84f58f8f1f24\\\"\"" pod="openshift-operators-redhat/loki-operator-controller-manager-54599dc8c7-jsrb7" podUID="1c2ed53a-a11a-4eb0-b91e-cba4a0a510fa" Dec 10 11:01:42 crc kubenswrapper[4780]: I1210 11:01:42.977745 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/cluster-logging-operator-ff9846bd-mswnk" event={"ID":"358d8d56-efec-4f25-be4c-9552b279f46f","Type":"ContainerStarted","Data":"98e76e9ddc8d0ede9d3dbd055362de542966804e6313154aff80fc90b4fc0f2f"} Dec 10 11:01:43 crc kubenswrapper[4780]: I1210 11:01:43.002940 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-logging/cluster-logging-operator-ff9846bd-mswnk" podStartSLOduration=8.76314946 podStartE2EDuration="39.002850559s" podCreationTimestamp="2025-12-10 11:01:04 +0000 UTC" firstStartedPulling="2025-12-10 11:01:11.599883686 +0000 UTC m=+976.453277129" lastFinishedPulling="2025-12-10 11:01:41.839584785 +0000 UTC m=+1006.692978228" observedRunningTime="2025-12-10 11:01:42.997144464 +0000 UTC m=+1007.850537917" watchObservedRunningTime="2025-12-10 11:01:43.002850559 +0000 UTC m=+1007.856244002" Dec 10 11:01:47 crc kubenswrapper[4780]: I1210 11:01:47.050817 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators-redhat/loki-operator-controller-manager-54599dc8c7-jsrb7" event={"ID":"1c2ed53a-a11a-4eb0-b91e-cba4a0a510fa","Type":"ContainerStarted","Data":"fac076280839fa40ce3af162e9715806ffd424cd57e207cba28f874e4fc991e0"} Dec 10 11:01:47 crc kubenswrapper[4780]: I1210 11:01:47.093101 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operators-redhat/loki-operator-controller-manager-54599dc8c7-jsrb7" podStartSLOduration=1.964706646 podStartE2EDuration="49.093064747s" podCreationTimestamp="2025-12-10 11:00:58 +0000 UTC" firstStartedPulling="2025-12-10 11:00:59.52244357 +0000 UTC m=+964.375837013" lastFinishedPulling="2025-12-10 11:01:46.650801661 +0000 UTC m=+1011.504195114" observedRunningTime="2025-12-10 11:01:47.092883942 +0000 UTC m=+1011.946277385" watchObservedRunningTime="2025-12-10 11:01:47.093064747 +0000 UTC m=+1011.946458190" Dec 10 11:01:50 crc kubenswrapper[4780]: I1210 11:01:50.562624 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-lfzkp"] Dec 10 11:01:50 crc kubenswrapper[4780]: I1210 11:01:50.565453 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-lfzkp" Dec 10 11:01:50 crc kubenswrapper[4780]: I1210 11:01:50.586945 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-lfzkp"] Dec 10 11:01:51 crc kubenswrapper[4780]: I1210 11:01:51.088839 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hxgvm\" (UniqueName: \"kubernetes.io/projected/85b7225d-3a09-42e4-8fcb-50eede1ecff1-kube-api-access-hxgvm\") pod \"community-operators-lfzkp\" (UID: \"85b7225d-3a09-42e4-8fcb-50eede1ecff1\") " pod="openshift-marketplace/community-operators-lfzkp" Dec 10 11:01:51 crc kubenswrapper[4780]: I1210 11:01:51.090084 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/85b7225d-3a09-42e4-8fcb-50eede1ecff1-utilities\") pod \"community-operators-lfzkp\" (UID: \"85b7225d-3a09-42e4-8fcb-50eede1ecff1\") " pod="openshift-marketplace/community-operators-lfzkp" Dec 10 11:01:51 crc kubenswrapper[4780]: I1210 11:01:51.090343 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/85b7225d-3a09-42e4-8fcb-50eede1ecff1-catalog-content\") pod \"community-operators-lfzkp\" (UID: \"85b7225d-3a09-42e4-8fcb-50eede1ecff1\") " pod="openshift-marketplace/community-operators-lfzkp" Dec 10 11:01:51 crc kubenswrapper[4780]: I1210 11:01:51.195164 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/85b7225d-3a09-42e4-8fcb-50eede1ecff1-utilities\") pod \"community-operators-lfzkp\" (UID: \"85b7225d-3a09-42e4-8fcb-50eede1ecff1\") " pod="openshift-marketplace/community-operators-lfzkp" Dec 10 11:01:51 crc kubenswrapper[4780]: I1210 11:01:51.195279 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/85b7225d-3a09-42e4-8fcb-50eede1ecff1-catalog-content\") pod \"community-operators-lfzkp\" (UID: \"85b7225d-3a09-42e4-8fcb-50eede1ecff1\") " pod="openshift-marketplace/community-operators-lfzkp" Dec 10 11:01:51 crc kubenswrapper[4780]: I1210 11:01:51.195832 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hxgvm\" (UniqueName: \"kubernetes.io/projected/85b7225d-3a09-42e4-8fcb-50eede1ecff1-kube-api-access-hxgvm\") pod \"community-operators-lfzkp\" (UID: \"85b7225d-3a09-42e4-8fcb-50eede1ecff1\") " pod="openshift-marketplace/community-operators-lfzkp" Dec 10 11:01:51 crc kubenswrapper[4780]: I1210 11:01:51.196521 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/85b7225d-3a09-42e4-8fcb-50eede1ecff1-utilities\") pod \"community-operators-lfzkp\" (UID: \"85b7225d-3a09-42e4-8fcb-50eede1ecff1\") " pod="openshift-marketplace/community-operators-lfzkp" Dec 10 11:01:51 crc kubenswrapper[4780]: I1210 11:01:51.198114 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/85b7225d-3a09-42e4-8fcb-50eede1ecff1-catalog-content\") pod \"community-operators-lfzkp\" (UID: \"85b7225d-3a09-42e4-8fcb-50eede1ecff1\") " pod="openshift-marketplace/community-operators-lfzkp" Dec 10 11:01:51 crc kubenswrapper[4780]: I1210 11:01:51.230083 4780 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-hxgvm\" (UniqueName: \"kubernetes.io/projected/85b7225d-3a09-42e4-8fcb-50eede1ecff1-kube-api-access-hxgvm\") pod \"community-operators-lfzkp\" (UID: \"85b7225d-3a09-42e4-8fcb-50eede1ecff1\") " pod="openshift-marketplace/community-operators-lfzkp" Dec 10 11:01:51 crc kubenswrapper[4780]: I1210 11:01:51.326240 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["minio-dev/minio"] Dec 10 11:01:51 crc kubenswrapper[4780]: I1210 11:01:51.327576 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="minio-dev/minio" Dec 10 11:01:51 crc kubenswrapper[4780]: I1210 11:01:51.333378 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["minio-dev/minio"] Dec 10 11:01:51 crc kubenswrapper[4780]: I1210 11:01:51.334793 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"minio-dev"/"openshift-service-ca.crt" Dec 10 11:01:51 crc kubenswrapper[4780]: I1210 11:01:51.335565 4780 reflector.go:368] Caches populated for *v1.Secret from object-"minio-dev"/"default-dockercfg-bs7dd" Dec 10 11:01:51 crc kubenswrapper[4780]: I1210 11:01:51.335710 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"minio-dev"/"kube-root-ca.crt" Dec 10 11:01:51 crc kubenswrapper[4780]: I1210 11:01:51.490732 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-lfzkp" Dec 10 11:01:51 crc kubenswrapper[4780]: I1210 11:01:51.504388 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kh5fk\" (UniqueName: \"kubernetes.io/projected/72f0b811-aabe-4dfb-aa01-160bea6c13b4-kube-api-access-kh5fk\") pod \"minio\" (UID: \"72f0b811-aabe-4dfb-aa01-160bea6c13b4\") " pod="minio-dev/minio" Dec 10 11:01:51 crc kubenswrapper[4780]: I1210 11:01:51.504488 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-47e69932-a96a-4bb4-a4a7-b1e4dde04f70\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-47e69932-a96a-4bb4-a4a7-b1e4dde04f70\") pod \"minio\" (UID: \"72f0b811-aabe-4dfb-aa01-160bea6c13b4\") " pod="minio-dev/minio" Dec 10 11:01:51 crc kubenswrapper[4780]: I1210 11:01:51.606339 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kh5fk\" (UniqueName: \"kubernetes.io/projected/72f0b811-aabe-4dfb-aa01-160bea6c13b4-kube-api-access-kh5fk\") pod \"minio\" (UID: \"72f0b811-aabe-4dfb-aa01-160bea6c13b4\") " pod="minio-dev/minio" Dec 10 11:01:51 crc kubenswrapper[4780]: I1210 11:01:51.606407 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-47e69932-a96a-4bb4-a4a7-b1e4dde04f70\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-47e69932-a96a-4bb4-a4a7-b1e4dde04f70\") pod \"minio\" (UID: \"72f0b811-aabe-4dfb-aa01-160bea6c13b4\") " pod="minio-dev/minio" Dec 10 11:01:51 crc kubenswrapper[4780]: I1210 11:01:51.618233 4780 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... 
Dec 10 11:01:51 crc kubenswrapper[4780]: I1210 11:01:51.618286 4780 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-47e69932-a96a-4bb4-a4a7-b1e4dde04f70\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-47e69932-a96a-4bb4-a4a7-b1e4dde04f70\") pod \"minio\" (UID: \"72f0b811-aabe-4dfb-aa01-160bea6c13b4\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/5481a8dbb592017ad6b5fe7d631cda0debf79bdde8da1ad074dc6dcc087c0259/globalmount\"" pod="minio-dev/minio" Dec 10 11:01:51 crc kubenswrapper[4780]: I1210 11:01:51.628136 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kh5fk\" (UniqueName: \"kubernetes.io/projected/72f0b811-aabe-4dfb-aa01-160bea6c13b4-kube-api-access-kh5fk\") pod \"minio\" (UID: \"72f0b811-aabe-4dfb-aa01-160bea6c13b4\") " pod="minio-dev/minio" Dec 10 11:01:51 crc kubenswrapper[4780]: I1210 11:01:51.698133 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-47e69932-a96a-4bb4-a4a7-b1e4dde04f70\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-47e69932-a96a-4bb4-a4a7-b1e4dde04f70\") pod \"minio\" (UID: \"72f0b811-aabe-4dfb-aa01-160bea6c13b4\") " pod="minio-dev/minio" Dec 10 11:01:51 crc kubenswrapper[4780]: I1210 11:01:51.965967 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="minio-dev/minio" Dec 10 11:01:52 crc kubenswrapper[4780]: I1210 11:01:52.018610 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-lfzkp"] Dec 10 11:01:52 crc kubenswrapper[4780]: I1210 11:01:52.137710 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-lfzkp" event={"ID":"85b7225d-3a09-42e4-8fcb-50eede1ecff1","Type":"ContainerStarted","Data":"ded1efd6cc2fced64aae3adad5102e60eb292ec155e853dfc89e39f9695be431"} Dec 10 11:01:52 crc kubenswrapper[4780]: I1210 11:01:52.624286 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["minio-dev/minio"] Dec 10 11:01:53 crc kubenswrapper[4780]: I1210 11:01:53.205205 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="minio-dev/minio" event={"ID":"72f0b811-aabe-4dfb-aa01-160bea6c13b4","Type":"ContainerStarted","Data":"856a981f59f334db206a5972999e11633b17797f411555c3db8b12ef8d7fd9bc"} Dec 10 11:01:53 crc kubenswrapper[4780]: I1210 11:01:53.214849 4780 generic.go:334] "Generic (PLEG): container finished" podID="85b7225d-3a09-42e4-8fcb-50eede1ecff1" containerID="e3567e710adc443ea47f4c1575e16e6ef0dd07c9fa9128b2a8d44a26090b97c1" exitCode=0 Dec 10 11:01:53 crc kubenswrapper[4780]: I1210 11:01:53.215032 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-lfzkp" event={"ID":"85b7225d-3a09-42e4-8fcb-50eede1ecff1","Type":"ContainerDied","Data":"e3567e710adc443ea47f4c1575e16e6ef0dd07c9fa9128b2a8d44a26090b97c1"} Dec 10 11:01:55 crc kubenswrapper[4780]: I1210 11:01:55.942227 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-vr2cr"] Dec 10 11:01:55 crc kubenswrapper[4780]: I1210 11:01:55.944339 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-vr2cr" Dec 10 11:01:55 crc kubenswrapper[4780]: I1210 11:01:55.972455 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-vr2cr"] Dec 10 11:01:56 crc kubenswrapper[4780]: I1210 11:01:56.064195 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fa780797-49d6-443b-948f-d469f590ff89-utilities\") pod \"redhat-marketplace-vr2cr\" (UID: \"fa780797-49d6-443b-948f-d469f590ff89\") " pod="openshift-marketplace/redhat-marketplace-vr2cr" Dec 10 11:01:56 crc kubenswrapper[4780]: I1210 11:01:56.064364 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fa780797-49d6-443b-948f-d469f590ff89-catalog-content\") pod \"redhat-marketplace-vr2cr\" (UID: \"fa780797-49d6-443b-948f-d469f590ff89\") " pod="openshift-marketplace/redhat-marketplace-vr2cr" Dec 10 11:01:56 crc kubenswrapper[4780]: I1210 11:01:56.064632 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4qg6m\" (UniqueName: \"kubernetes.io/projected/fa780797-49d6-443b-948f-d469f590ff89-kube-api-access-4qg6m\") pod \"redhat-marketplace-vr2cr\" (UID: \"fa780797-49d6-443b-948f-d469f590ff89\") " pod="openshift-marketplace/redhat-marketplace-vr2cr" Dec 10 11:01:56 crc kubenswrapper[4780]: I1210 11:01:56.166887 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4qg6m\" (UniqueName: \"kubernetes.io/projected/fa780797-49d6-443b-948f-d469f590ff89-kube-api-access-4qg6m\") pod \"redhat-marketplace-vr2cr\" (UID: \"fa780797-49d6-443b-948f-d469f590ff89\") " pod="openshift-marketplace/redhat-marketplace-vr2cr" Dec 10 11:01:56 crc kubenswrapper[4780]: I1210 11:01:56.167055 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fa780797-49d6-443b-948f-d469f590ff89-utilities\") pod \"redhat-marketplace-vr2cr\" (UID: \"fa780797-49d6-443b-948f-d469f590ff89\") " pod="openshift-marketplace/redhat-marketplace-vr2cr" Dec 10 11:01:56 crc kubenswrapper[4780]: I1210 11:01:56.167082 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fa780797-49d6-443b-948f-d469f590ff89-catalog-content\") pod \"redhat-marketplace-vr2cr\" (UID: \"fa780797-49d6-443b-948f-d469f590ff89\") " pod="openshift-marketplace/redhat-marketplace-vr2cr" Dec 10 11:01:56 crc kubenswrapper[4780]: I1210 11:01:56.167865 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fa780797-49d6-443b-948f-d469f590ff89-catalog-content\") pod \"redhat-marketplace-vr2cr\" (UID: \"fa780797-49d6-443b-948f-d469f590ff89\") " pod="openshift-marketplace/redhat-marketplace-vr2cr" Dec 10 11:01:56 crc kubenswrapper[4780]: I1210 11:01:56.169027 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fa780797-49d6-443b-948f-d469f590ff89-utilities\") pod \"redhat-marketplace-vr2cr\" (UID: \"fa780797-49d6-443b-948f-d469f590ff89\") " pod="openshift-marketplace/redhat-marketplace-vr2cr" Dec 10 11:01:56 crc kubenswrapper[4780]: I1210 11:01:56.211292 4780 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-4qg6m\" (UniqueName: \"kubernetes.io/projected/fa780797-49d6-443b-948f-d469f590ff89-kube-api-access-4qg6m\") pod \"redhat-marketplace-vr2cr\" (UID: \"fa780797-49d6-443b-948f-d469f590ff89\") " pod="openshift-marketplace/redhat-marketplace-vr2cr" Dec 10 11:01:56 crc kubenswrapper[4780]: I1210 11:01:56.280361 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-vr2cr" Dec 10 11:01:56 crc kubenswrapper[4780]: I1210 11:01:56.556565 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-vr2cr"] Dec 10 11:01:57 crc kubenswrapper[4780]: I1210 11:01:57.254405 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-vr2cr" event={"ID":"fa780797-49d6-443b-948f-d469f590ff89","Type":"ContainerStarted","Data":"ec5c910316187f66dcda77986ffe54dbe334f732d4e754a3c44c9e5b9fe2b900"} Dec 10 11:01:57 crc kubenswrapper[4780]: I1210 11:01:57.476072 4780 patch_prober.go:28] interesting pod/machine-config-daemon-xhdr5 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 10 11:01:57 crc kubenswrapper[4780]: I1210 11:01:57.476174 4780 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" podUID="6bf1dca1-b191-4796-b326-baac53e84045" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 10 11:01:58 crc kubenswrapper[4780]: I1210 11:01:58.268336 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-vr2cr" event={"ID":"fa780797-49d6-443b-948f-d469f590ff89","Type":"ContainerStarted","Data":"9e0cad36a10ca6b02e1208810f2718ff564241e220d4f4f88d90cac7b19cc592"} Dec 10 11:01:59 crc kubenswrapper[4780]: I1210 11:01:59.280042 4780 generic.go:334] "Generic (PLEG): container finished" podID="fa780797-49d6-443b-948f-d469f590ff89" containerID="9e0cad36a10ca6b02e1208810f2718ff564241e220d4f4f88d90cac7b19cc592" exitCode=0 Dec 10 11:01:59 crc kubenswrapper[4780]: I1210 11:01:59.280121 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-vr2cr" event={"ID":"fa780797-49d6-443b-948f-d469f590ff89","Type":"ContainerDied","Data":"9e0cad36a10ca6b02e1208810f2718ff564241e220d4f4f88d90cac7b19cc592"} Dec 10 11:02:04 crc kubenswrapper[4780]: I1210 11:02:04.884236 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-g66q8"] Dec 10 11:02:04 crc kubenswrapper[4780]: I1210 11:02:04.886273 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-g66q8" Dec 10 11:02:04 crc kubenswrapper[4780]: I1210 11:02:04.902583 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f898695c-3396-42b3-9bf5-0fe05c234d39-utilities\") pod \"certified-operators-g66q8\" (UID: \"f898695c-3396-42b3-9bf5-0fe05c234d39\") " pod="openshift-marketplace/certified-operators-g66q8" Dec 10 11:02:04 crc kubenswrapper[4780]: I1210 11:02:04.902685 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qfvrq\" (UniqueName: \"kubernetes.io/projected/f898695c-3396-42b3-9bf5-0fe05c234d39-kube-api-access-qfvrq\") pod \"certified-operators-g66q8\" (UID: \"f898695c-3396-42b3-9bf5-0fe05c234d39\") " pod="openshift-marketplace/certified-operators-g66q8" Dec 10 11:02:04 crc kubenswrapper[4780]: I1210 11:02:04.902819 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f898695c-3396-42b3-9bf5-0fe05c234d39-catalog-content\") pod \"certified-operators-g66q8\" (UID: \"f898695c-3396-42b3-9bf5-0fe05c234d39\") " pod="openshift-marketplace/certified-operators-g66q8" Dec 10 11:02:04 crc kubenswrapper[4780]: I1210 11:02:04.913483 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-g66q8"] Dec 10 11:02:05 crc kubenswrapper[4780]: I1210 11:02:05.005303 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f898695c-3396-42b3-9bf5-0fe05c234d39-utilities\") pod \"certified-operators-g66q8\" (UID: \"f898695c-3396-42b3-9bf5-0fe05c234d39\") " pod="openshift-marketplace/certified-operators-g66q8" Dec 10 11:02:05 crc kubenswrapper[4780]: I1210 11:02:05.005457 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qfvrq\" (UniqueName: \"kubernetes.io/projected/f898695c-3396-42b3-9bf5-0fe05c234d39-kube-api-access-qfvrq\") pod \"certified-operators-g66q8\" (UID: \"f898695c-3396-42b3-9bf5-0fe05c234d39\") " pod="openshift-marketplace/certified-operators-g66q8" Dec 10 11:02:05 crc kubenswrapper[4780]: I1210 11:02:05.005607 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f898695c-3396-42b3-9bf5-0fe05c234d39-catalog-content\") pod \"certified-operators-g66q8\" (UID: \"f898695c-3396-42b3-9bf5-0fe05c234d39\") " pod="openshift-marketplace/certified-operators-g66q8" Dec 10 11:02:05 crc kubenswrapper[4780]: I1210 11:02:05.006823 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f898695c-3396-42b3-9bf5-0fe05c234d39-catalog-content\") pod \"certified-operators-g66q8\" (UID: \"f898695c-3396-42b3-9bf5-0fe05c234d39\") " pod="openshift-marketplace/certified-operators-g66q8" Dec 10 11:02:05 crc kubenswrapper[4780]: I1210 11:02:05.007010 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f898695c-3396-42b3-9bf5-0fe05c234d39-utilities\") pod \"certified-operators-g66q8\" (UID: \"f898695c-3396-42b3-9bf5-0fe05c234d39\") " pod="openshift-marketplace/certified-operators-g66q8" Dec 10 11:02:05 crc kubenswrapper[4780]: I1210 11:02:05.037414 4780 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-qfvrq\" (UniqueName: \"kubernetes.io/projected/f898695c-3396-42b3-9bf5-0fe05c234d39-kube-api-access-qfvrq\") pod \"certified-operators-g66q8\" (UID: \"f898695c-3396-42b3-9bf5-0fe05c234d39\") " pod="openshift-marketplace/certified-operators-g66q8" Dec 10 11:02:05 crc kubenswrapper[4780]: I1210 11:02:05.210982 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-g66q8" Dec 10 11:02:06 crc kubenswrapper[4780]: I1210 11:02:06.377384 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-g66q8"] Dec 10 11:02:10 crc kubenswrapper[4780]: I1210 11:02:10.161680 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-g66q8" event={"ID":"f898695c-3396-42b3-9bf5-0fe05c234d39","Type":"ContainerStarted","Data":"1a6dd50b80d7c159f87a0ffe85ef142bcfe42367fbc300dedd11a791a64d54ec"} Dec 10 11:02:14 crc kubenswrapper[4780]: I1210 11:02:14.252615 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-lfzkp" event={"ID":"85b7225d-3a09-42e4-8fcb-50eede1ecff1","Type":"ContainerStarted","Data":"65a94723df02df161f73aacd5f861739e7b90ff193b3df19b9de127d29852ecb"} Dec 10 11:02:14 crc kubenswrapper[4780]: I1210 11:02:14.292017 4780 generic.go:334] "Generic (PLEG): container finished" podID="f898695c-3396-42b3-9bf5-0fe05c234d39" containerID="d5c32df13fd99746fcd165de6f9108f999f7d95f85ca531d485d850a8a2b2dad" exitCode=0 Dec 10 11:02:14 crc kubenswrapper[4780]: I1210 11:02:14.292117 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-g66q8" event={"ID":"f898695c-3396-42b3-9bf5-0fe05c234d39","Type":"ContainerDied","Data":"d5c32df13fd99746fcd165de6f9108f999f7d95f85ca531d485d850a8a2b2dad"} Dec 10 11:02:15 crc kubenswrapper[4780]: I1210 11:02:15.303000 4780 generic.go:334] "Generic (PLEG): container finished" podID="85b7225d-3a09-42e4-8fcb-50eede1ecff1" containerID="65a94723df02df161f73aacd5f861739e7b90ff193b3df19b9de127d29852ecb" exitCode=0 Dec 10 11:02:15 crc kubenswrapper[4780]: I1210 11:02:15.303117 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-lfzkp" event={"ID":"85b7225d-3a09-42e4-8fcb-50eede1ecff1","Type":"ContainerDied","Data":"65a94723df02df161f73aacd5f861739e7b90ff193b3df19b9de127d29852ecb"} Dec 10 11:02:16 crc kubenswrapper[4780]: I1210 11:02:16.314052 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-vr2cr" event={"ID":"fa780797-49d6-443b-948f-d469f590ff89","Type":"ContainerStarted","Data":"2f48a3cf28616a423542d182ad98f7964b023463e7c960da12740f4dae9c9b73"} Dec 10 11:02:16 crc kubenswrapper[4780]: I1210 11:02:16.319771 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="minio-dev/minio" event={"ID":"72f0b811-aabe-4dfb-aa01-160bea6c13b4","Type":"ContainerStarted","Data":"e4417818ef7e294eb975ee33916c95fe9e6b5a8d869bc5d1b0fb52965c6eef98"} Dec 10 11:02:16 crc kubenswrapper[4780]: I1210 11:02:16.365681 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="minio-dev/minio" podStartSLOduration=5.676551102 podStartE2EDuration="28.36561987s" podCreationTimestamp="2025-12-10 11:01:48 +0000 UTC" firstStartedPulling="2025-12-10 11:01:52.639516741 +0000 UTC m=+1017.492910174" lastFinishedPulling="2025-12-10 11:02:15.328585499 +0000 UTC m=+1040.181978942" 
observedRunningTime="2025-12-10 11:02:16.361098095 +0000 UTC m=+1041.214491558" watchObservedRunningTime="2025-12-10 11:02:16.36561987 +0000 UTC m=+1041.219013313" Dec 10 11:02:17 crc kubenswrapper[4780]: I1210 11:02:17.330211 4780 generic.go:334] "Generic (PLEG): container finished" podID="fa780797-49d6-443b-948f-d469f590ff89" containerID="2f48a3cf28616a423542d182ad98f7964b023463e7c960da12740f4dae9c9b73" exitCode=0 Dec 10 11:02:17 crc kubenswrapper[4780]: I1210 11:02:17.330686 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-vr2cr" event={"ID":"fa780797-49d6-443b-948f-d469f590ff89","Type":"ContainerDied","Data":"2f48a3cf28616a423542d182ad98f7964b023463e7c960da12740f4dae9c9b73"} Dec 10 11:02:18 crc kubenswrapper[4780]: I1210 11:02:18.341145 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-lfzkp" event={"ID":"85b7225d-3a09-42e4-8fcb-50eede1ecff1","Type":"ContainerStarted","Data":"a3f6af6856818ef7acca738ce1aa462bf086d3706b74eb8ad29189e7ff22980e"} Dec 10 11:02:19 crc kubenswrapper[4780]: I1210 11:02:19.355660 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-g66q8" event={"ID":"f898695c-3396-42b3-9bf5-0fe05c234d39","Type":"ContainerStarted","Data":"6de5c3578ac31054005e7a5ba621b6053b4499de3d665df1d2ba592d7ae1c022"} Dec 10 11:02:19 crc kubenswrapper[4780]: I1210 11:02:19.389146 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-lfzkp" podStartSLOduration=5.894856696 podStartE2EDuration="29.389109804s" podCreationTimestamp="2025-12-10 11:01:50 +0000 UTC" firstStartedPulling="2025-12-10 11:01:53.218214539 +0000 UTC m=+1018.071607982" lastFinishedPulling="2025-12-10 11:02:16.712467647 +0000 UTC m=+1041.565861090" observedRunningTime="2025-12-10 11:02:19.383353227 +0000 UTC m=+1044.236746670" watchObservedRunningTime="2025-12-10 11:02:19.389109804 +0000 UTC m=+1044.242503247" Dec 10 11:02:21 crc kubenswrapper[4780]: I1210 11:02:21.491302 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-lfzkp" Dec 10 11:02:21 crc kubenswrapper[4780]: I1210 11:02:21.491709 4780 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-lfzkp" Dec 10 11:02:21 crc kubenswrapper[4780]: I1210 11:02:21.547261 4780 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-lfzkp" Dec 10 11:02:21 crc kubenswrapper[4780]: I1210 11:02:21.755535 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-logging/logging-loki-distributor-76cc67bf56-j5hdz"] Dec 10 11:02:21 crc kubenswrapper[4780]: I1210 11:02:21.757016 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-logging/logging-loki-distributor-76cc67bf56-j5hdz" Dec 10 11:02:21 crc kubenswrapper[4780]: I1210 11:02:21.770686 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-dockercfg-vxmn2" Dec 10 11:02:21 crc kubenswrapper[4780]: I1210 11:02:21.770768 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-logging"/"logging-loki-config" Dec 10 11:02:21 crc kubenswrapper[4780]: I1210 11:02:21.770694 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-distributor-grpc" Dec 10 11:02:21 crc kubenswrapper[4780]: I1210 11:02:21.770687 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-distributor-http" Dec 10 11:02:21 crc kubenswrapper[4780]: I1210 11:02:21.771244 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-logging"/"logging-loki-ca-bundle" Dec 10 11:02:21 crc kubenswrapper[4780]: I1210 11:02:21.781729 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/logging-loki-distributor-76cc67bf56-j5hdz"] Dec 10 11:02:21 crc kubenswrapper[4780]: I1210 11:02:21.817186 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-distributor-grpc\" (UniqueName: \"kubernetes.io/secret/a59d13f6-10bb-4e7d-96c1-46dff7fac4e2-logging-loki-distributor-grpc\") pod \"logging-loki-distributor-76cc67bf56-j5hdz\" (UID: \"a59d13f6-10bb-4e7d-96c1-46dff7fac4e2\") " pod="openshift-logging/logging-loki-distributor-76cc67bf56-j5hdz" Dec 10 11:02:21 crc kubenswrapper[4780]: I1210 11:02:21.817367 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a59d13f6-10bb-4e7d-96c1-46dff7fac4e2-config\") pod \"logging-loki-distributor-76cc67bf56-j5hdz\" (UID: \"a59d13f6-10bb-4e7d-96c1-46dff7fac4e2\") " pod="openshift-logging/logging-loki-distributor-76cc67bf56-j5hdz" Dec 10 11:02:21 crc kubenswrapper[4780]: I1210 11:02:21.817414 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/a59d13f6-10bb-4e7d-96c1-46dff7fac4e2-logging-loki-ca-bundle\") pod \"logging-loki-distributor-76cc67bf56-j5hdz\" (UID: \"a59d13f6-10bb-4e7d-96c1-46dff7fac4e2\") " pod="openshift-logging/logging-loki-distributor-76cc67bf56-j5hdz" Dec 10 11:02:21 crc kubenswrapper[4780]: I1210 11:02:21.817565 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gpzwm\" (UniqueName: \"kubernetes.io/projected/a59d13f6-10bb-4e7d-96c1-46dff7fac4e2-kube-api-access-gpzwm\") pod \"logging-loki-distributor-76cc67bf56-j5hdz\" (UID: \"a59d13f6-10bb-4e7d-96c1-46dff7fac4e2\") " pod="openshift-logging/logging-loki-distributor-76cc67bf56-j5hdz" Dec 10 11:02:21 crc kubenswrapper[4780]: I1210 11:02:21.817601 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-distributor-http\" (UniqueName: \"kubernetes.io/secret/a59d13f6-10bb-4e7d-96c1-46dff7fac4e2-logging-loki-distributor-http\") pod \"logging-loki-distributor-76cc67bf56-j5hdz\" (UID: \"a59d13f6-10bb-4e7d-96c1-46dff7fac4e2\") " pod="openshift-logging/logging-loki-distributor-76cc67bf56-j5hdz" Dec 10 11:02:21 crc kubenswrapper[4780]: I1210 11:02:21.950361 4780 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"logging-loki-distributor-grpc\" (UniqueName: \"kubernetes.io/secret/a59d13f6-10bb-4e7d-96c1-46dff7fac4e2-logging-loki-distributor-grpc\") pod \"logging-loki-distributor-76cc67bf56-j5hdz\" (UID: \"a59d13f6-10bb-4e7d-96c1-46dff7fac4e2\") " pod="openshift-logging/logging-loki-distributor-76cc67bf56-j5hdz" Dec 10 11:02:21 crc kubenswrapper[4780]: I1210 11:02:21.950461 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a59d13f6-10bb-4e7d-96c1-46dff7fac4e2-config\") pod \"logging-loki-distributor-76cc67bf56-j5hdz\" (UID: \"a59d13f6-10bb-4e7d-96c1-46dff7fac4e2\") " pod="openshift-logging/logging-loki-distributor-76cc67bf56-j5hdz" Dec 10 11:02:21 crc kubenswrapper[4780]: I1210 11:02:21.950486 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/a59d13f6-10bb-4e7d-96c1-46dff7fac4e2-logging-loki-ca-bundle\") pod \"logging-loki-distributor-76cc67bf56-j5hdz\" (UID: \"a59d13f6-10bb-4e7d-96c1-46dff7fac4e2\") " pod="openshift-logging/logging-loki-distributor-76cc67bf56-j5hdz" Dec 10 11:02:21 crc kubenswrapper[4780]: I1210 11:02:21.950523 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gpzwm\" (UniqueName: \"kubernetes.io/projected/a59d13f6-10bb-4e7d-96c1-46dff7fac4e2-kube-api-access-gpzwm\") pod \"logging-loki-distributor-76cc67bf56-j5hdz\" (UID: \"a59d13f6-10bb-4e7d-96c1-46dff7fac4e2\") " pod="openshift-logging/logging-loki-distributor-76cc67bf56-j5hdz" Dec 10 11:02:21 crc kubenswrapper[4780]: I1210 11:02:21.950548 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-distributor-http\" (UniqueName: \"kubernetes.io/secret/a59d13f6-10bb-4e7d-96c1-46dff7fac4e2-logging-loki-distributor-http\") pod \"logging-loki-distributor-76cc67bf56-j5hdz\" (UID: \"a59d13f6-10bb-4e7d-96c1-46dff7fac4e2\") " pod="openshift-logging/logging-loki-distributor-76cc67bf56-j5hdz" Dec 10 11:02:21 crc kubenswrapper[4780]: I1210 11:02:21.954264 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a59d13f6-10bb-4e7d-96c1-46dff7fac4e2-config\") pod \"logging-loki-distributor-76cc67bf56-j5hdz\" (UID: \"a59d13f6-10bb-4e7d-96c1-46dff7fac4e2\") " pod="openshift-logging/logging-loki-distributor-76cc67bf56-j5hdz" Dec 10 11:02:21 crc kubenswrapper[4780]: I1210 11:02:21.955110 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/a59d13f6-10bb-4e7d-96c1-46dff7fac4e2-logging-loki-ca-bundle\") pod \"logging-loki-distributor-76cc67bf56-j5hdz\" (UID: \"a59d13f6-10bb-4e7d-96c1-46dff7fac4e2\") " pod="openshift-logging/logging-loki-distributor-76cc67bf56-j5hdz" Dec 10 11:02:21 crc kubenswrapper[4780]: I1210 11:02:21.966310 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-distributor-http\" (UniqueName: \"kubernetes.io/secret/a59d13f6-10bb-4e7d-96c1-46dff7fac4e2-logging-loki-distributor-http\") pod \"logging-loki-distributor-76cc67bf56-j5hdz\" (UID: \"a59d13f6-10bb-4e7d-96c1-46dff7fac4e2\") " pod="openshift-logging/logging-loki-distributor-76cc67bf56-j5hdz" Dec 10 11:02:21 crc kubenswrapper[4780]: I1210 11:02:21.975948 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-distributor-grpc\" (UniqueName: 
\"kubernetes.io/secret/a59d13f6-10bb-4e7d-96c1-46dff7fac4e2-logging-loki-distributor-grpc\") pod \"logging-loki-distributor-76cc67bf56-j5hdz\" (UID: \"a59d13f6-10bb-4e7d-96c1-46dff7fac4e2\") " pod="openshift-logging/logging-loki-distributor-76cc67bf56-j5hdz" Dec 10 11:02:21 crc kubenswrapper[4780]: I1210 11:02:21.992625 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gpzwm\" (UniqueName: \"kubernetes.io/projected/a59d13f6-10bb-4e7d-96c1-46dff7fac4e2-kube-api-access-gpzwm\") pod \"logging-loki-distributor-76cc67bf56-j5hdz\" (UID: \"a59d13f6-10bb-4e7d-96c1-46dff7fac4e2\") " pod="openshift-logging/logging-loki-distributor-76cc67bf56-j5hdz" Dec 10 11:02:22 crc kubenswrapper[4780]: I1210 11:02:22.029078 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-logging/logging-loki-querier-5895d59bb8-h98vv"] Dec 10 11:02:22 crc kubenswrapper[4780]: I1210 11:02:22.034909 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-logging/logging-loki-querier-5895d59bb8-h98vv" Dec 10 11:02:22 crc kubenswrapper[4780]: I1210 11:02:22.038601 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-querier-grpc" Dec 10 11:02:22 crc kubenswrapper[4780]: I1210 11:02:22.039249 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/logging-loki-querier-5895d59bb8-h98vv"] Dec 10 11:02:22 crc kubenswrapper[4780]: I1210 11:02:22.046426 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-s3" Dec 10 11:02:22 crc kubenswrapper[4780]: I1210 11:02:22.046667 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-querier-http" Dec 10 11:02:22 crc kubenswrapper[4780]: I1210 11:02:22.103813 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-logging/logging-loki-distributor-76cc67bf56-j5hdz" Dec 10 11:02:22 crc kubenswrapper[4780]: I1210 11:02:22.109078 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-logging/logging-loki-query-frontend-84558f7c9f-tjd86"] Dec 10 11:02:22 crc kubenswrapper[4780]: I1210 11:02:22.112774 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-logging/logging-loki-query-frontend-84558f7c9f-tjd86" Dec 10 11:02:22 crc kubenswrapper[4780]: I1210 11:02:22.152815 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-query-frontend-grpc" Dec 10 11:02:22 crc kubenswrapper[4780]: I1210 11:02:22.155085 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-query-frontend-http" Dec 10 11:02:22 crc kubenswrapper[4780]: I1210 11:02:22.157131 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-querier-http\" (UniqueName: \"kubernetes.io/secret/7c0379b8-51d2-4860-be84-18dfcb007969-logging-loki-querier-http\") pod \"logging-loki-querier-5895d59bb8-h98vv\" (UID: \"7c0379b8-51d2-4860-be84-18dfcb007969\") " pod="openshift-logging/logging-loki-querier-5895d59bb8-h98vv" Dec 10 11:02:22 crc kubenswrapper[4780]: I1210 11:02:22.157256 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-h2qqd\" (UniqueName: \"kubernetes.io/projected/7c0379b8-51d2-4860-be84-18dfcb007969-kube-api-access-h2qqd\") pod \"logging-loki-querier-5895d59bb8-h98vv\" (UID: \"7c0379b8-51d2-4860-be84-18dfcb007969\") " pod="openshift-logging/logging-loki-querier-5895d59bb8-h98vv" Dec 10 11:02:22 crc kubenswrapper[4780]: I1210 11:02:22.157305 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7c0379b8-51d2-4860-be84-18dfcb007969-config\") pod \"logging-loki-querier-5895d59bb8-h98vv\" (UID: \"7c0379b8-51d2-4860-be84-18dfcb007969\") " pod="openshift-logging/logging-loki-querier-5895d59bb8-h98vv" Dec 10 11:02:22 crc kubenswrapper[4780]: I1210 11:02:22.157411 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-s3\" (UniqueName: \"kubernetes.io/secret/7c0379b8-51d2-4860-be84-18dfcb007969-logging-loki-s3\") pod \"logging-loki-querier-5895d59bb8-h98vv\" (UID: \"7c0379b8-51d2-4860-be84-18dfcb007969\") " pod="openshift-logging/logging-loki-querier-5895d59bb8-h98vv" Dec 10 11:02:22 crc kubenswrapper[4780]: I1210 11:02:22.157492 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-querier-grpc\" (UniqueName: \"kubernetes.io/secret/7c0379b8-51d2-4860-be84-18dfcb007969-logging-loki-querier-grpc\") pod \"logging-loki-querier-5895d59bb8-h98vv\" (UID: \"7c0379b8-51d2-4860-be84-18dfcb007969\") " pod="openshift-logging/logging-loki-querier-5895d59bb8-h98vv" Dec 10 11:02:22 crc kubenswrapper[4780]: I1210 11:02:22.157580 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/7c0379b8-51d2-4860-be84-18dfcb007969-logging-loki-ca-bundle\") pod \"logging-loki-querier-5895d59bb8-h98vv\" (UID: \"7c0379b8-51d2-4860-be84-18dfcb007969\") " pod="openshift-logging/logging-loki-querier-5895d59bb8-h98vv" Dec 10 11:02:22 crc kubenswrapper[4780]: I1210 11:02:22.193261 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/logging-loki-query-frontend-84558f7c9f-tjd86"] Dec 10 11:02:22 crc kubenswrapper[4780]: I1210 11:02:22.259263 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-ca-bundle\" (UniqueName: 
\"kubernetes.io/configmap/7c0379b8-51d2-4860-be84-18dfcb007969-logging-loki-ca-bundle\") pod \"logging-loki-querier-5895d59bb8-h98vv\" (UID: \"7c0379b8-51d2-4860-be84-18dfcb007969\") " pod="openshift-logging/logging-loki-querier-5895d59bb8-h98vv" Dec 10 11:02:22 crc kubenswrapper[4780]: I1210 11:02:22.259332 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9906b064-40d7-43bc-bb9b-52863d99a2f4-config\") pod \"logging-loki-query-frontend-84558f7c9f-tjd86\" (UID: \"9906b064-40d7-43bc-bb9b-52863d99a2f4\") " pod="openshift-logging/logging-loki-query-frontend-84558f7c9f-tjd86" Dec 10 11:02:22 crc kubenswrapper[4780]: I1210 11:02:22.259365 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/9906b064-40d7-43bc-bb9b-52863d99a2f4-logging-loki-ca-bundle\") pod \"logging-loki-query-frontend-84558f7c9f-tjd86\" (UID: \"9906b064-40d7-43bc-bb9b-52863d99a2f4\") " pod="openshift-logging/logging-loki-query-frontend-84558f7c9f-tjd86" Dec 10 11:02:22 crc kubenswrapper[4780]: I1210 11:02:22.259448 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-querier-http\" (UniqueName: \"kubernetes.io/secret/7c0379b8-51d2-4860-be84-18dfcb007969-logging-loki-querier-http\") pod \"logging-loki-querier-5895d59bb8-h98vv\" (UID: \"7c0379b8-51d2-4860-be84-18dfcb007969\") " pod="openshift-logging/logging-loki-querier-5895d59bb8-h98vv" Dec 10 11:02:22 crc kubenswrapper[4780]: I1210 11:02:22.259474 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-query-frontend-grpc\" (UniqueName: \"kubernetes.io/secret/9906b064-40d7-43bc-bb9b-52863d99a2f4-logging-loki-query-frontend-grpc\") pod \"logging-loki-query-frontend-84558f7c9f-tjd86\" (UID: \"9906b064-40d7-43bc-bb9b-52863d99a2f4\") " pod="openshift-logging/logging-loki-query-frontend-84558f7c9f-tjd86" Dec 10 11:02:22 crc kubenswrapper[4780]: I1210 11:02:22.259516 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-h2qqd\" (UniqueName: \"kubernetes.io/projected/7c0379b8-51d2-4860-be84-18dfcb007969-kube-api-access-h2qqd\") pod \"logging-loki-querier-5895d59bb8-h98vv\" (UID: \"7c0379b8-51d2-4860-be84-18dfcb007969\") " pod="openshift-logging/logging-loki-querier-5895d59bb8-h98vv" Dec 10 11:02:22 crc kubenswrapper[4780]: I1210 11:02:22.259544 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7c0379b8-51d2-4860-be84-18dfcb007969-config\") pod \"logging-loki-querier-5895d59bb8-h98vv\" (UID: \"7c0379b8-51d2-4860-be84-18dfcb007969\") " pod="openshift-logging/logging-loki-querier-5895d59bb8-h98vv" Dec 10 11:02:22 crc kubenswrapper[4780]: I1210 11:02:22.259578 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-query-frontend-http\" (UniqueName: \"kubernetes.io/secret/9906b064-40d7-43bc-bb9b-52863d99a2f4-logging-loki-query-frontend-http\") pod \"logging-loki-query-frontend-84558f7c9f-tjd86\" (UID: \"9906b064-40d7-43bc-bb9b-52863d99a2f4\") " pod="openshift-logging/logging-loki-query-frontend-84558f7c9f-tjd86" Dec 10 11:02:22 crc kubenswrapper[4780]: I1210 11:02:22.259607 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-s3\" (UniqueName: 
\"kubernetes.io/secret/7c0379b8-51d2-4860-be84-18dfcb007969-logging-loki-s3\") pod \"logging-loki-querier-5895d59bb8-h98vv\" (UID: \"7c0379b8-51d2-4860-be84-18dfcb007969\") " pod="openshift-logging/logging-loki-querier-5895d59bb8-h98vv" Dec 10 11:02:22 crc kubenswrapper[4780]: I1210 11:02:22.259637 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-d4b28\" (UniqueName: \"kubernetes.io/projected/9906b064-40d7-43bc-bb9b-52863d99a2f4-kube-api-access-d4b28\") pod \"logging-loki-query-frontend-84558f7c9f-tjd86\" (UID: \"9906b064-40d7-43bc-bb9b-52863d99a2f4\") " pod="openshift-logging/logging-loki-query-frontend-84558f7c9f-tjd86" Dec 10 11:02:22 crc kubenswrapper[4780]: I1210 11:02:22.259668 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-querier-grpc\" (UniqueName: \"kubernetes.io/secret/7c0379b8-51d2-4860-be84-18dfcb007969-logging-loki-querier-grpc\") pod \"logging-loki-querier-5895d59bb8-h98vv\" (UID: \"7c0379b8-51d2-4860-be84-18dfcb007969\") " pod="openshift-logging/logging-loki-querier-5895d59bb8-h98vv" Dec 10 11:02:22 crc kubenswrapper[4780]: I1210 11:02:22.261406 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/7c0379b8-51d2-4860-be84-18dfcb007969-logging-loki-ca-bundle\") pod \"logging-loki-querier-5895d59bb8-h98vv\" (UID: \"7c0379b8-51d2-4860-be84-18dfcb007969\") " pod="openshift-logging/logging-loki-querier-5895d59bb8-h98vv" Dec 10 11:02:22 crc kubenswrapper[4780]: I1210 11:02:22.261863 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7c0379b8-51d2-4860-be84-18dfcb007969-config\") pod \"logging-loki-querier-5895d59bb8-h98vv\" (UID: \"7c0379b8-51d2-4860-be84-18dfcb007969\") " pod="openshift-logging/logging-loki-querier-5895d59bb8-h98vv" Dec 10 11:02:22 crc kubenswrapper[4780]: I1210 11:02:22.268021 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-logging/logging-loki-gateway-7d6b48847-8n96n"] Dec 10 11:02:22 crc kubenswrapper[4780]: I1210 11:02:22.269792 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-logging/logging-loki-gateway-7d6b48847-8n96n" Dec 10 11:02:22 crc kubenswrapper[4780]: I1210 11:02:22.271204 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-s3\" (UniqueName: \"kubernetes.io/secret/7c0379b8-51d2-4860-be84-18dfcb007969-logging-loki-s3\") pod \"logging-loki-querier-5895d59bb8-h98vv\" (UID: \"7c0379b8-51d2-4860-be84-18dfcb007969\") " pod="openshift-logging/logging-loki-querier-5895d59bb8-h98vv" Dec 10 11:02:22 crc kubenswrapper[4780]: I1210 11:02:22.279487 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/logging-loki-gateway-7d6b48847-8n96n"] Dec 10 11:02:22 crc kubenswrapper[4780]: I1210 11:02:22.281352 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-logging"/"logging-loki-gateway-ca-bundle" Dec 10 11:02:22 crc kubenswrapper[4780]: I1210 11:02:22.282204 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-querier-http\" (UniqueName: \"kubernetes.io/secret/7c0379b8-51d2-4860-be84-18dfcb007969-logging-loki-querier-http\") pod \"logging-loki-querier-5895d59bb8-h98vv\" (UID: \"7c0379b8-51d2-4860-be84-18dfcb007969\") " pod="openshift-logging/logging-loki-querier-5895d59bb8-h98vv" Dec 10 11:02:22 crc kubenswrapper[4780]: I1210 11:02:22.282369 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-logging"/"logging-loki-gateway" Dec 10 11:02:22 crc kubenswrapper[4780]: I1210 11:02:22.299313 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-querier-grpc\" (UniqueName: \"kubernetes.io/secret/7c0379b8-51d2-4860-be84-18dfcb007969-logging-loki-querier-grpc\") pod \"logging-loki-querier-5895d59bb8-h98vv\" (UID: \"7c0379b8-51d2-4860-be84-18dfcb007969\") " pod="openshift-logging/logging-loki-querier-5895d59bb8-h98vv" Dec 10 11:02:22 crc kubenswrapper[4780]: I1210 11:02:22.299484 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-gateway-http" Dec 10 11:02:22 crc kubenswrapper[4780]: I1210 11:02:22.299740 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-gateway" Dec 10 11:02:22 crc kubenswrapper[4780]: I1210 11:02:22.310966 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-gateway-client-http" Dec 10 11:02:22 crc kubenswrapper[4780]: I1210 11:02:22.316401 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-h2qqd\" (UniqueName: \"kubernetes.io/projected/7c0379b8-51d2-4860-be84-18dfcb007969-kube-api-access-h2qqd\") pod \"logging-loki-querier-5895d59bb8-h98vv\" (UID: \"7c0379b8-51d2-4860-be84-18dfcb007969\") " pod="openshift-logging/logging-loki-querier-5895d59bb8-h98vv" Dec 10 11:02:22 crc kubenswrapper[4780]: I1210 11:02:22.317928 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-logging/logging-loki-gateway-7d6b48847-cgx9d"] Dec 10 11:02:22 crc kubenswrapper[4780]: I1210 11:02:22.327515 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-logging/logging-loki-gateway-7d6b48847-cgx9d" Dec 10 11:02:22 crc kubenswrapper[4780]: I1210 11:02:22.332242 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-gateway-dockercfg-r544k" Dec 10 11:02:22 crc kubenswrapper[4780]: I1210 11:02:22.367501 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/logging-loki-gateway-7d6b48847-cgx9d"] Dec 10 11:02:22 crc kubenswrapper[4780]: I1210 11:02:22.378043 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-query-frontend-grpc\" (UniqueName: \"kubernetes.io/secret/9906b064-40d7-43bc-bb9b-52863d99a2f4-logging-loki-query-frontend-grpc\") pod \"logging-loki-query-frontend-84558f7c9f-tjd86\" (UID: \"9906b064-40d7-43bc-bb9b-52863d99a2f4\") " pod="openshift-logging/logging-loki-query-frontend-84558f7c9f-tjd86" Dec 10 11:02:22 crc kubenswrapper[4780]: I1210 11:02:22.378189 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-query-frontend-http\" (UniqueName: \"kubernetes.io/secret/9906b064-40d7-43bc-bb9b-52863d99a2f4-logging-loki-query-frontend-http\") pod \"logging-loki-query-frontend-84558f7c9f-tjd86\" (UID: \"9906b064-40d7-43bc-bb9b-52863d99a2f4\") " pod="openshift-logging/logging-loki-query-frontend-84558f7c9f-tjd86" Dec 10 11:02:22 crc kubenswrapper[4780]: I1210 11:02:22.378272 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-d4b28\" (UniqueName: \"kubernetes.io/projected/9906b064-40d7-43bc-bb9b-52863d99a2f4-kube-api-access-d4b28\") pod \"logging-loki-query-frontend-84558f7c9f-tjd86\" (UID: \"9906b064-40d7-43bc-bb9b-52863d99a2f4\") " pod="openshift-logging/logging-loki-query-frontend-84558f7c9f-tjd86" Dec 10 11:02:22 crc kubenswrapper[4780]: I1210 11:02:22.379623 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9906b064-40d7-43bc-bb9b-52863d99a2f4-config\") pod \"logging-loki-query-frontend-84558f7c9f-tjd86\" (UID: \"9906b064-40d7-43bc-bb9b-52863d99a2f4\") " pod="openshift-logging/logging-loki-query-frontend-84558f7c9f-tjd86" Dec 10 11:02:22 crc kubenswrapper[4780]: I1210 11:02:22.379656 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/9906b064-40d7-43bc-bb9b-52863d99a2f4-logging-loki-ca-bundle\") pod \"logging-loki-query-frontend-84558f7c9f-tjd86\" (UID: \"9906b064-40d7-43bc-bb9b-52863d99a2f4\") " pod="openshift-logging/logging-loki-query-frontend-84558f7c9f-tjd86" Dec 10 11:02:22 crc kubenswrapper[4780]: I1210 11:02:22.382342 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/9906b064-40d7-43bc-bb9b-52863d99a2f4-logging-loki-ca-bundle\") pod \"logging-loki-query-frontend-84558f7c9f-tjd86\" (UID: \"9906b064-40d7-43bc-bb9b-52863d99a2f4\") " pod="openshift-logging/logging-loki-query-frontend-84558f7c9f-tjd86" Dec 10 11:02:22 crc kubenswrapper[4780]: I1210 11:02:22.390039 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9906b064-40d7-43bc-bb9b-52863d99a2f4-config\") pod \"logging-loki-query-frontend-84558f7c9f-tjd86\" (UID: \"9906b064-40d7-43bc-bb9b-52863d99a2f4\") " pod="openshift-logging/logging-loki-query-frontend-84558f7c9f-tjd86" Dec 10 11:02:22 crc 
kubenswrapper[4780]: I1210 11:02:22.391035 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-logging/logging-loki-querier-5895d59bb8-h98vv" Dec 10 11:02:22 crc kubenswrapper[4780]: I1210 11:02:22.398321 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-query-frontend-http\" (UniqueName: \"kubernetes.io/secret/9906b064-40d7-43bc-bb9b-52863d99a2f4-logging-loki-query-frontend-http\") pod \"logging-loki-query-frontend-84558f7c9f-tjd86\" (UID: \"9906b064-40d7-43bc-bb9b-52863d99a2f4\") " pod="openshift-logging/logging-loki-query-frontend-84558f7c9f-tjd86" Dec 10 11:02:22 crc kubenswrapper[4780]: I1210 11:02:22.421488 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-d4b28\" (UniqueName: \"kubernetes.io/projected/9906b064-40d7-43bc-bb9b-52863d99a2f4-kube-api-access-d4b28\") pod \"logging-loki-query-frontend-84558f7c9f-tjd86\" (UID: \"9906b064-40d7-43bc-bb9b-52863d99a2f4\") " pod="openshift-logging/logging-loki-query-frontend-84558f7c9f-tjd86" Dec 10 11:02:22 crc kubenswrapper[4780]: I1210 11:02:22.423911 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-g66q8" event={"ID":"f898695c-3396-42b3-9bf5-0fe05c234d39","Type":"ContainerDied","Data":"6de5c3578ac31054005e7a5ba621b6053b4499de3d665df1d2ba592d7ae1c022"} Dec 10 11:02:22 crc kubenswrapper[4780]: I1210 11:02:22.424046 4780 generic.go:334] "Generic (PLEG): container finished" podID="f898695c-3396-42b3-9bf5-0fe05c234d39" containerID="6de5c3578ac31054005e7a5ba621b6053b4499de3d665df1d2ba592d7ae1c022" exitCode=0 Dec 10 11:02:22 crc kubenswrapper[4780]: I1210 11:02:22.432890 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-query-frontend-grpc\" (UniqueName: \"kubernetes.io/secret/9906b064-40d7-43bc-bb9b-52863d99a2f4-logging-loki-query-frontend-grpc\") pod \"logging-loki-query-frontend-84558f7c9f-tjd86\" (UID: \"9906b064-40d7-43bc-bb9b-52863d99a2f4\") " pod="openshift-logging/logging-loki-query-frontend-84558f7c9f-tjd86" Dec 10 11:02:22 crc kubenswrapper[4780]: I1210 11:02:22.482695 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-gateway-client-http\" (UniqueName: \"kubernetes.io/secret/dc698901-e923-49fa-bc7f-f4e3f9f0a99b-logging-loki-gateway-client-http\") pod \"logging-loki-gateway-7d6b48847-cgx9d\" (UID: \"dc698901-e923-49fa-bc7f-f4e3f9f0a99b\") " pod="openshift-logging/logging-loki-gateway-7d6b48847-cgx9d" Dec 10 11:02:22 crc kubenswrapper[4780]: I1210 11:02:22.482822 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/ec49e3ad-a0d4-4eaa-a8f6-6f76c1663546-logging-loki-ca-bundle\") pod \"logging-loki-gateway-7d6b48847-8n96n\" (UID: \"ec49e3ad-a0d4-4eaa-a8f6-6f76c1663546\") " pod="openshift-logging/logging-loki-gateway-7d6b48847-8n96n" Dec 10 11:02:22 crc kubenswrapper[4780]: I1210 11:02:22.482861 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tenants\" (UniqueName: \"kubernetes.io/secret/dc698901-e923-49fa-bc7f-f4e3f9f0a99b-tenants\") pod \"logging-loki-gateway-7d6b48847-cgx9d\" (UID: \"dc698901-e923-49fa-bc7f-f4e3f9f0a99b\") " pod="openshift-logging/logging-loki-gateway-7d6b48847-cgx9d" Dec 10 11:02:22 crc kubenswrapper[4780]: I1210 11:02:22.482893 4780 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bs6d8\" (UniqueName: \"kubernetes.io/projected/dc698901-e923-49fa-bc7f-f4e3f9f0a99b-kube-api-access-bs6d8\") pod \"logging-loki-gateway-7d6b48847-cgx9d\" (UID: \"dc698901-e923-49fa-bc7f-f4e3f9f0a99b\") " pod="openshift-logging/logging-loki-gateway-7d6b48847-cgx9d" Dec 10 11:02:22 crc kubenswrapper[4780]: I1210 11:02:22.482968 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lokistack-gateway\" (UniqueName: \"kubernetes.io/configmap/dc698901-e923-49fa-bc7f-f4e3f9f0a99b-lokistack-gateway\") pod \"logging-loki-gateway-7d6b48847-cgx9d\" (UID: \"dc698901-e923-49fa-bc7f-f4e3f9f0a99b\") " pod="openshift-logging/logging-loki-gateway-7d6b48847-cgx9d" Dec 10 11:02:22 crc kubenswrapper[4780]: I1210 11:02:22.482995 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/dc698901-e923-49fa-bc7f-f4e3f9f0a99b-logging-loki-ca-bundle\") pod \"logging-loki-gateway-7d6b48847-cgx9d\" (UID: \"dc698901-e923-49fa-bc7f-f4e3f9f0a99b\") " pod="openshift-logging/logging-loki-gateway-7d6b48847-cgx9d" Dec 10 11:02:22 crc kubenswrapper[4780]: I1210 11:02:22.483015 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tenants\" (UniqueName: \"kubernetes.io/secret/ec49e3ad-a0d4-4eaa-a8f6-6f76c1663546-tenants\") pod \"logging-loki-gateway-7d6b48847-8n96n\" (UID: \"ec49e3ad-a0d4-4eaa-a8f6-6f76c1663546\") " pod="openshift-logging/logging-loki-gateway-7d6b48847-8n96n" Dec 10 11:02:22 crc kubenswrapper[4780]: I1210 11:02:22.483049 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rbac\" (UniqueName: \"kubernetes.io/configmap/ec49e3ad-a0d4-4eaa-a8f6-6f76c1663546-rbac\") pod \"logging-loki-gateway-7d6b48847-8n96n\" (UID: \"ec49e3ad-a0d4-4eaa-a8f6-6f76c1663546\") " pod="openshift-logging/logging-loki-gateway-7d6b48847-8n96n" Dec 10 11:02:22 crc kubenswrapper[4780]: I1210 11:02:22.483086 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lokistack-gateway\" (UniqueName: \"kubernetes.io/configmap/ec49e3ad-a0d4-4eaa-a8f6-6f76c1663546-lokistack-gateway\") pod \"logging-loki-gateway-7d6b48847-8n96n\" (UID: \"ec49e3ad-a0d4-4eaa-a8f6-6f76c1663546\") " pod="openshift-logging/logging-loki-gateway-7d6b48847-8n96n" Dec 10 11:02:22 crc kubenswrapper[4780]: I1210 11:02:22.483121 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-gateway-client-http\" (UniqueName: \"kubernetes.io/secret/ec49e3ad-a0d4-4eaa-a8f6-6f76c1663546-logging-loki-gateway-client-http\") pod \"logging-loki-gateway-7d6b48847-8n96n\" (UID: \"ec49e3ad-a0d4-4eaa-a8f6-6f76c1663546\") " pod="openshift-logging/logging-loki-gateway-7d6b48847-8n96n" Dec 10 11:02:22 crc kubenswrapper[4780]: I1210 11:02:22.483184 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tls-secret\" (UniqueName: \"kubernetes.io/secret/ec49e3ad-a0d4-4eaa-a8f6-6f76c1663546-tls-secret\") pod \"logging-loki-gateway-7d6b48847-8n96n\" (UID: \"ec49e3ad-a0d4-4eaa-a8f6-6f76c1663546\") " pod="openshift-logging/logging-loki-gateway-7d6b48847-8n96n" Dec 10 11:02:22 crc kubenswrapper[4780]: I1210 11:02:22.483206 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"logging-loki-gateway-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/dc698901-e923-49fa-bc7f-f4e3f9f0a99b-logging-loki-gateway-ca-bundle\") pod \"logging-loki-gateway-7d6b48847-cgx9d\" (UID: \"dc698901-e923-49fa-bc7f-f4e3f9f0a99b\") " pod="openshift-logging/logging-loki-gateway-7d6b48847-cgx9d" Dec 10 11:02:22 crc kubenswrapper[4780]: I1210 11:02:22.483230 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5nbvm\" (UniqueName: \"kubernetes.io/projected/ec49e3ad-a0d4-4eaa-a8f6-6f76c1663546-kube-api-access-5nbvm\") pod \"logging-loki-gateway-7d6b48847-8n96n\" (UID: \"ec49e3ad-a0d4-4eaa-a8f6-6f76c1663546\") " pod="openshift-logging/logging-loki-gateway-7d6b48847-8n96n" Dec 10 11:02:22 crc kubenswrapper[4780]: I1210 11:02:22.483254 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rbac\" (UniqueName: \"kubernetes.io/configmap/dc698901-e923-49fa-bc7f-f4e3f9f0a99b-rbac\") pod \"logging-loki-gateway-7d6b48847-cgx9d\" (UID: \"dc698901-e923-49fa-bc7f-f4e3f9f0a99b\") " pod="openshift-logging/logging-loki-gateway-7d6b48847-cgx9d" Dec 10 11:02:22 crc kubenswrapper[4780]: I1210 11:02:22.483273 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-gateway-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/ec49e3ad-a0d4-4eaa-a8f6-6f76c1663546-logging-loki-gateway-ca-bundle\") pod \"logging-loki-gateway-7d6b48847-8n96n\" (UID: \"ec49e3ad-a0d4-4eaa-a8f6-6f76c1663546\") " pod="openshift-logging/logging-loki-gateway-7d6b48847-8n96n" Dec 10 11:02:22 crc kubenswrapper[4780]: I1210 11:02:22.483312 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tls-secret\" (UniqueName: \"kubernetes.io/secret/dc698901-e923-49fa-bc7f-f4e3f9f0a99b-tls-secret\") pod \"logging-loki-gateway-7d6b48847-cgx9d\" (UID: \"dc698901-e923-49fa-bc7f-f4e3f9f0a99b\") " pod="openshift-logging/logging-loki-gateway-7d6b48847-cgx9d" Dec 10 11:02:22 crc kubenswrapper[4780]: I1210 11:02:22.505778 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-logging/logging-loki-query-frontend-84558f7c9f-tjd86" Dec 10 11:02:22 crc kubenswrapper[4780]: I1210 11:02:22.585385 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tenants\" (UniqueName: \"kubernetes.io/secret/ec49e3ad-a0d4-4eaa-a8f6-6f76c1663546-tenants\") pod \"logging-loki-gateway-7d6b48847-8n96n\" (UID: \"ec49e3ad-a0d4-4eaa-a8f6-6f76c1663546\") " pod="openshift-logging/logging-loki-gateway-7d6b48847-8n96n" Dec 10 11:02:22 crc kubenswrapper[4780]: I1210 11:02:22.585459 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rbac\" (UniqueName: \"kubernetes.io/configmap/ec49e3ad-a0d4-4eaa-a8f6-6f76c1663546-rbac\") pod \"logging-loki-gateway-7d6b48847-8n96n\" (UID: \"ec49e3ad-a0d4-4eaa-a8f6-6f76c1663546\") " pod="openshift-logging/logging-loki-gateway-7d6b48847-8n96n" Dec 10 11:02:22 crc kubenswrapper[4780]: I1210 11:02:22.585502 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"lokistack-gateway\" (UniqueName: \"kubernetes.io/configmap/ec49e3ad-a0d4-4eaa-a8f6-6f76c1663546-lokistack-gateway\") pod \"logging-loki-gateway-7d6b48847-8n96n\" (UID: \"ec49e3ad-a0d4-4eaa-a8f6-6f76c1663546\") " pod="openshift-logging/logging-loki-gateway-7d6b48847-8n96n" Dec 10 11:02:22 crc kubenswrapper[4780]: I1210 11:02:22.585547 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-gateway-client-http\" (UniqueName: \"kubernetes.io/secret/ec49e3ad-a0d4-4eaa-a8f6-6f76c1663546-logging-loki-gateway-client-http\") pod \"logging-loki-gateway-7d6b48847-8n96n\" (UID: \"ec49e3ad-a0d4-4eaa-a8f6-6f76c1663546\") " pod="openshift-logging/logging-loki-gateway-7d6b48847-8n96n" Dec 10 11:02:22 crc kubenswrapper[4780]: I1210 11:02:22.585583 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-secret\" (UniqueName: \"kubernetes.io/secret/ec49e3ad-a0d4-4eaa-a8f6-6f76c1663546-tls-secret\") pod \"logging-loki-gateway-7d6b48847-8n96n\" (UID: \"ec49e3ad-a0d4-4eaa-a8f6-6f76c1663546\") " pod="openshift-logging/logging-loki-gateway-7d6b48847-8n96n" Dec 10 11:02:22 crc kubenswrapper[4780]: I1210 11:02:22.585608 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-gateway-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/dc698901-e923-49fa-bc7f-f4e3f9f0a99b-logging-loki-gateway-ca-bundle\") pod \"logging-loki-gateway-7d6b48847-cgx9d\" (UID: \"dc698901-e923-49fa-bc7f-f4e3f9f0a99b\") " pod="openshift-logging/logging-loki-gateway-7d6b48847-cgx9d" Dec 10 11:02:22 crc kubenswrapper[4780]: I1210 11:02:22.585645 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5nbvm\" (UniqueName: \"kubernetes.io/projected/ec49e3ad-a0d4-4eaa-a8f6-6f76c1663546-kube-api-access-5nbvm\") pod \"logging-loki-gateway-7d6b48847-8n96n\" (UID: \"ec49e3ad-a0d4-4eaa-a8f6-6f76c1663546\") " pod="openshift-logging/logging-loki-gateway-7d6b48847-8n96n" Dec 10 11:02:22 crc kubenswrapper[4780]: I1210 11:02:22.585670 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rbac\" (UniqueName: \"kubernetes.io/configmap/dc698901-e923-49fa-bc7f-f4e3f9f0a99b-rbac\") pod \"logging-loki-gateway-7d6b48847-cgx9d\" (UID: \"dc698901-e923-49fa-bc7f-f4e3f9f0a99b\") " pod="openshift-logging/logging-loki-gateway-7d6b48847-cgx9d" Dec 10 11:02:22 crc kubenswrapper[4780]: I1210 11:02:22.585691 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for 
volume \"logging-loki-gateway-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/ec49e3ad-a0d4-4eaa-a8f6-6f76c1663546-logging-loki-gateway-ca-bundle\") pod \"logging-loki-gateway-7d6b48847-8n96n\" (UID: \"ec49e3ad-a0d4-4eaa-a8f6-6f76c1663546\") " pod="openshift-logging/logging-loki-gateway-7d6b48847-8n96n" Dec 10 11:02:22 crc kubenswrapper[4780]: I1210 11:02:22.585759 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-secret\" (UniqueName: \"kubernetes.io/secret/dc698901-e923-49fa-bc7f-f4e3f9f0a99b-tls-secret\") pod \"logging-loki-gateway-7d6b48847-cgx9d\" (UID: \"dc698901-e923-49fa-bc7f-f4e3f9f0a99b\") " pod="openshift-logging/logging-loki-gateway-7d6b48847-cgx9d" Dec 10 11:02:22 crc kubenswrapper[4780]: I1210 11:02:22.585807 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-gateway-client-http\" (UniqueName: \"kubernetes.io/secret/dc698901-e923-49fa-bc7f-f4e3f9f0a99b-logging-loki-gateway-client-http\") pod \"logging-loki-gateway-7d6b48847-cgx9d\" (UID: \"dc698901-e923-49fa-bc7f-f4e3f9f0a99b\") " pod="openshift-logging/logging-loki-gateway-7d6b48847-cgx9d" Dec 10 11:02:22 crc kubenswrapper[4780]: I1210 11:02:22.585842 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/ec49e3ad-a0d4-4eaa-a8f6-6f76c1663546-logging-loki-ca-bundle\") pod \"logging-loki-gateway-7d6b48847-8n96n\" (UID: \"ec49e3ad-a0d4-4eaa-a8f6-6f76c1663546\") " pod="openshift-logging/logging-loki-gateway-7d6b48847-8n96n" Dec 10 11:02:22 crc kubenswrapper[4780]: I1210 11:02:22.585870 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tenants\" (UniqueName: \"kubernetes.io/secret/dc698901-e923-49fa-bc7f-f4e3f9f0a99b-tenants\") pod \"logging-loki-gateway-7d6b48847-cgx9d\" (UID: \"dc698901-e923-49fa-bc7f-f4e3f9f0a99b\") " pod="openshift-logging/logging-loki-gateway-7d6b48847-cgx9d" Dec 10 11:02:22 crc kubenswrapper[4780]: I1210 11:02:22.585896 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bs6d8\" (UniqueName: \"kubernetes.io/projected/dc698901-e923-49fa-bc7f-f4e3f9f0a99b-kube-api-access-bs6d8\") pod \"logging-loki-gateway-7d6b48847-cgx9d\" (UID: \"dc698901-e923-49fa-bc7f-f4e3f9f0a99b\") " pod="openshift-logging/logging-loki-gateway-7d6b48847-cgx9d" Dec 10 11:02:22 crc kubenswrapper[4780]: I1210 11:02:22.586240 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"lokistack-gateway\" (UniqueName: \"kubernetes.io/configmap/dc698901-e923-49fa-bc7f-f4e3f9f0a99b-lokistack-gateway\") pod \"logging-loki-gateway-7d6b48847-cgx9d\" (UID: \"dc698901-e923-49fa-bc7f-f4e3f9f0a99b\") " pod="openshift-logging/logging-loki-gateway-7d6b48847-cgx9d" Dec 10 11:02:22 crc kubenswrapper[4780]: I1210 11:02:22.586273 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/dc698901-e923-49fa-bc7f-f4e3f9f0a99b-logging-loki-ca-bundle\") pod \"logging-loki-gateway-7d6b48847-cgx9d\" (UID: \"dc698901-e923-49fa-bc7f-f4e3f9f0a99b\") " pod="openshift-logging/logging-loki-gateway-7d6b48847-cgx9d" Dec 10 11:02:22 crc kubenswrapper[4780]: I1210 11:02:22.587228 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rbac\" (UniqueName: \"kubernetes.io/configmap/dc698901-e923-49fa-bc7f-f4e3f9f0a99b-rbac\") pod \"logging-loki-gateway-7d6b48847-cgx9d\" (UID: 
\"dc698901-e923-49fa-bc7f-f4e3f9f0a99b\") " pod="openshift-logging/logging-loki-gateway-7d6b48847-cgx9d" Dec 10 11:02:22 crc kubenswrapper[4780]: I1210 11:02:22.587336 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/dc698901-e923-49fa-bc7f-f4e3f9f0a99b-logging-loki-ca-bundle\") pod \"logging-loki-gateway-7d6b48847-cgx9d\" (UID: \"dc698901-e923-49fa-bc7f-f4e3f9f0a99b\") " pod="openshift-logging/logging-loki-gateway-7d6b48847-cgx9d" Dec 10 11:02:22 crc kubenswrapper[4780]: I1210 11:02:22.587781 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-gateway-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/dc698901-e923-49fa-bc7f-f4e3f9f0a99b-logging-loki-gateway-ca-bundle\") pod \"logging-loki-gateway-7d6b48847-cgx9d\" (UID: \"dc698901-e923-49fa-bc7f-f4e3f9f0a99b\") " pod="openshift-logging/logging-loki-gateway-7d6b48847-cgx9d" Dec 10 11:02:22 crc kubenswrapper[4780]: I1210 11:02:22.587901 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/ec49e3ad-a0d4-4eaa-a8f6-6f76c1663546-logging-loki-ca-bundle\") pod \"logging-loki-gateway-7d6b48847-8n96n\" (UID: \"ec49e3ad-a0d4-4eaa-a8f6-6f76c1663546\") " pod="openshift-logging/logging-loki-gateway-7d6b48847-8n96n" Dec 10 11:02:22 crc kubenswrapper[4780]: I1210 11:02:22.588939 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rbac\" (UniqueName: \"kubernetes.io/configmap/ec49e3ad-a0d4-4eaa-a8f6-6f76c1663546-rbac\") pod \"logging-loki-gateway-7d6b48847-8n96n\" (UID: \"ec49e3ad-a0d4-4eaa-a8f6-6f76c1663546\") " pod="openshift-logging/logging-loki-gateway-7d6b48847-8n96n" Dec 10 11:02:22 crc kubenswrapper[4780]: I1210 11:02:22.593964 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"lokistack-gateway\" (UniqueName: \"kubernetes.io/configmap/dc698901-e923-49fa-bc7f-f4e3f9f0a99b-lokistack-gateway\") pod \"logging-loki-gateway-7d6b48847-cgx9d\" (UID: \"dc698901-e923-49fa-bc7f-f4e3f9f0a99b\") " pod="openshift-logging/logging-loki-gateway-7d6b48847-cgx9d" Dec 10 11:02:22 crc kubenswrapper[4780]: I1210 11:02:22.594169 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tls-secret\" (UniqueName: \"kubernetes.io/secret/ec49e3ad-a0d4-4eaa-a8f6-6f76c1663546-tls-secret\") pod \"logging-loki-gateway-7d6b48847-8n96n\" (UID: \"ec49e3ad-a0d4-4eaa-a8f6-6f76c1663546\") " pod="openshift-logging/logging-loki-gateway-7d6b48847-8n96n" Dec 10 11:02:22 crc kubenswrapper[4780]: I1210 11:02:22.598327 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tenants\" (UniqueName: \"kubernetes.io/secret/ec49e3ad-a0d4-4eaa-a8f6-6f76c1663546-tenants\") pod \"logging-loki-gateway-7d6b48847-8n96n\" (UID: \"ec49e3ad-a0d4-4eaa-a8f6-6f76c1663546\") " pod="openshift-logging/logging-loki-gateway-7d6b48847-8n96n" Dec 10 11:02:22 crc kubenswrapper[4780]: I1210 11:02:22.598559 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tls-secret\" (UniqueName: \"kubernetes.io/secret/dc698901-e923-49fa-bc7f-f4e3f9f0a99b-tls-secret\") pod \"logging-loki-gateway-7d6b48847-cgx9d\" (UID: \"dc698901-e923-49fa-bc7f-f4e3f9f0a99b\") " pod="openshift-logging/logging-loki-gateway-7d6b48847-cgx9d" Dec 10 11:02:22 crc kubenswrapper[4780]: I1210 11:02:22.598680 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"lokistack-gateway\" (UniqueName: 
\"kubernetes.io/configmap/ec49e3ad-a0d4-4eaa-a8f6-6f76c1663546-lokistack-gateway\") pod \"logging-loki-gateway-7d6b48847-8n96n\" (UID: \"ec49e3ad-a0d4-4eaa-a8f6-6f76c1663546\") " pod="openshift-logging/logging-loki-gateway-7d6b48847-8n96n" Dec 10 11:02:22 crc kubenswrapper[4780]: I1210 11:02:22.591069 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-gateway-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/ec49e3ad-a0d4-4eaa-a8f6-6f76c1663546-logging-loki-gateway-ca-bundle\") pod \"logging-loki-gateway-7d6b48847-8n96n\" (UID: \"ec49e3ad-a0d4-4eaa-a8f6-6f76c1663546\") " pod="openshift-logging/logging-loki-gateway-7d6b48847-8n96n" Dec 10 11:02:22 crc kubenswrapper[4780]: I1210 11:02:22.605785 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-gateway-client-http\" (UniqueName: \"kubernetes.io/secret/ec49e3ad-a0d4-4eaa-a8f6-6f76c1663546-logging-loki-gateway-client-http\") pod \"logging-loki-gateway-7d6b48847-8n96n\" (UID: \"ec49e3ad-a0d4-4eaa-a8f6-6f76c1663546\") " pod="openshift-logging/logging-loki-gateway-7d6b48847-8n96n" Dec 10 11:02:22 crc kubenswrapper[4780]: I1210 11:02:22.607198 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-gateway-client-http\" (UniqueName: \"kubernetes.io/secret/dc698901-e923-49fa-bc7f-f4e3f9f0a99b-logging-loki-gateway-client-http\") pod \"logging-loki-gateway-7d6b48847-cgx9d\" (UID: \"dc698901-e923-49fa-bc7f-f4e3f9f0a99b\") " pod="openshift-logging/logging-loki-gateway-7d6b48847-cgx9d" Dec 10 11:02:22 crc kubenswrapper[4780]: I1210 11:02:22.609212 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tenants\" (UniqueName: \"kubernetes.io/secret/dc698901-e923-49fa-bc7f-f4e3f9f0a99b-tenants\") pod \"logging-loki-gateway-7d6b48847-cgx9d\" (UID: \"dc698901-e923-49fa-bc7f-f4e3f9f0a99b\") " pod="openshift-logging/logging-loki-gateway-7d6b48847-cgx9d" Dec 10 11:02:22 crc kubenswrapper[4780]: I1210 11:02:22.615779 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5nbvm\" (UniqueName: \"kubernetes.io/projected/ec49e3ad-a0d4-4eaa-a8f6-6f76c1663546-kube-api-access-5nbvm\") pod \"logging-loki-gateway-7d6b48847-8n96n\" (UID: \"ec49e3ad-a0d4-4eaa-a8f6-6f76c1663546\") " pod="openshift-logging/logging-loki-gateway-7d6b48847-8n96n" Dec 10 11:02:22 crc kubenswrapper[4780]: I1210 11:02:22.621894 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bs6d8\" (UniqueName: \"kubernetes.io/projected/dc698901-e923-49fa-bc7f-f4e3f9f0a99b-kube-api-access-bs6d8\") pod \"logging-loki-gateway-7d6b48847-cgx9d\" (UID: \"dc698901-e923-49fa-bc7f-f4e3f9f0a99b\") " pod="openshift-logging/logging-loki-gateway-7d6b48847-cgx9d" Dec 10 11:02:22 crc kubenswrapper[4780]: I1210 11:02:22.650436 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-logging/logging-loki-gateway-7d6b48847-8n96n" Dec 10 11:02:22 crc kubenswrapper[4780]: I1210 11:02:22.669445 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-logging/logging-loki-gateway-7d6b48847-cgx9d" Dec 10 11:02:22 crc kubenswrapper[4780]: I1210 11:02:22.769493 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/logging-loki-distributor-76cc67bf56-j5hdz"] Dec 10 11:02:22 crc kubenswrapper[4780]: I1210 11:02:22.774847 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/logging-loki-querier-5895d59bb8-h98vv"] Dec 10 11:02:22 crc kubenswrapper[4780]: W1210 11:02:22.837128 4780 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod7c0379b8_51d2_4860_be84_18dfcb007969.slice/crio-7ea75cbf3a04935694e60dc3c8531eeb57b1ac29d8f1b6255aa3cd5548d4eca7 WatchSource:0}: Error finding container 7ea75cbf3a04935694e60dc3c8531eeb57b1ac29d8f1b6255aa3cd5548d4eca7: Status 404 returned error can't find the container with id 7ea75cbf3a04935694e60dc3c8531eeb57b1ac29d8f1b6255aa3cd5548d4eca7 Dec 10 11:02:22 crc kubenswrapper[4780]: I1210 11:02:22.884888 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/logging-loki-query-frontend-84558f7c9f-tjd86"] Dec 10 11:02:23 crc kubenswrapper[4780]: I1210 11:02:23.031010 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-logging/logging-loki-ingester-0"] Dec 10 11:02:23 crc kubenswrapper[4780]: I1210 11:02:23.049141 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/logging-loki-ingester-0"] Dec 10 11:02:23 crc kubenswrapper[4780]: I1210 11:02:23.049304 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-logging/logging-loki-ingester-0" Dec 10 11:02:23 crc kubenswrapper[4780]: I1210 11:02:23.055399 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-ingester-http" Dec 10 11:02:23 crc kubenswrapper[4780]: I1210 11:02:23.056443 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-ingester-grpc" Dec 10 11:02:23 crc kubenswrapper[4780]: I1210 11:02:23.097144 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-logging/logging-loki-compactor-0"] Dec 10 11:02:23 crc kubenswrapper[4780]: I1210 11:02:23.098346 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-logging/logging-loki-compactor-0" Dec 10 11:02:23 crc kubenswrapper[4780]: I1210 11:02:23.101376 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-compactor-http" Dec 10 11:02:23 crc kubenswrapper[4780]: I1210 11:02:23.101701 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-compactor-grpc" Dec 10 11:02:23 crc kubenswrapper[4780]: I1210 11:02:23.121652 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/logging-loki-compactor-0"] Dec 10 11:02:23 crc kubenswrapper[4780]: I1210 11:02:23.199884 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-logging/logging-loki-index-gateway-0"] Dec 10 11:02:23 crc kubenswrapper[4780]: I1210 11:02:23.203104 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-logging/logging-loki-index-gateway-0" Dec 10 11:02:23 crc kubenswrapper[4780]: I1210 11:02:23.208374 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-index-gateway-grpc" Dec 10 11:02:23 crc kubenswrapper[4780]: I1210 11:02:23.209115 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-index-gateway-http" Dec 10 11:02:23 crc kubenswrapper[4780]: I1210 11:02:23.209342 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-78debacb-9295-4343-a3a6-87160fd6b11b\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-78debacb-9295-4343-a3a6-87160fd6b11b\") pod \"logging-loki-compactor-0\" (UID: \"61a3cdd1-2b93-4f37-a58e-d3d0918b60eb\") " pod="openshift-logging/logging-loki-compactor-0" Dec 10 11:02:23 crc kubenswrapper[4780]: I1210 11:02:23.209422 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mbvrz\" (UniqueName: \"kubernetes.io/projected/2b4928b0-ec66-4cee-8fd3-2067b64c4144-kube-api-access-mbvrz\") pod \"logging-loki-ingester-0\" (UID: \"2b4928b0-ec66-4cee-8fd3-2067b64c4144\") " pod="openshift-logging/logging-loki-ingester-0" Dec 10 11:02:23 crc kubenswrapper[4780]: I1210 11:02:23.209476 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-s3\" (UniqueName: \"kubernetes.io/secret/2b4928b0-ec66-4cee-8fd3-2067b64c4144-logging-loki-s3\") pod \"logging-loki-ingester-0\" (UID: \"2b4928b0-ec66-4cee-8fd3-2067b64c4144\") " pod="openshift-logging/logging-loki-ingester-0" Dec 10 11:02:23 crc kubenswrapper[4780]: I1210 11:02:23.209509 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/61a3cdd1-2b93-4f37-a58e-d3d0918b60eb-logging-loki-ca-bundle\") pod \"logging-loki-compactor-0\" (UID: \"61a3cdd1-2b93-4f37-a58e-d3d0918b60eb\") " pod="openshift-logging/logging-loki-compactor-0" Dec 10 11:02:23 crc kubenswrapper[4780]: I1210 11:02:23.209528 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-compactor-grpc\" (UniqueName: \"kubernetes.io/secret/61a3cdd1-2b93-4f37-a58e-d3d0918b60eb-logging-loki-compactor-grpc\") pod \"logging-loki-compactor-0\" (UID: \"61a3cdd1-2b93-4f37-a58e-d3d0918b60eb\") " pod="openshift-logging/logging-loki-compactor-0" Dec 10 11:02:23 crc kubenswrapper[4780]: I1210 11:02:23.209556 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-s3\" (UniqueName: \"kubernetes.io/secret/61a3cdd1-2b93-4f37-a58e-d3d0918b60eb-logging-loki-s3\") pod \"logging-loki-compactor-0\" (UID: \"61a3cdd1-2b93-4f37-a58e-d3d0918b60eb\") " pod="openshift-logging/logging-loki-compactor-0" Dec 10 11:02:23 crc kubenswrapper[4780]: I1210 11:02:23.209574 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-compactor-http\" (UniqueName: \"kubernetes.io/secret/61a3cdd1-2b93-4f37-a58e-d3d0918b60eb-logging-loki-compactor-http\") pod \"logging-loki-compactor-0\" (UID: \"61a3cdd1-2b93-4f37-a58e-d3d0918b60eb\") " pod="openshift-logging/logging-loki-compactor-0" Dec 10 11:02:23 crc kubenswrapper[4780]: I1210 11:02:23.209601 4780 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/61a3cdd1-2b93-4f37-a58e-d3d0918b60eb-config\") pod \"logging-loki-compactor-0\" (UID: \"61a3cdd1-2b93-4f37-a58e-d3d0918b60eb\") " pod="openshift-logging/logging-loki-compactor-0" Dec 10 11:02:23 crc kubenswrapper[4780]: I1210 11:02:23.209617 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-ingester-http\" (UniqueName: \"kubernetes.io/secret/2b4928b0-ec66-4cee-8fd3-2067b64c4144-logging-loki-ingester-http\") pod \"logging-loki-ingester-0\" (UID: \"2b4928b0-ec66-4cee-8fd3-2067b64c4144\") " pod="openshift-logging/logging-loki-ingester-0" Dec 10 11:02:23 crc kubenswrapper[4780]: I1210 11:02:23.209638 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-ingester-grpc\" (UniqueName: \"kubernetes.io/secret/2b4928b0-ec66-4cee-8fd3-2067b64c4144-logging-loki-ingester-grpc\") pod \"logging-loki-ingester-0\" (UID: \"2b4928b0-ec66-4cee-8fd3-2067b64c4144\") " pod="openshift-logging/logging-loki-ingester-0" Dec 10 11:02:23 crc kubenswrapper[4780]: I1210 11:02:23.209670 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/2b4928b0-ec66-4cee-8fd3-2067b64c4144-logging-loki-ca-bundle\") pod \"logging-loki-ingester-0\" (UID: \"2b4928b0-ec66-4cee-8fd3-2067b64c4144\") " pod="openshift-logging/logging-loki-ingester-0" Dec 10 11:02:23 crc kubenswrapper[4780]: I1210 11:02:23.209709 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-705dc182-e450-44c3-baae-283d2a403ff4\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-705dc182-e450-44c3-baae-283d2a403ff4\") pod \"logging-loki-ingester-0\" (UID: \"2b4928b0-ec66-4cee-8fd3-2067b64c4144\") " pod="openshift-logging/logging-loki-ingester-0" Dec 10 11:02:23 crc kubenswrapper[4780]: I1210 11:02:23.209745 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2b4928b0-ec66-4cee-8fd3-2067b64c4144-config\") pod \"logging-loki-ingester-0\" (UID: \"2b4928b0-ec66-4cee-8fd3-2067b64c4144\") " pod="openshift-logging/logging-loki-ingester-0" Dec 10 11:02:23 crc kubenswrapper[4780]: I1210 11:02:23.209940 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7fx6m\" (UniqueName: \"kubernetes.io/projected/61a3cdd1-2b93-4f37-a58e-d3d0918b60eb-kube-api-access-7fx6m\") pod \"logging-loki-compactor-0\" (UID: \"61a3cdd1-2b93-4f37-a58e-d3d0918b60eb\") " pod="openshift-logging/logging-loki-compactor-0" Dec 10 11:02:23 crc kubenswrapper[4780]: I1210 11:02:23.209994 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-3d1ea538-8cac-4d7d-b03a-e65c65acb462\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-3d1ea538-8cac-4d7d-b03a-e65c65acb462\") pod \"logging-loki-ingester-0\" (UID: \"2b4928b0-ec66-4cee-8fd3-2067b64c4144\") " pod="openshift-logging/logging-loki-ingester-0" Dec 10 11:02:23 crc kubenswrapper[4780]: I1210 11:02:23.261841 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/logging-loki-index-gateway-0"] Dec 10 11:02:23 crc kubenswrapper[4780]: I1210 11:02:23.311691 4780 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"kube-api-access-7fx6m\" (UniqueName: \"kubernetes.io/projected/61a3cdd1-2b93-4f37-a58e-d3d0918b60eb-kube-api-access-7fx6m\") pod \"logging-loki-compactor-0\" (UID: \"61a3cdd1-2b93-4f37-a58e-d3d0918b60eb\") " pod="openshift-logging/logging-loki-compactor-0" Dec 10 11:02:23 crc kubenswrapper[4780]: I1210 11:02:23.311795 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-3d1ea538-8cac-4d7d-b03a-e65c65acb462\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-3d1ea538-8cac-4d7d-b03a-e65c65acb462\") pod \"logging-loki-ingester-0\" (UID: \"2b4928b0-ec66-4cee-8fd3-2067b64c4144\") " pod="openshift-logging/logging-loki-ingester-0" Dec 10 11:02:23 crc kubenswrapper[4780]: I1210 11:02:23.311837 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-index-gateway-http\" (UniqueName: \"kubernetes.io/secret/1c91b690-c1ad-4923-ad75-f7e5611441f9-logging-loki-index-gateway-http\") pod \"logging-loki-index-gateway-0\" (UID: \"1c91b690-c1ad-4923-ad75-f7e5611441f9\") " pod="openshift-logging/logging-loki-index-gateway-0" Dec 10 11:02:23 crc kubenswrapper[4780]: I1210 11:02:23.311876 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-78debacb-9295-4343-a3a6-87160fd6b11b\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-78debacb-9295-4343-a3a6-87160fd6b11b\") pod \"logging-loki-compactor-0\" (UID: \"61a3cdd1-2b93-4f37-a58e-d3d0918b60eb\") " pod="openshift-logging/logging-loki-compactor-0" Dec 10 11:02:23 crc kubenswrapper[4780]: I1210 11:02:23.311961 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mbvrz\" (UniqueName: \"kubernetes.io/projected/2b4928b0-ec66-4cee-8fd3-2067b64c4144-kube-api-access-mbvrz\") pod \"logging-loki-ingester-0\" (UID: \"2b4928b0-ec66-4cee-8fd3-2067b64c4144\") " pod="openshift-logging/logging-loki-ingester-0" Dec 10 11:02:23 crc kubenswrapper[4780]: I1210 11:02:23.312036 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-index-gateway-grpc\" (UniqueName: \"kubernetes.io/secret/1c91b690-c1ad-4923-ad75-f7e5611441f9-logging-loki-index-gateway-grpc\") pod \"logging-loki-index-gateway-0\" (UID: \"1c91b690-c1ad-4923-ad75-f7e5611441f9\") " pod="openshift-logging/logging-loki-index-gateway-0" Dec 10 11:02:23 crc kubenswrapper[4780]: I1210 11:02:23.312203 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-s3\" (UniqueName: \"kubernetes.io/secret/2b4928b0-ec66-4cee-8fd3-2067b64c4144-logging-loki-s3\") pod \"logging-loki-ingester-0\" (UID: \"2b4928b0-ec66-4cee-8fd3-2067b64c4144\") " pod="openshift-logging/logging-loki-ingester-0" Dec 10 11:02:23 crc kubenswrapper[4780]: I1210 11:02:23.312284 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/61a3cdd1-2b93-4f37-a58e-d3d0918b60eb-logging-loki-ca-bundle\") pod \"logging-loki-compactor-0\" (UID: \"61a3cdd1-2b93-4f37-a58e-d3d0918b60eb\") " pod="openshift-logging/logging-loki-compactor-0" Dec 10 11:02:23 crc kubenswrapper[4780]: I1210 11:02:23.312333 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-compactor-grpc\" (UniqueName: \"kubernetes.io/secret/61a3cdd1-2b93-4f37-a58e-d3d0918b60eb-logging-loki-compactor-grpc\") pod 
\"logging-loki-compactor-0\" (UID: \"61a3cdd1-2b93-4f37-a58e-d3d0918b60eb\") " pod="openshift-logging/logging-loki-compactor-0" Dec 10 11:02:23 crc kubenswrapper[4780]: I1210 11:02:23.312470 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-7c49ce20-2905-4f1f-a437-7f12a12f871d\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-7c49ce20-2905-4f1f-a437-7f12a12f871d\") pod \"logging-loki-index-gateway-0\" (UID: \"1c91b690-c1ad-4923-ad75-f7e5611441f9\") " pod="openshift-logging/logging-loki-index-gateway-0" Dec 10 11:02:23 crc kubenswrapper[4780]: I1210 11:02:23.312539 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-compactor-http\" (UniqueName: \"kubernetes.io/secret/61a3cdd1-2b93-4f37-a58e-d3d0918b60eb-logging-loki-compactor-http\") pod \"logging-loki-compactor-0\" (UID: \"61a3cdd1-2b93-4f37-a58e-d3d0918b60eb\") " pod="openshift-logging/logging-loki-compactor-0" Dec 10 11:02:23 crc kubenswrapper[4780]: I1210 11:02:23.312583 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-s3\" (UniqueName: \"kubernetes.io/secret/61a3cdd1-2b93-4f37-a58e-d3d0918b60eb-logging-loki-s3\") pod \"logging-loki-compactor-0\" (UID: \"61a3cdd1-2b93-4f37-a58e-d3d0918b60eb\") " pod="openshift-logging/logging-loki-compactor-0" Dec 10 11:02:23 crc kubenswrapper[4780]: I1210 11:02:23.312713 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/61a3cdd1-2b93-4f37-a58e-d3d0918b60eb-config\") pod \"logging-loki-compactor-0\" (UID: \"61a3cdd1-2b93-4f37-a58e-d3d0918b60eb\") " pod="openshift-logging/logging-loki-compactor-0" Dec 10 11:02:23 crc kubenswrapper[4780]: I1210 11:02:23.312753 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-ingester-http\" (UniqueName: \"kubernetes.io/secret/2b4928b0-ec66-4cee-8fd3-2067b64c4144-logging-loki-ingester-http\") pod \"logging-loki-ingester-0\" (UID: \"2b4928b0-ec66-4cee-8fd3-2067b64c4144\") " pod="openshift-logging/logging-loki-ingester-0" Dec 10 11:02:23 crc kubenswrapper[4780]: I1210 11:02:23.312819 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-ingester-grpc\" (UniqueName: \"kubernetes.io/secret/2b4928b0-ec66-4cee-8fd3-2067b64c4144-logging-loki-ingester-grpc\") pod \"logging-loki-ingester-0\" (UID: \"2b4928b0-ec66-4cee-8fd3-2067b64c4144\") " pod="openshift-logging/logging-loki-ingester-0" Dec 10 11:02:23 crc kubenswrapper[4780]: I1210 11:02:23.313001 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/2b4928b0-ec66-4cee-8fd3-2067b64c4144-logging-loki-ca-bundle\") pod \"logging-loki-ingester-0\" (UID: \"2b4928b0-ec66-4cee-8fd3-2067b64c4144\") " pod="openshift-logging/logging-loki-ingester-0" Dec 10 11:02:23 crc kubenswrapper[4780]: I1210 11:02:23.313064 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-s3\" (UniqueName: \"kubernetes.io/secret/1c91b690-c1ad-4923-ad75-f7e5611441f9-logging-loki-s3\") pod \"logging-loki-index-gateway-0\" (UID: \"1c91b690-c1ad-4923-ad75-f7e5611441f9\") " pod="openshift-logging/logging-loki-index-gateway-0" Dec 10 11:02:23 crc kubenswrapper[4780]: I1210 11:02:23.313136 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"pvc-705dc182-e450-44c3-baae-283d2a403ff4\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-705dc182-e450-44c3-baae-283d2a403ff4\") pod \"logging-loki-ingester-0\" (UID: \"2b4928b0-ec66-4cee-8fd3-2067b64c4144\") " pod="openshift-logging/logging-loki-ingester-0" Dec 10 11:02:23 crc kubenswrapper[4780]: I1210 11:02:23.313209 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/1c91b690-c1ad-4923-ad75-f7e5611441f9-logging-loki-ca-bundle\") pod \"logging-loki-index-gateway-0\" (UID: \"1c91b690-c1ad-4923-ad75-f7e5611441f9\") " pod="openshift-logging/logging-loki-index-gateway-0" Dec 10 11:02:23 crc kubenswrapper[4780]: I1210 11:02:23.313275 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2b4928b0-ec66-4cee-8fd3-2067b64c4144-config\") pod \"logging-loki-ingester-0\" (UID: \"2b4928b0-ec66-4cee-8fd3-2067b64c4144\") " pod="openshift-logging/logging-loki-ingester-0" Dec 10 11:02:23 crc kubenswrapper[4780]: I1210 11:02:23.313316 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1c91b690-c1ad-4923-ad75-f7e5611441f9-config\") pod \"logging-loki-index-gateway-0\" (UID: \"1c91b690-c1ad-4923-ad75-f7e5611441f9\") " pod="openshift-logging/logging-loki-index-gateway-0" Dec 10 11:02:23 crc kubenswrapper[4780]: I1210 11:02:23.313348 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-n5wg2\" (UniqueName: \"kubernetes.io/projected/1c91b690-c1ad-4923-ad75-f7e5611441f9-kube-api-access-n5wg2\") pod \"logging-loki-index-gateway-0\" (UID: \"1c91b690-c1ad-4923-ad75-f7e5611441f9\") " pod="openshift-logging/logging-loki-index-gateway-0" Dec 10 11:02:23 crc kubenswrapper[4780]: I1210 11:02:23.315989 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2b4928b0-ec66-4cee-8fd3-2067b64c4144-config\") pod \"logging-loki-ingester-0\" (UID: \"2b4928b0-ec66-4cee-8fd3-2067b64c4144\") " pod="openshift-logging/logging-loki-ingester-0" Dec 10 11:02:23 crc kubenswrapper[4780]: I1210 11:02:23.316299 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/61a3cdd1-2b93-4f37-a58e-d3d0918b60eb-logging-loki-ca-bundle\") pod \"logging-loki-compactor-0\" (UID: \"61a3cdd1-2b93-4f37-a58e-d3d0918b60eb\") " pod="openshift-logging/logging-loki-compactor-0" Dec 10 11:02:23 crc kubenswrapper[4780]: I1210 11:02:23.316391 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/61a3cdd1-2b93-4f37-a58e-d3d0918b60eb-config\") pod \"logging-loki-compactor-0\" (UID: \"61a3cdd1-2b93-4f37-a58e-d3d0918b60eb\") " pod="openshift-logging/logging-loki-compactor-0" Dec 10 11:02:23 crc kubenswrapper[4780]: I1210 11:02:23.318124 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/2b4928b0-ec66-4cee-8fd3-2067b64c4144-logging-loki-ca-bundle\") pod \"logging-loki-ingester-0\" (UID: \"2b4928b0-ec66-4cee-8fd3-2067b64c4144\") " pod="openshift-logging/logging-loki-ingester-0" Dec 10 11:02:23 crc kubenswrapper[4780]: I1210 11:02:23.320896 4780 csi_attacher.go:380] kubernetes.io/csi: 
attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... Dec 10 11:02:23 crc kubenswrapper[4780]: I1210 11:02:23.320991 4780 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-705dc182-e450-44c3-baae-283d2a403ff4\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-705dc182-e450-44c3-baae-283d2a403ff4\") pod \"logging-loki-ingester-0\" (UID: \"2b4928b0-ec66-4cee-8fd3-2067b64c4144\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/27bc1f3b1c8629e394cb9c8df511b39c6dcabe600bbb7d37312849dff9cfe32e/globalmount\"" pod="openshift-logging/logging-loki-ingester-0" Dec 10 11:02:23 crc kubenswrapper[4780]: I1210 11:02:23.320896 4780 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... Dec 10 11:02:23 crc kubenswrapper[4780]: I1210 11:02:23.321166 4780 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-78debacb-9295-4343-a3a6-87160fd6b11b\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-78debacb-9295-4343-a3a6-87160fd6b11b\") pod \"logging-loki-compactor-0\" (UID: \"61a3cdd1-2b93-4f37-a58e-d3d0918b60eb\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/9e755a9b1f382b9e3b51211ac3c27ca40b66497bde0ad5b48ef2586e3e5bbb19/globalmount\"" pod="openshift-logging/logging-loki-compactor-0" Dec 10 11:02:23 crc kubenswrapper[4780]: I1210 11:02:23.325510 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-compactor-grpc\" (UniqueName: \"kubernetes.io/secret/61a3cdd1-2b93-4f37-a58e-d3d0918b60eb-logging-loki-compactor-grpc\") pod \"logging-loki-compactor-0\" (UID: \"61a3cdd1-2b93-4f37-a58e-d3d0918b60eb\") " pod="openshift-logging/logging-loki-compactor-0" Dec 10 11:02:23 crc kubenswrapper[4780]: I1210 11:02:23.325640 4780 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... 
Dec 10 11:02:23 crc kubenswrapper[4780]: I1210 11:02:23.325668 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-s3\" (UniqueName: \"kubernetes.io/secret/2b4928b0-ec66-4cee-8fd3-2067b64c4144-logging-loki-s3\") pod \"logging-loki-ingester-0\" (UID: \"2b4928b0-ec66-4cee-8fd3-2067b64c4144\") " pod="openshift-logging/logging-loki-ingester-0" Dec 10 11:02:23 crc kubenswrapper[4780]: I1210 11:02:23.325707 4780 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-3d1ea538-8cac-4d7d-b03a-e65c65acb462\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-3d1ea538-8cac-4d7d-b03a-e65c65acb462\") pod \"logging-loki-ingester-0\" (UID: \"2b4928b0-ec66-4cee-8fd3-2067b64c4144\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/a7f7eb7d9850df5b7b6976da6878eb581c85e48fcbf16f155edfc15d9b22589f/globalmount\"" pod="openshift-logging/logging-loki-ingester-0" Dec 10 11:02:23 crc kubenswrapper[4780]: I1210 11:02:23.325771 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-s3\" (UniqueName: \"kubernetes.io/secret/61a3cdd1-2b93-4f37-a58e-d3d0918b60eb-logging-loki-s3\") pod \"logging-loki-compactor-0\" (UID: \"61a3cdd1-2b93-4f37-a58e-d3d0918b60eb\") " pod="openshift-logging/logging-loki-compactor-0" Dec 10 11:02:23 crc kubenswrapper[4780]: I1210 11:02:23.326939 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-compactor-http\" (UniqueName: \"kubernetes.io/secret/61a3cdd1-2b93-4f37-a58e-d3d0918b60eb-logging-loki-compactor-http\") pod \"logging-loki-compactor-0\" (UID: \"61a3cdd1-2b93-4f37-a58e-d3d0918b60eb\") " pod="openshift-logging/logging-loki-compactor-0" Dec 10 11:02:23 crc kubenswrapper[4780]: I1210 11:02:23.328658 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-ingester-grpc\" (UniqueName: \"kubernetes.io/secret/2b4928b0-ec66-4cee-8fd3-2067b64c4144-logging-loki-ingester-grpc\") pod \"logging-loki-ingester-0\" (UID: \"2b4928b0-ec66-4cee-8fd3-2067b64c4144\") " pod="openshift-logging/logging-loki-ingester-0" Dec 10 11:02:23 crc kubenswrapper[4780]: I1210 11:02:23.330143 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/logging-loki-gateway-7d6b48847-8n96n"] Dec 10 11:02:23 crc kubenswrapper[4780]: I1210 11:02:23.330779 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-ingester-http\" (UniqueName: \"kubernetes.io/secret/2b4928b0-ec66-4cee-8fd3-2067b64c4144-logging-loki-ingester-http\") pod \"logging-loki-ingester-0\" (UID: \"2b4928b0-ec66-4cee-8fd3-2067b64c4144\") " pod="openshift-logging/logging-loki-ingester-0" Dec 10 11:02:23 crc kubenswrapper[4780]: I1210 11:02:23.342955 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7fx6m\" (UniqueName: \"kubernetes.io/projected/61a3cdd1-2b93-4f37-a58e-d3d0918b60eb-kube-api-access-7fx6m\") pod \"logging-loki-compactor-0\" (UID: \"61a3cdd1-2b93-4f37-a58e-d3d0918b60eb\") " pod="openshift-logging/logging-loki-compactor-0" Dec 10 11:02:23 crc kubenswrapper[4780]: I1210 11:02:23.345910 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mbvrz\" (UniqueName: \"kubernetes.io/projected/2b4928b0-ec66-4cee-8fd3-2067b64c4144-kube-api-access-mbvrz\") pod \"logging-loki-ingester-0\" (UID: \"2b4928b0-ec66-4cee-8fd3-2067b64c4144\") " pod="openshift-logging/logging-loki-ingester-0" Dec 10 11:02:23 crc 
kubenswrapper[4780]: W1210 11:02:23.351818 4780 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podec49e3ad_a0d4_4eaa_a8f6_6f76c1663546.slice/crio-2e8c5913375bac275e3da74b1fbbdfc1b7a83214a938eec493aabbab4b7da8fe WatchSource:0}: Error finding container 2e8c5913375bac275e3da74b1fbbdfc1b7a83214a938eec493aabbab4b7da8fe: Status 404 returned error can't find the container with id 2e8c5913375bac275e3da74b1fbbdfc1b7a83214a938eec493aabbab4b7da8fe Dec 10 11:02:23 crc kubenswrapper[4780]: I1210 11:02:23.417334 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-7c49ce20-2905-4f1f-a437-7f12a12f871d\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-7c49ce20-2905-4f1f-a437-7f12a12f871d\") pod \"logging-loki-index-gateway-0\" (UID: \"1c91b690-c1ad-4923-ad75-f7e5611441f9\") " pod="openshift-logging/logging-loki-index-gateway-0" Dec 10 11:02:23 crc kubenswrapper[4780]: I1210 11:02:23.425418 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-s3\" (UniqueName: \"kubernetes.io/secret/1c91b690-c1ad-4923-ad75-f7e5611441f9-logging-loki-s3\") pod \"logging-loki-index-gateway-0\" (UID: \"1c91b690-c1ad-4923-ad75-f7e5611441f9\") " pod="openshift-logging/logging-loki-index-gateway-0" Dec 10 11:02:23 crc kubenswrapper[4780]: I1210 11:02:23.425740 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/1c91b690-c1ad-4923-ad75-f7e5611441f9-logging-loki-ca-bundle\") pod \"logging-loki-index-gateway-0\" (UID: \"1c91b690-c1ad-4923-ad75-f7e5611441f9\") " pod="openshift-logging/logging-loki-index-gateway-0" Dec 10 11:02:23 crc kubenswrapper[4780]: I1210 11:02:23.425890 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1c91b690-c1ad-4923-ad75-f7e5611441f9-config\") pod \"logging-loki-index-gateway-0\" (UID: \"1c91b690-c1ad-4923-ad75-f7e5611441f9\") " pod="openshift-logging/logging-loki-index-gateway-0" Dec 10 11:02:23 crc kubenswrapper[4780]: I1210 11:02:23.426107 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-n5wg2\" (UniqueName: \"kubernetes.io/projected/1c91b690-c1ad-4923-ad75-f7e5611441f9-kube-api-access-n5wg2\") pod \"logging-loki-index-gateway-0\" (UID: \"1c91b690-c1ad-4923-ad75-f7e5611441f9\") " pod="openshift-logging/logging-loki-index-gateway-0" Dec 10 11:02:23 crc kubenswrapper[4780]: I1210 11:02:23.426337 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-index-gateway-http\" (UniqueName: \"kubernetes.io/secret/1c91b690-c1ad-4923-ad75-f7e5611441f9-logging-loki-index-gateway-http\") pod \"logging-loki-index-gateway-0\" (UID: \"1c91b690-c1ad-4923-ad75-f7e5611441f9\") " pod="openshift-logging/logging-loki-index-gateway-0" Dec 10 11:02:23 crc kubenswrapper[4780]: I1210 11:02:23.426869 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-index-gateway-grpc\" (UniqueName: \"kubernetes.io/secret/1c91b690-c1ad-4923-ad75-f7e5611441f9-logging-loki-index-gateway-grpc\") pod \"logging-loki-index-gateway-0\" (UID: \"1c91b690-c1ad-4923-ad75-f7e5611441f9\") " pod="openshift-logging/logging-loki-index-gateway-0" Dec 10 11:02:23 crc kubenswrapper[4780]: I1210 11:02:23.430748 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/1c91b690-c1ad-4923-ad75-f7e5611441f9-logging-loki-ca-bundle\") pod \"logging-loki-index-gateway-0\" (UID: \"1c91b690-c1ad-4923-ad75-f7e5611441f9\") " pod="openshift-logging/logging-loki-index-gateway-0" Dec 10 11:02:23 crc kubenswrapper[4780]: I1210 11:02:23.430996 4780 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... Dec 10 11:02:23 crc kubenswrapper[4780]: I1210 11:02:23.431073 4780 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-7c49ce20-2905-4f1f-a437-7f12a12f871d\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-7c49ce20-2905-4f1f-a437-7f12a12f871d\") pod \"logging-loki-index-gateway-0\" (UID: \"1c91b690-c1ad-4923-ad75-f7e5611441f9\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/8e171a68ccb9443cb9649b59d0b8586d6590258a2c4f20fae2772c7a9adc0aaa/globalmount\"" pod="openshift-logging/logging-loki-index-gateway-0" Dec 10 11:02:23 crc kubenswrapper[4780]: I1210 11:02:23.434621 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-3d1ea538-8cac-4d7d-b03a-e65c65acb462\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-3d1ea538-8cac-4d7d-b03a-e65c65acb462\") pod \"logging-loki-ingester-0\" (UID: \"2b4928b0-ec66-4cee-8fd3-2067b64c4144\") " pod="openshift-logging/logging-loki-ingester-0" Dec 10 11:02:23 crc kubenswrapper[4780]: I1210 11:02:23.445586 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-78debacb-9295-4343-a3a6-87160fd6b11b\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-78debacb-9295-4343-a3a6-87160fd6b11b\") pod \"logging-loki-compactor-0\" (UID: \"61a3cdd1-2b93-4f37-a58e-d3d0918b60eb\") " pod="openshift-logging/logging-loki-compactor-0" Dec 10 11:02:23 crc kubenswrapper[4780]: I1210 11:02:23.446356 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1c91b690-c1ad-4923-ad75-f7e5611441f9-config\") pod \"logging-loki-index-gateway-0\" (UID: \"1c91b690-c1ad-4923-ad75-f7e5611441f9\") " pod="openshift-logging/logging-loki-index-gateway-0" Dec 10 11:02:23 crc kubenswrapper[4780]: I1210 11:02:23.449519 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-vr2cr" event={"ID":"fa780797-49d6-443b-948f-d469f590ff89","Type":"ContainerStarted","Data":"474c6a77d4a280d09613bae3a0d5f2ad2025cc287e7ea84bd501620fefb1853d"} Dec 10 11:02:23 crc kubenswrapper[4780]: I1210 11:02:23.455375 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/logging-loki-gateway-7d6b48847-8n96n" event={"ID":"ec49e3ad-a0d4-4eaa-a8f6-6f76c1663546","Type":"ContainerStarted","Data":"2e8c5913375bac275e3da74b1fbbdfc1b7a83214a938eec493aabbab4b7da8fe"} Dec 10 11:02:23 crc kubenswrapper[4780]: I1210 11:02:23.458578 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-705dc182-e450-44c3-baae-283d2a403ff4\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-705dc182-e450-44c3-baae-283d2a403ff4\") pod \"logging-loki-ingester-0\" (UID: \"2b4928b0-ec66-4cee-8fd3-2067b64c4144\") " pod="openshift-logging/logging-loki-ingester-0" Dec 10 11:02:23 crc kubenswrapper[4780]: I1210 11:02:23.459271 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/logging-loki-distributor-76cc67bf56-j5hdz" 
event={"ID":"a59d13f6-10bb-4e7d-96c1-46dff7fac4e2","Type":"ContainerStarted","Data":"3ba11d61280862d6bab6e042da31ef0b8d2544a2a2f846d883349c7454b81c91"} Dec 10 11:02:23 crc kubenswrapper[4780]: I1210 11:02:23.463815 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/logging-loki-querier-5895d59bb8-h98vv" event={"ID":"7c0379b8-51d2-4860-be84-18dfcb007969","Type":"ContainerStarted","Data":"7ea75cbf3a04935694e60dc3c8531eeb57b1ac29d8f1b6255aa3cd5548d4eca7"} Dec 10 11:02:23 crc kubenswrapper[4780]: I1210 11:02:23.467631 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-index-gateway-http\" (UniqueName: \"kubernetes.io/secret/1c91b690-c1ad-4923-ad75-f7e5611441f9-logging-loki-index-gateway-http\") pod \"logging-loki-index-gateway-0\" (UID: \"1c91b690-c1ad-4923-ad75-f7e5611441f9\") " pod="openshift-logging/logging-loki-index-gateway-0" Dec 10 11:02:23 crc kubenswrapper[4780]: I1210 11:02:23.469180 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-index-gateway-grpc\" (UniqueName: \"kubernetes.io/secret/1c91b690-c1ad-4923-ad75-f7e5611441f9-logging-loki-index-gateway-grpc\") pod \"logging-loki-index-gateway-0\" (UID: \"1c91b690-c1ad-4923-ad75-f7e5611441f9\") " pod="openshift-logging/logging-loki-index-gateway-0" Dec 10 11:02:23 crc kubenswrapper[4780]: I1210 11:02:23.471168 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/logging-loki-query-frontend-84558f7c9f-tjd86" event={"ID":"9906b064-40d7-43bc-bb9b-52863d99a2f4","Type":"ContainerStarted","Data":"41321504ad449f743f19fb584fc8f76caa333a1b6a625b2f13705b241d98cfc1"} Dec 10 11:02:23 crc kubenswrapper[4780]: I1210 11:02:23.471908 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-n5wg2\" (UniqueName: \"kubernetes.io/projected/1c91b690-c1ad-4923-ad75-f7e5611441f9-kube-api-access-n5wg2\") pod \"logging-loki-index-gateway-0\" (UID: \"1c91b690-c1ad-4923-ad75-f7e5611441f9\") " pod="openshift-logging/logging-loki-index-gateway-0" Dec 10 11:02:23 crc kubenswrapper[4780]: I1210 11:02:23.475383 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-s3\" (UniqueName: \"kubernetes.io/secret/1c91b690-c1ad-4923-ad75-f7e5611441f9-logging-loki-s3\") pod \"logging-loki-index-gateway-0\" (UID: \"1c91b690-c1ad-4923-ad75-f7e5611441f9\") " pod="openshift-logging/logging-loki-index-gateway-0" Dec 10 11:02:23 crc kubenswrapper[4780]: I1210 11:02:23.485296 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/logging-loki-gateway-7d6b48847-cgx9d"] Dec 10 11:02:23 crc kubenswrapper[4780]: I1210 11:02:23.500605 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-vr2cr" podStartSLOduration=11.451493429 podStartE2EDuration="28.500564771s" podCreationTimestamp="2025-12-10 11:01:55 +0000 UTC" firstStartedPulling="2025-12-10 11:02:05.651055379 +0000 UTC m=+1030.504448822" lastFinishedPulling="2025-12-10 11:02:22.700126721 +0000 UTC m=+1047.553520164" observedRunningTime="2025-12-10 11:02:23.480239996 +0000 UTC m=+1048.333633439" watchObservedRunningTime="2025-12-10 11:02:23.500564771 +0000 UTC m=+1048.353958214" Dec 10 11:02:23 crc kubenswrapper[4780]: I1210 11:02:23.521587 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-7c49ce20-2905-4f1f-a437-7f12a12f871d\" (UniqueName: 
\"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-7c49ce20-2905-4f1f-a437-7f12a12f871d\") pod \"logging-loki-index-gateway-0\" (UID: \"1c91b690-c1ad-4923-ad75-f7e5611441f9\") " pod="openshift-logging/logging-loki-index-gateway-0" Dec 10 11:02:23 crc kubenswrapper[4780]: I1210 11:02:23.551315 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-logging/logging-loki-index-gateway-0" Dec 10 11:02:23 crc kubenswrapper[4780]: I1210 11:02:23.685856 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-logging/logging-loki-ingester-0" Dec 10 11:02:23 crc kubenswrapper[4780]: I1210 11:02:23.724888 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-logging/logging-loki-compactor-0" Dec 10 11:02:24 crc kubenswrapper[4780]: I1210 11:02:24.050849 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/logging-loki-index-gateway-0"] Dec 10 11:02:24 crc kubenswrapper[4780]: I1210 11:02:24.192592 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/logging-loki-ingester-0"] Dec 10 11:02:24 crc kubenswrapper[4780]: W1210 11:02:24.222901 4780 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod2b4928b0_ec66_4cee_8fd3_2067b64c4144.slice/crio-ed103d7d954f51aafcabf17844bc218678d928971abe01aaafaae76fc3b1e57c WatchSource:0}: Error finding container ed103d7d954f51aafcabf17844bc218678d928971abe01aaafaae76fc3b1e57c: Status 404 returned error can't find the container with id ed103d7d954f51aafcabf17844bc218678d928971abe01aaafaae76fc3b1e57c Dec 10 11:02:24 crc kubenswrapper[4780]: W1210 11:02:24.452680 4780 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod61a3cdd1_2b93_4f37_a58e_d3d0918b60eb.slice/crio-ebf9dbda2ee97a03818469acfd2ad1216e7aba50f2fc5d5fb2894ad9876d56cd WatchSource:0}: Error finding container ebf9dbda2ee97a03818469acfd2ad1216e7aba50f2fc5d5fb2894ad9876d56cd: Status 404 returned error can't find the container with id ebf9dbda2ee97a03818469acfd2ad1216e7aba50f2fc5d5fb2894ad9876d56cd Dec 10 11:02:24 crc kubenswrapper[4780]: I1210 11:02:24.459148 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/logging-loki-compactor-0"] Dec 10 11:02:24 crc kubenswrapper[4780]: I1210 11:02:24.480637 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/logging-loki-index-gateway-0" event={"ID":"1c91b690-c1ad-4923-ad75-f7e5611441f9","Type":"ContainerStarted","Data":"b605506a7c9d7a544bd2198826f94f217937bdfe4d1e793a55fd4098bf1c4d72"} Dec 10 11:02:24 crc kubenswrapper[4780]: I1210 11:02:24.483331 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/logging-loki-compactor-0" event={"ID":"61a3cdd1-2b93-4f37-a58e-d3d0918b60eb","Type":"ContainerStarted","Data":"ebf9dbda2ee97a03818469acfd2ad1216e7aba50f2fc5d5fb2894ad9876d56cd"} Dec 10 11:02:24 crc kubenswrapper[4780]: I1210 11:02:24.485359 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/logging-loki-ingester-0" event={"ID":"2b4928b0-ec66-4cee-8fd3-2067b64c4144","Type":"ContainerStarted","Data":"ed103d7d954f51aafcabf17844bc218678d928971abe01aaafaae76fc3b1e57c"} Dec 10 11:02:24 crc kubenswrapper[4780]: I1210 11:02:24.487207 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/logging-loki-gateway-7d6b48847-cgx9d" 
event={"ID":"dc698901-e923-49fa-bc7f-f4e3f9f0a99b","Type":"ContainerStarted","Data":"8d643ef8058c2263ef6235ac8ecf15a53f00a1462f8417985ffe141375951056"} Dec 10 11:02:25 crc kubenswrapper[4780]: I1210 11:02:25.499404 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-g66q8" event={"ID":"f898695c-3396-42b3-9bf5-0fe05c234d39","Type":"ContainerStarted","Data":"12e3f61bf15c6297249cd5d3f9ceb0f6a6ece5c0350414a62156e950cb7d5f94"} Dec 10 11:02:26 crc kubenswrapper[4780]: I1210 11:02:26.281386 4780 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-vr2cr" Dec 10 11:02:26 crc kubenswrapper[4780]: I1210 11:02:26.281821 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-vr2cr" Dec 10 11:02:26 crc kubenswrapper[4780]: I1210 11:02:26.398727 4780 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-vr2cr" Dec 10 11:02:26 crc kubenswrapper[4780]: I1210 11:02:26.541932 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-g66q8" podStartSLOduration=13.695305095 podStartE2EDuration="22.541865057s" podCreationTimestamp="2025-12-10 11:02:04 +0000 UTC" firstStartedPulling="2025-12-10 11:02:15.247124843 +0000 UTC m=+1040.100518286" lastFinishedPulling="2025-12-10 11:02:24.093684805 +0000 UTC m=+1048.947078248" observedRunningTime="2025-12-10 11:02:26.52777525 +0000 UTC m=+1051.381168693" watchObservedRunningTime="2025-12-10 11:02:26.541865057 +0000 UTC m=+1051.395258500" Dec 10 11:02:27 crc kubenswrapper[4780]: I1210 11:02:27.486366 4780 patch_prober.go:28] interesting pod/machine-config-daemon-xhdr5 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 10 11:02:27 crc kubenswrapper[4780]: I1210 11:02:27.487415 4780 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" podUID="6bf1dca1-b191-4796-b326-baac53e84045" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 10 11:02:31 crc kubenswrapper[4780]: I1210 11:02:31.544344 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-lfzkp" Dec 10 11:02:31 crc kubenswrapper[4780]: I1210 11:02:31.594496 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-lfzkp"] Dec 10 11:02:31 crc kubenswrapper[4780]: I1210 11:02:31.603692 4780 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-lfzkp" podUID="85b7225d-3a09-42e4-8fcb-50eede1ecff1" containerName="registry-server" containerID="cri-o://a3f6af6856818ef7acca738ce1aa462bf086d3706b74eb8ad29189e7ff22980e" gracePeriod=2 Dec 10 11:02:33 crc kubenswrapper[4780]: I1210 11:02:33.623256 4780 generic.go:334] "Generic (PLEG): container finished" podID="85b7225d-3a09-42e4-8fcb-50eede1ecff1" containerID="a3f6af6856818ef7acca738ce1aa462bf086d3706b74eb8ad29189e7ff22980e" exitCode=0 Dec 10 11:02:33 crc kubenswrapper[4780]: I1210 11:02:33.623325 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-marketplace/community-operators-lfzkp" event={"ID":"85b7225d-3a09-42e4-8fcb-50eede1ecff1","Type":"ContainerDied","Data":"a3f6af6856818ef7acca738ce1aa462bf086d3706b74eb8ad29189e7ff22980e"} Dec 10 11:02:34 crc kubenswrapper[4780]: I1210 11:02:34.438108 4780 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-lfzkp" Dec 10 11:02:34 crc kubenswrapper[4780]: I1210 11:02:34.518464 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/85b7225d-3a09-42e4-8fcb-50eede1ecff1-catalog-content\") pod \"85b7225d-3a09-42e4-8fcb-50eede1ecff1\" (UID: \"85b7225d-3a09-42e4-8fcb-50eede1ecff1\") " Dec 10 11:02:34 crc kubenswrapper[4780]: I1210 11:02:34.518527 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/85b7225d-3a09-42e4-8fcb-50eede1ecff1-utilities\") pod \"85b7225d-3a09-42e4-8fcb-50eede1ecff1\" (UID: \"85b7225d-3a09-42e4-8fcb-50eede1ecff1\") " Dec 10 11:02:34 crc kubenswrapper[4780]: I1210 11:02:34.518569 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hxgvm\" (UniqueName: \"kubernetes.io/projected/85b7225d-3a09-42e4-8fcb-50eede1ecff1-kube-api-access-hxgvm\") pod \"85b7225d-3a09-42e4-8fcb-50eede1ecff1\" (UID: \"85b7225d-3a09-42e4-8fcb-50eede1ecff1\") " Dec 10 11:02:34 crc kubenswrapper[4780]: I1210 11:02:34.519757 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/85b7225d-3a09-42e4-8fcb-50eede1ecff1-utilities" (OuterVolumeSpecName: "utilities") pod "85b7225d-3a09-42e4-8fcb-50eede1ecff1" (UID: "85b7225d-3a09-42e4-8fcb-50eede1ecff1"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 11:02:34 crc kubenswrapper[4780]: I1210 11:02:34.524502 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/85b7225d-3a09-42e4-8fcb-50eede1ecff1-kube-api-access-hxgvm" (OuterVolumeSpecName: "kube-api-access-hxgvm") pod "85b7225d-3a09-42e4-8fcb-50eede1ecff1" (UID: "85b7225d-3a09-42e4-8fcb-50eede1ecff1"). InnerVolumeSpecName "kube-api-access-hxgvm". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:02:34 crc kubenswrapper[4780]: I1210 11:02:34.575035 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/85b7225d-3a09-42e4-8fcb-50eede1ecff1-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "85b7225d-3a09-42e4-8fcb-50eede1ecff1" (UID: "85b7225d-3a09-42e4-8fcb-50eede1ecff1"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 11:02:34 crc kubenswrapper[4780]: I1210 11:02:34.621584 4780 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/85b7225d-3a09-42e4-8fcb-50eede1ecff1-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 10 11:02:34 crc kubenswrapper[4780]: I1210 11:02:34.621654 4780 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/85b7225d-3a09-42e4-8fcb-50eede1ecff1-utilities\") on node \"crc\" DevicePath \"\"" Dec 10 11:02:34 crc kubenswrapper[4780]: I1210 11:02:34.621682 4780 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hxgvm\" (UniqueName: \"kubernetes.io/projected/85b7225d-3a09-42e4-8fcb-50eede1ecff1-kube-api-access-hxgvm\") on node \"crc\" DevicePath \"\"" Dec 10 11:02:34 crc kubenswrapper[4780]: I1210 11:02:34.637739 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-lfzkp" event={"ID":"85b7225d-3a09-42e4-8fcb-50eede1ecff1","Type":"ContainerDied","Data":"ded1efd6cc2fced64aae3adad5102e60eb292ec155e853dfc89e39f9695be431"} Dec 10 11:02:34 crc kubenswrapper[4780]: I1210 11:02:34.637843 4780 scope.go:117] "RemoveContainer" containerID="a3f6af6856818ef7acca738ce1aa462bf086d3706b74eb8ad29189e7ff22980e" Dec 10 11:02:34 crc kubenswrapper[4780]: I1210 11:02:34.637842 4780 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-lfzkp" Dec 10 11:02:34 crc kubenswrapper[4780]: I1210 11:02:34.674799 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-lfzkp"] Dec 10 11:02:34 crc kubenswrapper[4780]: I1210 11:02:34.682600 4780 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-lfzkp"] Dec 10 11:02:34 crc kubenswrapper[4780]: I1210 11:02:34.773443 4780 scope.go:117] "RemoveContainer" containerID="65a94723df02df161f73aacd5f861739e7b90ff193b3df19b9de127d29852ecb" Dec 10 11:02:34 crc kubenswrapper[4780]: I1210 11:02:34.945956 4780 scope.go:117] "RemoveContainer" containerID="e3567e710adc443ea47f4c1575e16e6ef0dd07c9fa9128b2a8d44a26090b97c1" Dec 10 11:02:35 crc kubenswrapper[4780]: I1210 11:02:35.211857 4780 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-g66q8" Dec 10 11:02:35 crc kubenswrapper[4780]: I1210 11:02:35.212297 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-g66q8" Dec 10 11:02:35 crc kubenswrapper[4780]: I1210 11:02:35.261328 4780 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-g66q8" Dec 10 11:02:35 crc kubenswrapper[4780]: I1210 11:02:35.647731 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/logging-loki-distributor-76cc67bf56-j5hdz" event={"ID":"a59d13f6-10bb-4e7d-96c1-46dff7fac4e2","Type":"ContainerStarted","Data":"f5549be7dcdb7cf40ecc6671a7cc5a29380284aaf687147e51b1fc756b5248a8"} Dec 10 11:02:35 crc kubenswrapper[4780]: I1210 11:02:35.649225 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-logging/logging-loki-distributor-76cc67bf56-j5hdz" Dec 10 11:02:35 crc kubenswrapper[4780]: I1210 11:02:35.651546 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/logging-loki-gateway-7d6b48847-8n96n" 
event={"ID":"ec49e3ad-a0d4-4eaa-a8f6-6f76c1663546","Type":"ContainerStarted","Data":"9e7792e006c8a4ee0396c5a9b4e88e8f9132a1847b3c74cde661ccd48f100526"} Dec 10 11:02:35 crc kubenswrapper[4780]: I1210 11:02:35.653033 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/logging-loki-querier-5895d59bb8-h98vv" event={"ID":"7c0379b8-51d2-4860-be84-18dfcb007969","Type":"ContainerStarted","Data":"e4e914d882bbf2cf7eab143506b24c0293dbe4612e3e4c61fa5283073ea84fd1"} Dec 10 11:02:35 crc kubenswrapper[4780]: I1210 11:02:35.653679 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-logging/logging-loki-querier-5895d59bb8-h98vv" Dec 10 11:02:35 crc kubenswrapper[4780]: I1210 11:02:35.655546 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/logging-loki-ingester-0" event={"ID":"2b4928b0-ec66-4cee-8fd3-2067b64c4144","Type":"ContainerStarted","Data":"4a1385c909bcc4268a2b2ac5c4e1894c8a7ffedb13e84360458198c8ef7a60e8"} Dec 10 11:02:35 crc kubenswrapper[4780]: I1210 11:02:35.656142 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-logging/logging-loki-ingester-0" Dec 10 11:02:35 crc kubenswrapper[4780]: I1210 11:02:35.657607 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/logging-loki-index-gateway-0" event={"ID":"1c91b690-c1ad-4923-ad75-f7e5611441f9","Type":"ContainerStarted","Data":"13d60fe6ac2b0c9de64bf767e4db2a5a0e5f311f62e1d664de8d28f218e95e28"} Dec 10 11:02:35 crc kubenswrapper[4780]: I1210 11:02:35.658196 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-logging/logging-loki-index-gateway-0" Dec 10 11:02:35 crc kubenswrapper[4780]: I1210 11:02:35.664059 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/logging-loki-query-frontend-84558f7c9f-tjd86" event={"ID":"9906b064-40d7-43bc-bb9b-52863d99a2f4","Type":"ContainerStarted","Data":"b177b13c8c944b3b4ec4784f1898e884d79f7db9ac32784d76895f536769aa3c"} Dec 10 11:02:35 crc kubenswrapper[4780]: I1210 11:02:35.664661 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-logging/logging-loki-query-frontend-84558f7c9f-tjd86" Dec 10 11:02:35 crc kubenswrapper[4780]: I1210 11:02:35.669970 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/logging-loki-compactor-0" event={"ID":"61a3cdd1-2b93-4f37-a58e-d3d0918b60eb","Type":"ContainerStarted","Data":"2bb85c9e6bff19a396256061168e2aa8061459a7a68aa62a77b49de7dca6cbb9"} Dec 10 11:02:35 crc kubenswrapper[4780]: I1210 11:02:35.673216 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-logging/logging-loki-compactor-0" Dec 10 11:02:35 crc kubenswrapper[4780]: I1210 11:02:35.677904 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/logging-loki-gateway-7d6b48847-cgx9d" event={"ID":"dc698901-e923-49fa-bc7f-f4e3f9f0a99b","Type":"ContainerStarted","Data":"86d548b3fcf64e2c1c326052d2c0188a20b292b3d80cbe99fd0fdf3951dab0de"} Dec 10 11:02:35 crc kubenswrapper[4780]: I1210 11:02:35.704242 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-logging/logging-loki-distributor-76cc67bf56-j5hdz" podStartSLOduration=2.549230266 podStartE2EDuration="14.70421509s" podCreationTimestamp="2025-12-10 11:02:21 +0000 UTC" firstStartedPulling="2025-12-10 11:02:22.804341024 +0000 UTC m=+1047.657734457" lastFinishedPulling="2025-12-10 11:02:34.959325828 +0000 UTC m=+1059.812719281" 
observedRunningTime="2025-12-10 11:02:35.69159985 +0000 UTC m=+1060.544993293" watchObservedRunningTime="2025-12-10 11:02:35.70421509 +0000 UTC m=+1060.557608533" Dec 10 11:02:35 crc kubenswrapper[4780]: I1210 11:02:35.752373 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-logging/logging-loki-index-gateway-0" podStartSLOduration=2.824620703 podStartE2EDuration="13.752339651s" podCreationTimestamp="2025-12-10 11:02:22 +0000 UTC" firstStartedPulling="2025-12-10 11:02:24.121628633 +0000 UTC m=+1048.975022076" lastFinishedPulling="2025-12-10 11:02:35.049347581 +0000 UTC m=+1059.902741024" observedRunningTime="2025-12-10 11:02:35.744499732 +0000 UTC m=+1060.597893175" watchObservedRunningTime="2025-12-10 11:02:35.752339651 +0000 UTC m=+1060.605733094" Dec 10 11:02:35 crc kubenswrapper[4780]: I1210 11:02:35.752873 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-logging/logging-loki-query-frontend-84558f7c9f-tjd86" podStartSLOduration=1.846144536 podStartE2EDuration="13.752866004s" podCreationTimestamp="2025-12-10 11:02:22 +0000 UTC" firstStartedPulling="2025-12-10 11:02:22.898426679 +0000 UTC m=+1047.751820122" lastFinishedPulling="2025-12-10 11:02:34.805148147 +0000 UTC m=+1059.658541590" observedRunningTime="2025-12-10 11:02:35.722671088 +0000 UTC m=+1060.576064541" watchObservedRunningTime="2025-12-10 11:02:35.752866004 +0000 UTC m=+1060.606259447" Dec 10 11:02:35 crc kubenswrapper[4780]: I1210 11:02:35.786536 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-logging/logging-loki-ingester-0" podStartSLOduration=3.97075677 podStartE2EDuration="14.786510018s" podCreationTimestamp="2025-12-10 11:02:21 +0000 UTC" firstStartedPulling="2025-12-10 11:02:24.236387564 +0000 UTC m=+1049.089781007" lastFinishedPulling="2025-12-10 11:02:35.052140812 +0000 UTC m=+1059.905534255" observedRunningTime="2025-12-10 11:02:35.781466 +0000 UTC m=+1060.634859463" watchObservedRunningTime="2025-12-10 11:02:35.786510018 +0000 UTC m=+1060.639903461" Dec 10 11:02:35 crc kubenswrapper[4780]: I1210 11:02:35.815043 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-logging/logging-loki-compactor-0" podStartSLOduration=3.21795821 podStartE2EDuration="13.81500448s" podCreationTimestamp="2025-12-10 11:02:22 +0000 UTC" firstStartedPulling="2025-12-10 11:02:24.455115522 +0000 UTC m=+1049.308508965" lastFinishedPulling="2025-12-10 11:02:35.052161792 +0000 UTC m=+1059.905555235" observedRunningTime="2025-12-10 11:02:35.802987926 +0000 UTC m=+1060.656381369" watchObservedRunningTime="2025-12-10 11:02:35.81500448 +0000 UTC m=+1060.668397923" Dec 10 11:02:35 crc kubenswrapper[4780]: I1210 11:02:35.907340 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-g66q8" Dec 10 11:02:35 crc kubenswrapper[4780]: I1210 11:02:35.914781 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-logging/logging-loki-querier-5895d59bb8-h98vv" podStartSLOduration=2.701199141 podStartE2EDuration="14.91474705s" podCreationTimestamp="2025-12-10 11:02:21 +0000 UTC" firstStartedPulling="2025-12-10 11:02:22.839976518 +0000 UTC m=+1047.693369961" lastFinishedPulling="2025-12-10 11:02:35.053524427 +0000 UTC m=+1059.906917870" observedRunningTime="2025-12-10 11:02:35.908005949 +0000 UTC m=+1060.761399422" watchObservedRunningTime="2025-12-10 11:02:35.91474705 +0000 UTC m=+1060.768140503" Dec 10 11:02:35 
crc kubenswrapper[4780]: I1210 11:02:35.971415 4780 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="85b7225d-3a09-42e4-8fcb-50eede1ecff1" path="/var/lib/kubelet/pods/85b7225d-3a09-42e4-8fcb-50eede1ecff1/volumes" Dec 10 11:02:36 crc kubenswrapper[4780]: I1210 11:02:36.419084 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-vr2cr" Dec 10 11:02:36 crc kubenswrapper[4780]: I1210 11:02:36.878736 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-g66q8"] Dec 10 11:02:37 crc kubenswrapper[4780]: I1210 11:02:37.701039 4780 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-g66q8" podUID="f898695c-3396-42b3-9bf5-0fe05c234d39" containerName="registry-server" containerID="cri-o://12e3f61bf15c6297249cd5d3f9ceb0f6a6ece5c0350414a62156e950cb7d5f94" gracePeriod=2 Dec 10 11:02:38 crc kubenswrapper[4780]: I1210 11:02:38.222795 4780 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-g66q8" Dec 10 11:02:38 crc kubenswrapper[4780]: I1210 11:02:38.306767 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qfvrq\" (UniqueName: \"kubernetes.io/projected/f898695c-3396-42b3-9bf5-0fe05c234d39-kube-api-access-qfvrq\") pod \"f898695c-3396-42b3-9bf5-0fe05c234d39\" (UID: \"f898695c-3396-42b3-9bf5-0fe05c234d39\") " Dec 10 11:02:38 crc kubenswrapper[4780]: I1210 11:02:38.306854 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f898695c-3396-42b3-9bf5-0fe05c234d39-catalog-content\") pod \"f898695c-3396-42b3-9bf5-0fe05c234d39\" (UID: \"f898695c-3396-42b3-9bf5-0fe05c234d39\") " Dec 10 11:02:38 crc kubenswrapper[4780]: I1210 11:02:38.306969 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f898695c-3396-42b3-9bf5-0fe05c234d39-utilities\") pod \"f898695c-3396-42b3-9bf5-0fe05c234d39\" (UID: \"f898695c-3396-42b3-9bf5-0fe05c234d39\") " Dec 10 11:02:38 crc kubenswrapper[4780]: I1210 11:02:38.308169 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f898695c-3396-42b3-9bf5-0fe05c234d39-utilities" (OuterVolumeSpecName: "utilities") pod "f898695c-3396-42b3-9bf5-0fe05c234d39" (UID: "f898695c-3396-42b3-9bf5-0fe05c234d39"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 11:02:38 crc kubenswrapper[4780]: I1210 11:02:38.315548 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f898695c-3396-42b3-9bf5-0fe05c234d39-kube-api-access-qfvrq" (OuterVolumeSpecName: "kube-api-access-qfvrq") pod "f898695c-3396-42b3-9bf5-0fe05c234d39" (UID: "f898695c-3396-42b3-9bf5-0fe05c234d39"). InnerVolumeSpecName "kube-api-access-qfvrq". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:02:38 crc kubenswrapper[4780]: I1210 11:02:38.359244 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f898695c-3396-42b3-9bf5-0fe05c234d39-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "f898695c-3396-42b3-9bf5-0fe05c234d39" (UID: "f898695c-3396-42b3-9bf5-0fe05c234d39"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 11:02:38 crc kubenswrapper[4780]: I1210 11:02:38.408559 4780 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qfvrq\" (UniqueName: \"kubernetes.io/projected/f898695c-3396-42b3-9bf5-0fe05c234d39-kube-api-access-qfvrq\") on node \"crc\" DevicePath \"\"" Dec 10 11:02:38 crc kubenswrapper[4780]: I1210 11:02:38.409308 4780 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f898695c-3396-42b3-9bf5-0fe05c234d39-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 10 11:02:38 crc kubenswrapper[4780]: I1210 11:02:38.409333 4780 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f898695c-3396-42b3-9bf5-0fe05c234d39-utilities\") on node \"crc\" DevicePath \"\"" Dec 10 11:02:38 crc kubenswrapper[4780]: I1210 11:02:38.679829 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-vr2cr"] Dec 10 11:02:38 crc kubenswrapper[4780]: I1210 11:02:38.680283 4780 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-vr2cr" podUID="fa780797-49d6-443b-948f-d469f590ff89" containerName="registry-server" containerID="cri-o://474c6a77d4a280d09613bae3a0d5f2ad2025cc287e7ea84bd501620fefb1853d" gracePeriod=2 Dec 10 11:02:38 crc kubenswrapper[4780]: I1210 11:02:38.715821 4780 generic.go:334] "Generic (PLEG): container finished" podID="f898695c-3396-42b3-9bf5-0fe05c234d39" containerID="12e3f61bf15c6297249cd5d3f9ceb0f6a6ece5c0350414a62156e950cb7d5f94" exitCode=0 Dec 10 11:02:38 crc kubenswrapper[4780]: I1210 11:02:38.715943 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-g66q8" event={"ID":"f898695c-3396-42b3-9bf5-0fe05c234d39","Type":"ContainerDied","Data":"12e3f61bf15c6297249cd5d3f9ceb0f6a6ece5c0350414a62156e950cb7d5f94"} Dec 10 11:02:38 crc kubenswrapper[4780]: I1210 11:02:38.715989 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-g66q8" event={"ID":"f898695c-3396-42b3-9bf5-0fe05c234d39","Type":"ContainerDied","Data":"1a6dd50b80d7c159f87a0ffe85ef142bcfe42367fbc300dedd11a791a64d54ec"} Dec 10 11:02:38 crc kubenswrapper[4780]: I1210 11:02:38.716015 4780 scope.go:117] "RemoveContainer" containerID="12e3f61bf15c6297249cd5d3f9ceb0f6a6ece5c0350414a62156e950cb7d5f94" Dec 10 11:02:38 crc kubenswrapper[4780]: I1210 11:02:38.716236 4780 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-g66q8" Dec 10 11:02:38 crc kubenswrapper[4780]: I1210 11:02:38.723794 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/logging-loki-gateway-7d6b48847-cgx9d" event={"ID":"dc698901-e923-49fa-bc7f-f4e3f9f0a99b","Type":"ContainerStarted","Data":"8a623610d38b3bdc180e9648ee8fdd64f97c585f9887f0fe30823c71e973152f"} Dec 10 11:02:38 crc kubenswrapper[4780]: I1210 11:02:38.724156 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-logging/logging-loki-gateway-7d6b48847-cgx9d" Dec 10 11:02:38 crc kubenswrapper[4780]: I1210 11:02:38.724968 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-logging/logging-loki-gateway-7d6b48847-cgx9d" Dec 10 11:02:38 crc kubenswrapper[4780]: I1210 11:02:38.734034 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/logging-loki-gateway-7d6b48847-8n96n" event={"ID":"ec49e3ad-a0d4-4eaa-a8f6-6f76c1663546","Type":"ContainerStarted","Data":"9045d1920ae69491dfa6cba244326a34f04ba7ed2a44917c2eb1de079b7435b1"} Dec 10 11:02:38 crc kubenswrapper[4780]: I1210 11:02:38.735420 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-logging/logging-loki-gateway-7d6b48847-8n96n" Dec 10 11:02:38 crc kubenswrapper[4780]: I1210 11:02:38.735478 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-logging/logging-loki-gateway-7d6b48847-8n96n" Dec 10 11:02:38 crc kubenswrapper[4780]: I1210 11:02:38.741158 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-logging/logging-loki-gateway-7d6b48847-cgx9d" Dec 10 11:02:38 crc kubenswrapper[4780]: I1210 11:02:38.745388 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-logging/logging-loki-gateway-7d6b48847-cgx9d" Dec 10 11:02:38 crc kubenswrapper[4780]: I1210 11:02:38.747797 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-logging/logging-loki-gateway-7d6b48847-8n96n" Dec 10 11:02:38 crc kubenswrapper[4780]: I1210 11:02:38.754396 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-logging/logging-loki-gateway-7d6b48847-cgx9d" podStartSLOduration=2.458568489 podStartE2EDuration="16.75435202s" podCreationTimestamp="2025-12-10 11:02:22 +0000 UTC" firstStartedPulling="2025-12-10 11:02:23.48672329 +0000 UTC m=+1048.340116733" lastFinishedPulling="2025-12-10 11:02:37.782506821 +0000 UTC m=+1062.635900264" observedRunningTime="2025-12-10 11:02:38.750531403 +0000 UTC m=+1063.603924876" watchObservedRunningTime="2025-12-10 11:02:38.75435202 +0000 UTC m=+1063.607745463" Dec 10 11:02:38 crc kubenswrapper[4780]: I1210 11:02:38.763633 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-logging/logging-loki-gateway-7d6b48847-8n96n" Dec 10 11:02:38 crc kubenswrapper[4780]: I1210 11:02:38.794315 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-logging/logging-loki-gateway-7d6b48847-8n96n" podStartSLOduration=2.380262904 podStartE2EDuration="16.794272553s" podCreationTimestamp="2025-12-10 11:02:22 +0000 UTC" firstStartedPulling="2025-12-10 11:02:23.358592101 +0000 UTC m=+1048.211985544" lastFinishedPulling="2025-12-10 11:02:37.77260175 +0000 UTC m=+1062.625995193" observedRunningTime="2025-12-10 11:02:38.780505013 +0000 UTC m=+1063.633898456" watchObservedRunningTime="2025-12-10 
11:02:38.794272553 +0000 UTC m=+1063.647665996" Dec 10 11:02:38 crc kubenswrapper[4780]: I1210 11:02:38.802844 4780 scope.go:117] "RemoveContainer" containerID="6de5c3578ac31054005e7a5ba621b6053b4499de3d665df1d2ba592d7ae1c022" Dec 10 11:02:38 crc kubenswrapper[4780]: I1210 11:02:38.848131 4780 scope.go:117] "RemoveContainer" containerID="d5c32df13fd99746fcd165de6f9108f999f7d95f85ca531d485d850a8a2b2dad" Dec 10 11:02:38 crc kubenswrapper[4780]: I1210 11:02:38.872268 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-g66q8"] Dec 10 11:02:38 crc kubenswrapper[4780]: I1210 11:02:38.878603 4780 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-g66q8"] Dec 10 11:02:38 crc kubenswrapper[4780]: I1210 11:02:38.908095 4780 scope.go:117] "RemoveContainer" containerID="12e3f61bf15c6297249cd5d3f9ceb0f6a6ece5c0350414a62156e950cb7d5f94" Dec 10 11:02:38 crc kubenswrapper[4780]: E1210 11:02:38.908990 4780 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"12e3f61bf15c6297249cd5d3f9ceb0f6a6ece5c0350414a62156e950cb7d5f94\": container with ID starting with 12e3f61bf15c6297249cd5d3f9ceb0f6a6ece5c0350414a62156e950cb7d5f94 not found: ID does not exist" containerID="12e3f61bf15c6297249cd5d3f9ceb0f6a6ece5c0350414a62156e950cb7d5f94" Dec 10 11:02:38 crc kubenswrapper[4780]: I1210 11:02:38.909048 4780 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"12e3f61bf15c6297249cd5d3f9ceb0f6a6ece5c0350414a62156e950cb7d5f94"} err="failed to get container status \"12e3f61bf15c6297249cd5d3f9ceb0f6a6ece5c0350414a62156e950cb7d5f94\": rpc error: code = NotFound desc = could not find container \"12e3f61bf15c6297249cd5d3f9ceb0f6a6ece5c0350414a62156e950cb7d5f94\": container with ID starting with 12e3f61bf15c6297249cd5d3f9ceb0f6a6ece5c0350414a62156e950cb7d5f94 not found: ID does not exist" Dec 10 11:02:38 crc kubenswrapper[4780]: I1210 11:02:38.909082 4780 scope.go:117] "RemoveContainer" containerID="6de5c3578ac31054005e7a5ba621b6053b4499de3d665df1d2ba592d7ae1c022" Dec 10 11:02:38 crc kubenswrapper[4780]: E1210 11:02:38.911848 4780 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6de5c3578ac31054005e7a5ba621b6053b4499de3d665df1d2ba592d7ae1c022\": container with ID starting with 6de5c3578ac31054005e7a5ba621b6053b4499de3d665df1d2ba592d7ae1c022 not found: ID does not exist" containerID="6de5c3578ac31054005e7a5ba621b6053b4499de3d665df1d2ba592d7ae1c022" Dec 10 11:02:38 crc kubenswrapper[4780]: I1210 11:02:38.911892 4780 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6de5c3578ac31054005e7a5ba621b6053b4499de3d665df1d2ba592d7ae1c022"} err="failed to get container status \"6de5c3578ac31054005e7a5ba621b6053b4499de3d665df1d2ba592d7ae1c022\": rpc error: code = NotFound desc = could not find container \"6de5c3578ac31054005e7a5ba621b6053b4499de3d665df1d2ba592d7ae1c022\": container with ID starting with 6de5c3578ac31054005e7a5ba621b6053b4499de3d665df1d2ba592d7ae1c022 not found: ID does not exist" Dec 10 11:02:38 crc kubenswrapper[4780]: I1210 11:02:38.912009 4780 scope.go:117] "RemoveContainer" containerID="d5c32df13fd99746fcd165de6f9108f999f7d95f85ca531d485d850a8a2b2dad" Dec 10 11:02:38 crc kubenswrapper[4780]: E1210 11:02:38.912357 4780 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = 
NotFound desc = could not find container \"d5c32df13fd99746fcd165de6f9108f999f7d95f85ca531d485d850a8a2b2dad\": container with ID starting with d5c32df13fd99746fcd165de6f9108f999f7d95f85ca531d485d850a8a2b2dad not found: ID does not exist" containerID="d5c32df13fd99746fcd165de6f9108f999f7d95f85ca531d485d850a8a2b2dad" Dec 10 11:02:38 crc kubenswrapper[4780]: I1210 11:02:38.912377 4780 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d5c32df13fd99746fcd165de6f9108f999f7d95f85ca531d485d850a8a2b2dad"} err="failed to get container status \"d5c32df13fd99746fcd165de6f9108f999f7d95f85ca531d485d850a8a2b2dad\": rpc error: code = NotFound desc = could not find container \"d5c32df13fd99746fcd165de6f9108f999f7d95f85ca531d485d850a8a2b2dad\": container with ID starting with d5c32df13fd99746fcd165de6f9108f999f7d95f85ca531d485d850a8a2b2dad not found: ID does not exist" Dec 10 11:02:39 crc kubenswrapper[4780]: I1210 11:02:39.550851 4780 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-vr2cr" Dec 10 11:02:39 crc kubenswrapper[4780]: I1210 11:02:39.743272 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4qg6m\" (UniqueName: \"kubernetes.io/projected/fa780797-49d6-443b-948f-d469f590ff89-kube-api-access-4qg6m\") pod \"fa780797-49d6-443b-948f-d469f590ff89\" (UID: \"fa780797-49d6-443b-948f-d469f590ff89\") " Dec 10 11:02:39 crc kubenswrapper[4780]: I1210 11:02:39.743529 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fa780797-49d6-443b-948f-d469f590ff89-utilities\") pod \"fa780797-49d6-443b-948f-d469f590ff89\" (UID: \"fa780797-49d6-443b-948f-d469f590ff89\") " Dec 10 11:02:39 crc kubenswrapper[4780]: I1210 11:02:39.743583 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fa780797-49d6-443b-948f-d469f590ff89-catalog-content\") pod \"fa780797-49d6-443b-948f-d469f590ff89\" (UID: \"fa780797-49d6-443b-948f-d469f590ff89\") " Dec 10 11:02:39 crc kubenswrapper[4780]: I1210 11:02:39.744626 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/fa780797-49d6-443b-948f-d469f590ff89-utilities" (OuterVolumeSpecName: "utilities") pod "fa780797-49d6-443b-948f-d469f590ff89" (UID: "fa780797-49d6-443b-948f-d469f590ff89"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 11:02:39 crc kubenswrapper[4780]: I1210 11:02:39.747996 4780 generic.go:334] "Generic (PLEG): container finished" podID="fa780797-49d6-443b-948f-d469f590ff89" containerID="474c6a77d4a280d09613bae3a0d5f2ad2025cc287e7ea84bd501620fefb1853d" exitCode=0 Dec 10 11:02:39 crc kubenswrapper[4780]: I1210 11:02:39.749482 4780 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-vr2cr" Dec 10 11:02:39 crc kubenswrapper[4780]: I1210 11:02:39.749864 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fa780797-49d6-443b-948f-d469f590ff89-kube-api-access-4qg6m" (OuterVolumeSpecName: "kube-api-access-4qg6m") pod "fa780797-49d6-443b-948f-d469f590ff89" (UID: "fa780797-49d6-443b-948f-d469f590ff89"). InnerVolumeSpecName "kube-api-access-4qg6m". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:02:39 crc kubenswrapper[4780]: I1210 11:02:39.750001 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-vr2cr" event={"ID":"fa780797-49d6-443b-948f-d469f590ff89","Type":"ContainerDied","Data":"474c6a77d4a280d09613bae3a0d5f2ad2025cc287e7ea84bd501620fefb1853d"} Dec 10 11:02:39 crc kubenswrapper[4780]: I1210 11:02:39.750068 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-vr2cr" event={"ID":"fa780797-49d6-443b-948f-d469f590ff89","Type":"ContainerDied","Data":"ec5c910316187f66dcda77986ffe54dbe334f732d4e754a3c44c9e5b9fe2b900"} Dec 10 11:02:39 crc kubenswrapper[4780]: I1210 11:02:39.750095 4780 scope.go:117] "RemoveContainer" containerID="474c6a77d4a280d09613bae3a0d5f2ad2025cc287e7ea84bd501620fefb1853d" Dec 10 11:02:39 crc kubenswrapper[4780]: I1210 11:02:39.767231 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/fa780797-49d6-443b-948f-d469f590ff89-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "fa780797-49d6-443b-948f-d469f590ff89" (UID: "fa780797-49d6-443b-948f-d469f590ff89"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 11:02:39 crc kubenswrapper[4780]: I1210 11:02:39.767682 4780 scope.go:117] "RemoveContainer" containerID="2f48a3cf28616a423542d182ad98f7964b023463e7c960da12740f4dae9c9b73" Dec 10 11:02:39 crc kubenswrapper[4780]: I1210 11:02:39.789471 4780 scope.go:117] "RemoveContainer" containerID="9e0cad36a10ca6b02e1208810f2718ff564241e220d4f4f88d90cac7b19cc592" Dec 10 11:02:39 crc kubenswrapper[4780]: I1210 11:02:39.812664 4780 scope.go:117] "RemoveContainer" containerID="474c6a77d4a280d09613bae3a0d5f2ad2025cc287e7ea84bd501620fefb1853d" Dec 10 11:02:39 crc kubenswrapper[4780]: E1210 11:02:39.814647 4780 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"474c6a77d4a280d09613bae3a0d5f2ad2025cc287e7ea84bd501620fefb1853d\": container with ID starting with 474c6a77d4a280d09613bae3a0d5f2ad2025cc287e7ea84bd501620fefb1853d not found: ID does not exist" containerID="474c6a77d4a280d09613bae3a0d5f2ad2025cc287e7ea84bd501620fefb1853d" Dec 10 11:02:39 crc kubenswrapper[4780]: I1210 11:02:39.814750 4780 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"474c6a77d4a280d09613bae3a0d5f2ad2025cc287e7ea84bd501620fefb1853d"} err="failed to get container status \"474c6a77d4a280d09613bae3a0d5f2ad2025cc287e7ea84bd501620fefb1853d\": rpc error: code = NotFound desc = could not find container \"474c6a77d4a280d09613bae3a0d5f2ad2025cc287e7ea84bd501620fefb1853d\": container with ID starting with 474c6a77d4a280d09613bae3a0d5f2ad2025cc287e7ea84bd501620fefb1853d not found: ID does not exist" Dec 10 11:02:39 crc kubenswrapper[4780]: I1210 11:02:39.814810 4780 scope.go:117] "RemoveContainer" containerID="2f48a3cf28616a423542d182ad98f7964b023463e7c960da12740f4dae9c9b73" Dec 10 11:02:39 crc kubenswrapper[4780]: E1210 11:02:39.815632 4780 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2f48a3cf28616a423542d182ad98f7964b023463e7c960da12740f4dae9c9b73\": container with ID starting with 2f48a3cf28616a423542d182ad98f7964b023463e7c960da12740f4dae9c9b73 not found: ID does not exist" 
containerID="2f48a3cf28616a423542d182ad98f7964b023463e7c960da12740f4dae9c9b73" Dec 10 11:02:39 crc kubenswrapper[4780]: I1210 11:02:39.815705 4780 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2f48a3cf28616a423542d182ad98f7964b023463e7c960da12740f4dae9c9b73"} err="failed to get container status \"2f48a3cf28616a423542d182ad98f7964b023463e7c960da12740f4dae9c9b73\": rpc error: code = NotFound desc = could not find container \"2f48a3cf28616a423542d182ad98f7964b023463e7c960da12740f4dae9c9b73\": container with ID starting with 2f48a3cf28616a423542d182ad98f7964b023463e7c960da12740f4dae9c9b73 not found: ID does not exist" Dec 10 11:02:39 crc kubenswrapper[4780]: I1210 11:02:39.815756 4780 scope.go:117] "RemoveContainer" containerID="9e0cad36a10ca6b02e1208810f2718ff564241e220d4f4f88d90cac7b19cc592" Dec 10 11:02:39 crc kubenswrapper[4780]: E1210 11:02:39.816484 4780 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9e0cad36a10ca6b02e1208810f2718ff564241e220d4f4f88d90cac7b19cc592\": container with ID starting with 9e0cad36a10ca6b02e1208810f2718ff564241e220d4f4f88d90cac7b19cc592 not found: ID does not exist" containerID="9e0cad36a10ca6b02e1208810f2718ff564241e220d4f4f88d90cac7b19cc592" Dec 10 11:02:39 crc kubenswrapper[4780]: I1210 11:02:39.816525 4780 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9e0cad36a10ca6b02e1208810f2718ff564241e220d4f4f88d90cac7b19cc592"} err="failed to get container status \"9e0cad36a10ca6b02e1208810f2718ff564241e220d4f4f88d90cac7b19cc592\": rpc error: code = NotFound desc = could not find container \"9e0cad36a10ca6b02e1208810f2718ff564241e220d4f4f88d90cac7b19cc592\": container with ID starting with 9e0cad36a10ca6b02e1208810f2718ff564241e220d4f4f88d90cac7b19cc592 not found: ID does not exist" Dec 10 11:02:39 crc kubenswrapper[4780]: I1210 11:02:39.846013 4780 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fa780797-49d6-443b-948f-d469f590ff89-utilities\") on node \"crc\" DevicePath \"\"" Dec 10 11:02:39 crc kubenswrapper[4780]: I1210 11:02:39.846874 4780 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fa780797-49d6-443b-948f-d469f590ff89-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 10 11:02:39 crc kubenswrapper[4780]: I1210 11:02:39.847006 4780 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4qg6m\" (UniqueName: \"kubernetes.io/projected/fa780797-49d6-443b-948f-d469f590ff89-kube-api-access-4qg6m\") on node \"crc\" DevicePath \"\"" Dec 10 11:02:39 crc kubenswrapper[4780]: I1210 11:02:39.969975 4780 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f898695c-3396-42b3-9bf5-0fe05c234d39" path="/var/lib/kubelet/pods/f898695c-3396-42b3-9bf5-0fe05c234d39/volumes" Dec 10 11:02:40 crc kubenswrapper[4780]: I1210 11:02:40.083680 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-vr2cr"] Dec 10 11:02:40 crc kubenswrapper[4780]: I1210 11:02:40.091778 4780 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-vr2cr"] Dec 10 11:02:41 crc kubenswrapper[4780]: I1210 11:02:41.970803 4780 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fa780797-49d6-443b-948f-d469f590ff89" 
path="/var/lib/kubelet/pods/fa780797-49d6-443b-948f-d469f590ff89/volumes" Dec 10 11:02:52 crc kubenswrapper[4780]: I1210 11:02:52.115096 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-logging/logging-loki-distributor-76cc67bf56-j5hdz" Dec 10 11:02:52 crc kubenswrapper[4780]: I1210 11:02:52.399114 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-logging/logging-loki-querier-5895d59bb8-h98vv" Dec 10 11:02:52 crc kubenswrapper[4780]: I1210 11:02:52.513646 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-logging/logging-loki-query-frontend-84558f7c9f-tjd86" Dec 10 11:02:53 crc kubenswrapper[4780]: I1210 11:02:53.557648 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-logging/logging-loki-index-gateway-0" Dec 10 11:02:53 crc kubenswrapper[4780]: I1210 11:02:53.735164 4780 patch_prober.go:28] interesting pod/logging-loki-ingester-0 container/loki-ingester namespace/openshift-logging: Readiness probe status=failure output="HTTP probe failed with statuscode: 503" start-of-body=Ingester not ready: this instance owns no tokens Dec 10 11:02:53 crc kubenswrapper[4780]: I1210 11:02:53.735288 4780 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-logging/logging-loki-ingester-0" podUID="2b4928b0-ec66-4cee-8fd3-2067b64c4144" containerName="loki-ingester" probeResult="failure" output="HTTP probe failed with statuscode: 503" Dec 10 11:02:53 crc kubenswrapper[4780]: I1210 11:02:53.832575 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-logging/logging-loki-compactor-0" Dec 10 11:02:57 crc kubenswrapper[4780]: I1210 11:02:57.476232 4780 patch_prober.go:28] interesting pod/machine-config-daemon-xhdr5 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 10 11:02:57 crc kubenswrapper[4780]: I1210 11:02:57.476746 4780 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" podUID="6bf1dca1-b191-4796-b326-baac53e84045" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 10 11:02:57 crc kubenswrapper[4780]: I1210 11:02:57.476887 4780 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" Dec 10 11:02:57 crc kubenswrapper[4780]: I1210 11:02:57.477912 4780 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"ed771cb9f33bfa44077ddff43b64d4340b6f781baf12fbbaaac2b0023588cc1c"} pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Dec 10 11:02:57 crc kubenswrapper[4780]: I1210 11:02:57.478045 4780 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" podUID="6bf1dca1-b191-4796-b326-baac53e84045" containerName="machine-config-daemon" containerID="cri-o://ed771cb9f33bfa44077ddff43b64d4340b6f781baf12fbbaaac2b0023588cc1c" gracePeriod=600 Dec 10 11:02:57 crc kubenswrapper[4780]: I1210 11:02:57.934720 4780 
generic.go:334] "Generic (PLEG): container finished" podID="6bf1dca1-b191-4796-b326-baac53e84045" containerID="ed771cb9f33bfa44077ddff43b64d4340b6f781baf12fbbaaac2b0023588cc1c" exitCode=0 Dec 10 11:02:57 crc kubenswrapper[4780]: I1210 11:02:57.934791 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" event={"ID":"6bf1dca1-b191-4796-b326-baac53e84045","Type":"ContainerDied","Data":"ed771cb9f33bfa44077ddff43b64d4340b6f781baf12fbbaaac2b0023588cc1c"} Dec 10 11:02:57 crc kubenswrapper[4780]: I1210 11:02:57.934933 4780 scope.go:117] "RemoveContainer" containerID="3fbfe685041c9fd303141118710f14f576d10f3417446140048debbbb20a3ef0" Dec 10 11:02:58 crc kubenswrapper[4780]: I1210 11:02:58.946736 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" event={"ID":"6bf1dca1-b191-4796-b326-baac53e84045","Type":"ContainerStarted","Data":"2a20f929d413d862ac186ee1144a2d1d554405829efe199ddb0dfa3f0f9ae340"} Dec 10 11:03:03 crc kubenswrapper[4780]: I1210 11:03:03.693434 4780 patch_prober.go:28] interesting pod/logging-loki-ingester-0 container/loki-ingester namespace/openshift-logging: Readiness probe status=failure output="HTTP probe failed with statuscode: 503" start-of-body=Ingester not ready: this instance owns no tokens Dec 10 11:03:03 crc kubenswrapper[4780]: I1210 11:03:03.694094 4780 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-logging/logging-loki-ingester-0" podUID="2b4928b0-ec66-4cee-8fd3-2067b64c4144" containerName="loki-ingester" probeResult="failure" output="HTTP probe failed with statuscode: 503" Dec 10 11:03:13 crc kubenswrapper[4780]: I1210 11:03:13.693122 4780 patch_prober.go:28] interesting pod/logging-loki-ingester-0 container/loki-ingester namespace/openshift-logging: Readiness probe status=failure output="HTTP probe failed with statuscode: 503" start-of-body=Ingester not ready: waiting for 15s after being ready Dec 10 11:03:13 crc kubenswrapper[4780]: I1210 11:03:13.693770 4780 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-logging/logging-loki-ingester-0" podUID="2b4928b0-ec66-4cee-8fd3-2067b64c4144" containerName="loki-ingester" probeResult="failure" output="HTTP probe failed with statuscode: 503" Dec 10 11:03:23 crc kubenswrapper[4780]: I1210 11:03:23.696906 4780 patch_prober.go:28] interesting pod/logging-loki-ingester-0 container/loki-ingester namespace/openshift-logging: Readiness probe status=failure output="HTTP probe failed with statuscode: 503" start-of-body=Ingester not ready: waiting for 15s after being ready Dec 10 11:03:23 crc kubenswrapper[4780]: I1210 11:03:23.698247 4780 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-logging/logging-loki-ingester-0" podUID="2b4928b0-ec66-4cee-8fd3-2067b64c4144" containerName="loki-ingester" probeResult="failure" output="HTTP probe failed with statuscode: 503" Dec 10 11:03:33 crc kubenswrapper[4780]: I1210 11:03:33.691169 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-logging/logging-loki-ingester-0" Dec 10 11:03:41 crc kubenswrapper[4780]: I1210 11:03:41.559789 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-logging/collector-t9hxr"] Dec 10 11:03:41 crc kubenswrapper[4780]: E1210 11:03:41.560938 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="85b7225d-3a09-42e4-8fcb-50eede1ecff1" containerName="extract-utilities" Dec 10 11:03:41 crc kubenswrapper[4780]: 
I1210 11:03:41.560968 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="85b7225d-3a09-42e4-8fcb-50eede1ecff1" containerName="extract-utilities" Dec 10 11:03:41 crc kubenswrapper[4780]: E1210 11:03:41.561003 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fa780797-49d6-443b-948f-d469f590ff89" containerName="extract-content" Dec 10 11:03:41 crc kubenswrapper[4780]: I1210 11:03:41.561011 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="fa780797-49d6-443b-948f-d469f590ff89" containerName="extract-content" Dec 10 11:03:41 crc kubenswrapper[4780]: E1210 11:03:41.561023 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fa780797-49d6-443b-948f-d469f590ff89" containerName="extract-utilities" Dec 10 11:03:41 crc kubenswrapper[4780]: I1210 11:03:41.561030 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="fa780797-49d6-443b-948f-d469f590ff89" containerName="extract-utilities" Dec 10 11:03:41 crc kubenswrapper[4780]: E1210 11:03:41.561042 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="85b7225d-3a09-42e4-8fcb-50eede1ecff1" containerName="registry-server" Dec 10 11:03:41 crc kubenswrapper[4780]: I1210 11:03:41.561049 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="85b7225d-3a09-42e4-8fcb-50eede1ecff1" containerName="registry-server" Dec 10 11:03:41 crc kubenswrapper[4780]: E1210 11:03:41.561060 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f898695c-3396-42b3-9bf5-0fe05c234d39" containerName="extract-utilities" Dec 10 11:03:41 crc kubenswrapper[4780]: I1210 11:03:41.561066 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="f898695c-3396-42b3-9bf5-0fe05c234d39" containerName="extract-utilities" Dec 10 11:03:41 crc kubenswrapper[4780]: E1210 11:03:41.561078 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f898695c-3396-42b3-9bf5-0fe05c234d39" containerName="registry-server" Dec 10 11:03:41 crc kubenswrapper[4780]: I1210 11:03:41.561084 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="f898695c-3396-42b3-9bf5-0fe05c234d39" containerName="registry-server" Dec 10 11:03:41 crc kubenswrapper[4780]: E1210 11:03:41.561097 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="85b7225d-3a09-42e4-8fcb-50eede1ecff1" containerName="extract-content" Dec 10 11:03:41 crc kubenswrapper[4780]: I1210 11:03:41.561103 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="85b7225d-3a09-42e4-8fcb-50eede1ecff1" containerName="extract-content" Dec 10 11:03:41 crc kubenswrapper[4780]: E1210 11:03:41.561114 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fa780797-49d6-443b-948f-d469f590ff89" containerName="registry-server" Dec 10 11:03:41 crc kubenswrapper[4780]: I1210 11:03:41.561120 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="fa780797-49d6-443b-948f-d469f590ff89" containerName="registry-server" Dec 10 11:03:41 crc kubenswrapper[4780]: E1210 11:03:41.561133 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f898695c-3396-42b3-9bf5-0fe05c234d39" containerName="extract-content" Dec 10 11:03:41 crc kubenswrapper[4780]: I1210 11:03:41.561139 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="f898695c-3396-42b3-9bf5-0fe05c234d39" containerName="extract-content" Dec 10 11:03:41 crc kubenswrapper[4780]: I1210 11:03:41.561325 4780 memory_manager.go:354] "RemoveStaleState removing state" podUID="fa780797-49d6-443b-948f-d469f590ff89" containerName="registry-server" Dec 10 
11:03:41 crc kubenswrapper[4780]: I1210 11:03:41.561350 4780 memory_manager.go:354] "RemoveStaleState removing state" podUID="85b7225d-3a09-42e4-8fcb-50eede1ecff1" containerName="registry-server" Dec 10 11:03:41 crc kubenswrapper[4780]: I1210 11:03:41.561360 4780 memory_manager.go:354] "RemoveStaleState removing state" podUID="f898695c-3396-42b3-9bf5-0fe05c234d39" containerName="registry-server" Dec 10 11:03:41 crc kubenswrapper[4780]: I1210 11:03:41.562149 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-logging/collector-t9hxr" Dec 10 11:03:41 crc kubenswrapper[4780]: I1210 11:03:41.567949 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"collector-syslog-receiver" Dec 10 11:03:41 crc kubenswrapper[4780]: I1210 11:03:41.569405 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"collector-token" Dec 10 11:03:41 crc kubenswrapper[4780]: I1210 11:03:41.569546 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"collector-dockercfg-mhchm" Dec 10 11:03:41 crc kubenswrapper[4780]: I1210 11:03:41.569655 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"collector-metrics" Dec 10 11:03:41 crc kubenswrapper[4780]: I1210 11:03:41.569815 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-logging"/"collector-config" Dec 10 11:03:41 crc kubenswrapper[4780]: I1210 11:03:41.575953 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-logging"/"collector-trustbundle" Dec 10 11:03:41 crc kubenswrapper[4780]: I1210 11:03:41.578796 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/42d2f75b-129e-4f56-bc30-cc186ab1cc43-config\") pod \"collector-t9hxr\" (UID: \"42d2f75b-129e-4f56-bc30-cc186ab1cc43\") " pod="openshift-logging/collector-t9hxr" Dec 10 11:03:41 crc kubenswrapper[4780]: I1210 11:03:41.578977 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"collector-token\" (UniqueName: \"kubernetes.io/secret/42d2f75b-129e-4f56-bc30-cc186ab1cc43-collector-token\") pod \"collector-t9hxr\" (UID: \"42d2f75b-129e-4f56-bc30-cc186ab1cc43\") " pod="openshift-logging/collector-t9hxr" Dec 10 11:03:41 crc kubenswrapper[4780]: I1210 11:03:41.579034 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics\" (UniqueName: \"kubernetes.io/secret/42d2f75b-129e-4f56-bc30-cc186ab1cc43-metrics\") pod \"collector-t9hxr\" (UID: \"42d2f75b-129e-4f56-bc30-cc186ab1cc43\") " pod="openshift-logging/collector-t9hxr" Dec 10 11:03:41 crc kubenswrapper[4780]: I1210 11:03:41.579079 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-openshift-service-cacrt\" (UniqueName: \"kubernetes.io/configmap/42d2f75b-129e-4f56-bc30-cc186ab1cc43-config-openshift-service-cacrt\") pod \"collector-t9hxr\" (UID: \"42d2f75b-129e-4f56-bc30-cc186ab1cc43\") " pod="openshift-logging/collector-t9hxr" Dec 10 11:03:41 crc kubenswrapper[4780]: I1210 11:03:41.579127 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"collector-syslog-receiver\" (UniqueName: \"kubernetes.io/secret/42d2f75b-129e-4f56-bc30-cc186ab1cc43-collector-syslog-receiver\") pod \"collector-t9hxr\" (UID: \"42d2f75b-129e-4f56-bc30-cc186ab1cc43\") " 
pod="openshift-logging/collector-t9hxr" Dec 10 11:03:41 crc kubenswrapper[4780]: I1210 11:03:41.579163 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/42d2f75b-129e-4f56-bc30-cc186ab1cc43-tmp\") pod \"collector-t9hxr\" (UID: \"42d2f75b-129e-4f56-bc30-cc186ab1cc43\") " pod="openshift-logging/collector-t9hxr" Dec 10 11:03:41 crc kubenswrapper[4780]: I1210 11:03:41.579195 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/42d2f75b-129e-4f56-bc30-cc186ab1cc43-trusted-ca\") pod \"collector-t9hxr\" (UID: \"42d2f75b-129e-4f56-bc30-cc186ab1cc43\") " pod="openshift-logging/collector-t9hxr" Dec 10 11:03:41 crc kubenswrapper[4780]: I1210 11:03:41.579237 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sa-token\" (UniqueName: \"kubernetes.io/projected/42d2f75b-129e-4f56-bc30-cc186ab1cc43-sa-token\") pod \"collector-t9hxr\" (UID: \"42d2f75b-129e-4f56-bc30-cc186ab1cc43\") " pod="openshift-logging/collector-t9hxr" Dec 10 11:03:41 crc kubenswrapper[4780]: I1210 11:03:41.579278 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"datadir\" (UniqueName: \"kubernetes.io/host-path/42d2f75b-129e-4f56-bc30-cc186ab1cc43-datadir\") pod \"collector-t9hxr\" (UID: \"42d2f75b-129e-4f56-bc30-cc186ab1cc43\") " pod="openshift-logging/collector-t9hxr" Dec 10 11:03:41 crc kubenswrapper[4780]: I1210 11:03:41.579329 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5d6qc\" (UniqueName: \"kubernetes.io/projected/42d2f75b-129e-4f56-bc30-cc186ab1cc43-kube-api-access-5d6qc\") pod \"collector-t9hxr\" (UID: \"42d2f75b-129e-4f56-bc30-cc186ab1cc43\") " pod="openshift-logging/collector-t9hxr" Dec 10 11:03:41 crc kubenswrapper[4780]: I1210 11:03:41.579358 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"entrypoint\" (UniqueName: \"kubernetes.io/configmap/42d2f75b-129e-4f56-bc30-cc186ab1cc43-entrypoint\") pod \"collector-t9hxr\" (UID: \"42d2f75b-129e-4f56-bc30-cc186ab1cc43\") " pod="openshift-logging/collector-t9hxr" Dec 10 11:03:41 crc kubenswrapper[4780]: I1210 11:03:41.588173 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/collector-t9hxr"] Dec 10 11:03:41 crc kubenswrapper[4780]: I1210 11:03:41.636947 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-logging/collector-t9hxr"] Dec 10 11:03:41 crc kubenswrapper[4780]: E1210 11:03:41.637827 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="unmounted volumes=[collector-syslog-receiver collector-token config config-openshift-service-cacrt datadir entrypoint kube-api-access-5d6qc metrics sa-token tmp trusted-ca], unattached volumes=[], failed to process volumes=[]: context canceled" pod="openshift-logging/collector-t9hxr" podUID="42d2f75b-129e-4f56-bc30-cc186ab1cc43" Dec 10 11:03:41 crc kubenswrapper[4780]: I1210 11:03:41.681100 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-openshift-service-cacrt\" (UniqueName: \"kubernetes.io/configmap/42d2f75b-129e-4f56-bc30-cc186ab1cc43-config-openshift-service-cacrt\") pod \"collector-t9hxr\" (UID: \"42d2f75b-129e-4f56-bc30-cc186ab1cc43\") " pod="openshift-logging/collector-t9hxr" Dec 10 11:03:41 crc kubenswrapper[4780]: I1210 
11:03:41.681212 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"collector-syslog-receiver\" (UniqueName: \"kubernetes.io/secret/42d2f75b-129e-4f56-bc30-cc186ab1cc43-collector-syslog-receiver\") pod \"collector-t9hxr\" (UID: \"42d2f75b-129e-4f56-bc30-cc186ab1cc43\") " pod="openshift-logging/collector-t9hxr" Dec 10 11:03:41 crc kubenswrapper[4780]: I1210 11:03:41.681252 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/42d2f75b-129e-4f56-bc30-cc186ab1cc43-tmp\") pod \"collector-t9hxr\" (UID: \"42d2f75b-129e-4f56-bc30-cc186ab1cc43\") " pod="openshift-logging/collector-t9hxr" Dec 10 11:03:41 crc kubenswrapper[4780]: I1210 11:03:41.681283 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/42d2f75b-129e-4f56-bc30-cc186ab1cc43-trusted-ca\") pod \"collector-t9hxr\" (UID: \"42d2f75b-129e-4f56-bc30-cc186ab1cc43\") " pod="openshift-logging/collector-t9hxr" Dec 10 11:03:41 crc kubenswrapper[4780]: I1210 11:03:41.681312 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sa-token\" (UniqueName: \"kubernetes.io/projected/42d2f75b-129e-4f56-bc30-cc186ab1cc43-sa-token\") pod \"collector-t9hxr\" (UID: \"42d2f75b-129e-4f56-bc30-cc186ab1cc43\") " pod="openshift-logging/collector-t9hxr" Dec 10 11:03:41 crc kubenswrapper[4780]: I1210 11:03:41.681336 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"datadir\" (UniqueName: \"kubernetes.io/host-path/42d2f75b-129e-4f56-bc30-cc186ab1cc43-datadir\") pod \"collector-t9hxr\" (UID: \"42d2f75b-129e-4f56-bc30-cc186ab1cc43\") " pod="openshift-logging/collector-t9hxr" Dec 10 11:03:41 crc kubenswrapper[4780]: I1210 11:03:41.681411 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5d6qc\" (UniqueName: \"kubernetes.io/projected/42d2f75b-129e-4f56-bc30-cc186ab1cc43-kube-api-access-5d6qc\") pod \"collector-t9hxr\" (UID: \"42d2f75b-129e-4f56-bc30-cc186ab1cc43\") " pod="openshift-logging/collector-t9hxr" Dec 10 11:03:41 crc kubenswrapper[4780]: I1210 11:03:41.681453 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"entrypoint\" (UniqueName: \"kubernetes.io/configmap/42d2f75b-129e-4f56-bc30-cc186ab1cc43-entrypoint\") pod \"collector-t9hxr\" (UID: \"42d2f75b-129e-4f56-bc30-cc186ab1cc43\") " pod="openshift-logging/collector-t9hxr" Dec 10 11:03:41 crc kubenswrapper[4780]: I1210 11:03:41.681503 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/42d2f75b-129e-4f56-bc30-cc186ab1cc43-config\") pod \"collector-t9hxr\" (UID: \"42d2f75b-129e-4f56-bc30-cc186ab1cc43\") " pod="openshift-logging/collector-t9hxr" Dec 10 11:03:41 crc kubenswrapper[4780]: I1210 11:03:41.681563 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"collector-token\" (UniqueName: \"kubernetes.io/secret/42d2f75b-129e-4f56-bc30-cc186ab1cc43-collector-token\") pod \"collector-t9hxr\" (UID: \"42d2f75b-129e-4f56-bc30-cc186ab1cc43\") " pod="openshift-logging/collector-t9hxr" Dec 10 11:03:41 crc kubenswrapper[4780]: I1210 11:03:41.681592 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics\" (UniqueName: \"kubernetes.io/secret/42d2f75b-129e-4f56-bc30-cc186ab1cc43-metrics\") pod \"collector-t9hxr\" (UID: 
\"42d2f75b-129e-4f56-bc30-cc186ab1cc43\") " pod="openshift-logging/collector-t9hxr" Dec 10 11:03:41 crc kubenswrapper[4780]: E1210 11:03:41.681776 4780 secret.go:188] Couldn't get secret openshift-logging/collector-metrics: secret "collector-metrics" not found Dec 10 11:03:41 crc kubenswrapper[4780]: E1210 11:03:41.681982 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/42d2f75b-129e-4f56-bc30-cc186ab1cc43-metrics podName:42d2f75b-129e-4f56-bc30-cc186ab1cc43 nodeName:}" failed. No retries permitted until 2025-12-10 11:03:42.181859602 +0000 UTC m=+1127.035253045 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "metrics" (UniqueName: "kubernetes.io/secret/42d2f75b-129e-4f56-bc30-cc186ab1cc43-metrics") pod "collector-t9hxr" (UID: "42d2f75b-129e-4f56-bc30-cc186ab1cc43") : secret "collector-metrics" not found Dec 10 11:03:41 crc kubenswrapper[4780]: I1210 11:03:41.682537 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"datadir\" (UniqueName: \"kubernetes.io/host-path/42d2f75b-129e-4f56-bc30-cc186ab1cc43-datadir\") pod \"collector-t9hxr\" (UID: \"42d2f75b-129e-4f56-bc30-cc186ab1cc43\") " pod="openshift-logging/collector-t9hxr" Dec 10 11:03:41 crc kubenswrapper[4780]: I1210 11:03:41.683381 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-openshift-service-cacrt\" (UniqueName: \"kubernetes.io/configmap/42d2f75b-129e-4f56-bc30-cc186ab1cc43-config-openshift-service-cacrt\") pod \"collector-t9hxr\" (UID: \"42d2f75b-129e-4f56-bc30-cc186ab1cc43\") " pod="openshift-logging/collector-t9hxr" Dec 10 11:03:41 crc kubenswrapper[4780]: E1210 11:03:41.683483 4780 secret.go:188] Couldn't get secret openshift-logging/collector-syslog-receiver: secret "collector-syslog-receiver" not found Dec 10 11:03:41 crc kubenswrapper[4780]: E1210 11:03:41.683526 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/42d2f75b-129e-4f56-bc30-cc186ab1cc43-collector-syslog-receiver podName:42d2f75b-129e-4f56-bc30-cc186ab1cc43 nodeName:}" failed. No retries permitted until 2025-12-10 11:03:42.183510484 +0000 UTC m=+1127.036903927 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "collector-syslog-receiver" (UniqueName: "kubernetes.io/secret/42d2f75b-129e-4f56-bc30-cc186ab1cc43-collector-syslog-receiver") pod "collector-t9hxr" (UID: "42d2f75b-129e-4f56-bc30-cc186ab1cc43") : secret "collector-syslog-receiver" not found Dec 10 11:03:41 crc kubenswrapper[4780]: I1210 11:03:41.683841 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"entrypoint\" (UniqueName: \"kubernetes.io/configmap/42d2f75b-129e-4f56-bc30-cc186ab1cc43-entrypoint\") pod \"collector-t9hxr\" (UID: \"42d2f75b-129e-4f56-bc30-cc186ab1cc43\") " pod="openshift-logging/collector-t9hxr" Dec 10 11:03:41 crc kubenswrapper[4780]: I1210 11:03:41.684622 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/42d2f75b-129e-4f56-bc30-cc186ab1cc43-config\") pod \"collector-t9hxr\" (UID: \"42d2f75b-129e-4f56-bc30-cc186ab1cc43\") " pod="openshift-logging/collector-t9hxr" Dec 10 11:03:41 crc kubenswrapper[4780]: I1210 11:03:41.685848 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/42d2f75b-129e-4f56-bc30-cc186ab1cc43-trusted-ca\") pod \"collector-t9hxr\" (UID: \"42d2f75b-129e-4f56-bc30-cc186ab1cc43\") " pod="openshift-logging/collector-t9hxr" Dec 10 11:03:41 crc kubenswrapper[4780]: I1210 11:03:41.692374 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/42d2f75b-129e-4f56-bc30-cc186ab1cc43-tmp\") pod \"collector-t9hxr\" (UID: \"42d2f75b-129e-4f56-bc30-cc186ab1cc43\") " pod="openshift-logging/collector-t9hxr" Dec 10 11:03:41 crc kubenswrapper[4780]: I1210 11:03:41.702116 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"collector-token\" (UniqueName: \"kubernetes.io/secret/42d2f75b-129e-4f56-bc30-cc186ab1cc43-collector-token\") pod \"collector-t9hxr\" (UID: \"42d2f75b-129e-4f56-bc30-cc186ab1cc43\") " pod="openshift-logging/collector-t9hxr" Dec 10 11:03:41 crc kubenswrapper[4780]: I1210 11:03:41.705289 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5d6qc\" (UniqueName: \"kubernetes.io/projected/42d2f75b-129e-4f56-bc30-cc186ab1cc43-kube-api-access-5d6qc\") pod \"collector-t9hxr\" (UID: \"42d2f75b-129e-4f56-bc30-cc186ab1cc43\") " pod="openshift-logging/collector-t9hxr" Dec 10 11:03:41 crc kubenswrapper[4780]: I1210 11:03:41.707807 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sa-token\" (UniqueName: \"kubernetes.io/projected/42d2f75b-129e-4f56-bc30-cc186ab1cc43-sa-token\") pod \"collector-t9hxr\" (UID: \"42d2f75b-129e-4f56-bc30-cc186ab1cc43\") " pod="openshift-logging/collector-t9hxr" Dec 10 11:03:42 crc kubenswrapper[4780]: I1210 11:03:42.189854 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"collector-syslog-receiver\" (UniqueName: \"kubernetes.io/secret/42d2f75b-129e-4f56-bc30-cc186ab1cc43-collector-syslog-receiver\") pod \"collector-t9hxr\" (UID: \"42d2f75b-129e-4f56-bc30-cc186ab1cc43\") " pod="openshift-logging/collector-t9hxr" Dec 10 11:03:42 crc kubenswrapper[4780]: I1210 11:03:42.190781 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics\" (UniqueName: \"kubernetes.io/secret/42d2f75b-129e-4f56-bc30-cc186ab1cc43-metrics\") pod \"collector-t9hxr\" (UID: \"42d2f75b-129e-4f56-bc30-cc186ab1cc43\") " pod="openshift-logging/collector-t9hxr" Dec 10 11:03:42 crc 
kubenswrapper[4780]: I1210 11:03:42.196326 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"collector-syslog-receiver\" (UniqueName: \"kubernetes.io/secret/42d2f75b-129e-4f56-bc30-cc186ab1cc43-collector-syslog-receiver\") pod \"collector-t9hxr\" (UID: \"42d2f75b-129e-4f56-bc30-cc186ab1cc43\") " pod="openshift-logging/collector-t9hxr" Dec 10 11:03:42 crc kubenswrapper[4780]: I1210 11:03:42.197067 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics\" (UniqueName: \"kubernetes.io/secret/42d2f75b-129e-4f56-bc30-cc186ab1cc43-metrics\") pod \"collector-t9hxr\" (UID: \"42d2f75b-129e-4f56-bc30-cc186ab1cc43\") " pod="openshift-logging/collector-t9hxr" Dec 10 11:03:42 crc kubenswrapper[4780]: I1210 11:03:42.483022 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-logging/collector-t9hxr" Dec 10 11:03:42 crc kubenswrapper[4780]: I1210 11:03:42.505898 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-logging/collector-t9hxr" Dec 10 11:03:42 crc kubenswrapper[4780]: I1210 11:03:42.705988 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-openshift-service-cacrt\" (UniqueName: \"kubernetes.io/configmap/42d2f75b-129e-4f56-bc30-cc186ab1cc43-config-openshift-service-cacrt\") pod \"42d2f75b-129e-4f56-bc30-cc186ab1cc43\" (UID: \"42d2f75b-129e-4f56-bc30-cc186ab1cc43\") " Dec 10 11:03:42 crc kubenswrapper[4780]: I1210 11:03:42.707038 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5d6qc\" (UniqueName: \"kubernetes.io/projected/42d2f75b-129e-4f56-bc30-cc186ab1cc43-kube-api-access-5d6qc\") pod \"42d2f75b-129e-4f56-bc30-cc186ab1cc43\" (UID: \"42d2f75b-129e-4f56-bc30-cc186ab1cc43\") " Dec 10 11:03:42 crc kubenswrapper[4780]: I1210 11:03:42.707193 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/42d2f75b-129e-4f56-bc30-cc186ab1cc43-tmp\") pod \"42d2f75b-129e-4f56-bc30-cc186ab1cc43\" (UID: \"42d2f75b-129e-4f56-bc30-cc186ab1cc43\") " Dec 10 11:03:42 crc kubenswrapper[4780]: I1210 11:03:42.707315 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"entrypoint\" (UniqueName: \"kubernetes.io/configmap/42d2f75b-129e-4f56-bc30-cc186ab1cc43-entrypoint\") pod \"42d2f75b-129e-4f56-bc30-cc186ab1cc43\" (UID: \"42d2f75b-129e-4f56-bc30-cc186ab1cc43\") " Dec 10 11:03:42 crc kubenswrapper[4780]: I1210 11:03:42.707501 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sa-token\" (UniqueName: \"kubernetes.io/projected/42d2f75b-129e-4f56-bc30-cc186ab1cc43-sa-token\") pod \"42d2f75b-129e-4f56-bc30-cc186ab1cc43\" (UID: \"42d2f75b-129e-4f56-bc30-cc186ab1cc43\") " Dec 10 11:03:42 crc kubenswrapper[4780]: I1210 11:03:42.707622 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/42d2f75b-129e-4f56-bc30-cc186ab1cc43-trusted-ca\") pod \"42d2f75b-129e-4f56-bc30-cc186ab1cc43\" (UID: \"42d2f75b-129e-4f56-bc30-cc186ab1cc43\") " Dec 10 11:03:42 crc kubenswrapper[4780]: I1210 11:03:42.707733 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics\" (UniqueName: \"kubernetes.io/secret/42d2f75b-129e-4f56-bc30-cc186ab1cc43-metrics\") pod \"42d2f75b-129e-4f56-bc30-cc186ab1cc43\" (UID: \"42d2f75b-129e-4f56-bc30-cc186ab1cc43\") " Dec 10 11:03:42 crc 
kubenswrapper[4780]: I1210 11:03:42.707899 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/42d2f75b-129e-4f56-bc30-cc186ab1cc43-config\") pod \"42d2f75b-129e-4f56-bc30-cc186ab1cc43\" (UID: \"42d2f75b-129e-4f56-bc30-cc186ab1cc43\") " Dec 10 11:03:42 crc kubenswrapper[4780]: I1210 11:03:42.708067 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"collector-token\" (UniqueName: \"kubernetes.io/secret/42d2f75b-129e-4f56-bc30-cc186ab1cc43-collector-token\") pod \"42d2f75b-129e-4f56-bc30-cc186ab1cc43\" (UID: \"42d2f75b-129e-4f56-bc30-cc186ab1cc43\") " Dec 10 11:03:42 crc kubenswrapper[4780]: I1210 11:03:42.708183 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"datadir\" (UniqueName: \"kubernetes.io/host-path/42d2f75b-129e-4f56-bc30-cc186ab1cc43-datadir\") pod \"42d2f75b-129e-4f56-bc30-cc186ab1cc43\" (UID: \"42d2f75b-129e-4f56-bc30-cc186ab1cc43\") " Dec 10 11:03:42 crc kubenswrapper[4780]: I1210 11:03:42.708285 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"collector-syslog-receiver\" (UniqueName: \"kubernetes.io/secret/42d2f75b-129e-4f56-bc30-cc186ab1cc43-collector-syslog-receiver\") pod \"42d2f75b-129e-4f56-bc30-cc186ab1cc43\" (UID: \"42d2f75b-129e-4f56-bc30-cc186ab1cc43\") " Dec 10 11:03:42 crc kubenswrapper[4780]: I1210 11:03:42.706692 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/42d2f75b-129e-4f56-bc30-cc186ab1cc43-config-openshift-service-cacrt" (OuterVolumeSpecName: "config-openshift-service-cacrt") pod "42d2f75b-129e-4f56-bc30-cc186ab1cc43" (UID: "42d2f75b-129e-4f56-bc30-cc186ab1cc43"). InnerVolumeSpecName "config-openshift-service-cacrt". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 11:03:42 crc kubenswrapper[4780]: I1210 11:03:42.708842 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/42d2f75b-129e-4f56-bc30-cc186ab1cc43-datadir" (OuterVolumeSpecName: "datadir") pod "42d2f75b-129e-4f56-bc30-cc186ab1cc43" (UID: "42d2f75b-129e-4f56-bc30-cc186ab1cc43"). InnerVolumeSpecName "datadir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 10 11:03:42 crc kubenswrapper[4780]: I1210 11:03:42.709338 4780 reconciler_common.go:293] "Volume detached for volume \"datadir\" (UniqueName: \"kubernetes.io/host-path/42d2f75b-129e-4f56-bc30-cc186ab1cc43-datadir\") on node \"crc\" DevicePath \"\"" Dec 10 11:03:42 crc kubenswrapper[4780]: I1210 11:03:42.709547 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/42d2f75b-129e-4f56-bc30-cc186ab1cc43-config" (OuterVolumeSpecName: "config") pod "42d2f75b-129e-4f56-bc30-cc186ab1cc43" (UID: "42d2f75b-129e-4f56-bc30-cc186ab1cc43"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 11:03:42 crc kubenswrapper[4780]: I1210 11:03:42.709794 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/42d2f75b-129e-4f56-bc30-cc186ab1cc43-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "42d2f75b-129e-4f56-bc30-cc186ab1cc43" (UID: "42d2f75b-129e-4f56-bc30-cc186ab1cc43"). InnerVolumeSpecName "trusted-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 11:03:42 crc kubenswrapper[4780]: I1210 11:03:42.710532 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/42d2f75b-129e-4f56-bc30-cc186ab1cc43-entrypoint" (OuterVolumeSpecName: "entrypoint") pod "42d2f75b-129e-4f56-bc30-cc186ab1cc43" (UID: "42d2f75b-129e-4f56-bc30-cc186ab1cc43"). InnerVolumeSpecName "entrypoint". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 11:03:42 crc kubenswrapper[4780]: I1210 11:03:42.719300 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/42d2f75b-129e-4f56-bc30-cc186ab1cc43-collector-token" (OuterVolumeSpecName: "collector-token") pod "42d2f75b-129e-4f56-bc30-cc186ab1cc43" (UID: "42d2f75b-129e-4f56-bc30-cc186ab1cc43"). InnerVolumeSpecName "collector-token". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:03:42 crc kubenswrapper[4780]: I1210 11:03:42.719365 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/42d2f75b-129e-4f56-bc30-cc186ab1cc43-collector-syslog-receiver" (OuterVolumeSpecName: "collector-syslog-receiver") pod "42d2f75b-129e-4f56-bc30-cc186ab1cc43" (UID: "42d2f75b-129e-4f56-bc30-cc186ab1cc43"). InnerVolumeSpecName "collector-syslog-receiver". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:03:42 crc kubenswrapper[4780]: I1210 11:03:42.719380 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/42d2f75b-129e-4f56-bc30-cc186ab1cc43-kube-api-access-5d6qc" (OuterVolumeSpecName: "kube-api-access-5d6qc") pod "42d2f75b-129e-4f56-bc30-cc186ab1cc43" (UID: "42d2f75b-129e-4f56-bc30-cc186ab1cc43"). InnerVolumeSpecName "kube-api-access-5d6qc". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:03:42 crc kubenswrapper[4780]: I1210 11:03:42.719382 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/42d2f75b-129e-4f56-bc30-cc186ab1cc43-tmp" (OuterVolumeSpecName: "tmp") pod "42d2f75b-129e-4f56-bc30-cc186ab1cc43" (UID: "42d2f75b-129e-4f56-bc30-cc186ab1cc43"). InnerVolumeSpecName "tmp". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 11:03:42 crc kubenswrapper[4780]: I1210 11:03:42.719405 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/42d2f75b-129e-4f56-bc30-cc186ab1cc43-sa-token" (OuterVolumeSpecName: "sa-token") pod "42d2f75b-129e-4f56-bc30-cc186ab1cc43" (UID: "42d2f75b-129e-4f56-bc30-cc186ab1cc43"). InnerVolumeSpecName "sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:03:42 crc kubenswrapper[4780]: I1210 11:03:42.726298 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/42d2f75b-129e-4f56-bc30-cc186ab1cc43-metrics" (OuterVolumeSpecName: "metrics") pod "42d2f75b-129e-4f56-bc30-cc186ab1cc43" (UID: "42d2f75b-129e-4f56-bc30-cc186ab1cc43"). InnerVolumeSpecName "metrics". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:03:42 crc kubenswrapper[4780]: I1210 11:03:42.810854 4780 reconciler_common.go:293] "Volume detached for volume \"sa-token\" (UniqueName: \"kubernetes.io/projected/42d2f75b-129e-4f56-bc30-cc186ab1cc43-sa-token\") on node \"crc\" DevicePath \"\"" Dec 10 11:03:42 crc kubenswrapper[4780]: I1210 11:03:42.811206 4780 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/42d2f75b-129e-4f56-bc30-cc186ab1cc43-trusted-ca\") on node \"crc\" DevicePath \"\"" Dec 10 11:03:42 crc kubenswrapper[4780]: I1210 11:03:42.811284 4780 reconciler_common.go:293] "Volume detached for volume \"metrics\" (UniqueName: \"kubernetes.io/secret/42d2f75b-129e-4f56-bc30-cc186ab1cc43-metrics\") on node \"crc\" DevicePath \"\"" Dec 10 11:03:42 crc kubenswrapper[4780]: I1210 11:03:42.811375 4780 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/42d2f75b-129e-4f56-bc30-cc186ab1cc43-config\") on node \"crc\" DevicePath \"\"" Dec 10 11:03:42 crc kubenswrapper[4780]: I1210 11:03:42.811475 4780 reconciler_common.go:293] "Volume detached for volume \"collector-token\" (UniqueName: \"kubernetes.io/secret/42d2f75b-129e-4f56-bc30-cc186ab1cc43-collector-token\") on node \"crc\" DevicePath \"\"" Dec 10 11:03:42 crc kubenswrapper[4780]: I1210 11:03:42.811557 4780 reconciler_common.go:293] "Volume detached for volume \"collector-syslog-receiver\" (UniqueName: \"kubernetes.io/secret/42d2f75b-129e-4f56-bc30-cc186ab1cc43-collector-syslog-receiver\") on node \"crc\" DevicePath \"\"" Dec 10 11:03:42 crc kubenswrapper[4780]: I1210 11:03:42.811643 4780 reconciler_common.go:293] "Volume detached for volume \"config-openshift-service-cacrt\" (UniqueName: \"kubernetes.io/configmap/42d2f75b-129e-4f56-bc30-cc186ab1cc43-config-openshift-service-cacrt\") on node \"crc\" DevicePath \"\"" Dec 10 11:03:42 crc kubenswrapper[4780]: I1210 11:03:42.811712 4780 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5d6qc\" (UniqueName: \"kubernetes.io/projected/42d2f75b-129e-4f56-bc30-cc186ab1cc43-kube-api-access-5d6qc\") on node \"crc\" DevicePath \"\"" Dec 10 11:03:42 crc kubenswrapper[4780]: I1210 11:03:42.811779 4780 reconciler_common.go:293] "Volume detached for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/42d2f75b-129e-4f56-bc30-cc186ab1cc43-tmp\") on node \"crc\" DevicePath \"\"" Dec 10 11:03:42 crc kubenswrapper[4780]: I1210 11:03:42.811845 4780 reconciler_common.go:293] "Volume detached for volume \"entrypoint\" (UniqueName: \"kubernetes.io/configmap/42d2f75b-129e-4f56-bc30-cc186ab1cc43-entrypoint\") on node \"crc\" DevicePath \"\"" Dec 10 11:03:43 crc kubenswrapper[4780]: I1210 11:03:43.496280 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-logging/collector-t9hxr" Dec 10 11:03:43 crc kubenswrapper[4780]: I1210 11:03:43.557989 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-logging/collector-t9hxr"] Dec 10 11:03:43 crc kubenswrapper[4780]: I1210 11:03:43.566400 4780 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-logging/collector-t9hxr"] Dec 10 11:03:43 crc kubenswrapper[4780]: I1210 11:03:43.581704 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-logging/collector-mbb2k"] Dec 10 11:03:43 crc kubenswrapper[4780]: I1210 11:03:43.583507 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-logging/collector-mbb2k" Dec 10 11:03:43 crc kubenswrapper[4780]: I1210 11:03:43.587030 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"collector-dockercfg-mhchm" Dec 10 11:03:43 crc kubenswrapper[4780]: I1210 11:03:43.587274 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-logging"/"collector-config" Dec 10 11:03:43 crc kubenswrapper[4780]: I1210 11:03:43.590230 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"collector-syslog-receiver" Dec 10 11:03:43 crc kubenswrapper[4780]: I1210 11:03:43.590808 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"collector-metrics" Dec 10 11:03:43 crc kubenswrapper[4780]: I1210 11:03:43.591392 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"collector-token" Dec 10 11:03:43 crc kubenswrapper[4780]: I1210 11:03:43.603192 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/collector-mbb2k"] Dec 10 11:03:43 crc kubenswrapper[4780]: I1210 11:03:43.604131 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-logging"/"collector-trustbundle" Dec 10 11:03:43 crc kubenswrapper[4780]: I1210 11:03:43.727542 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/8f1006aa-0bb6-451f-9096-75c6e760d2db-tmp\") pod \"collector-mbb2k\" (UID: \"8f1006aa-0bb6-451f-9096-75c6e760d2db\") " pod="openshift-logging/collector-mbb2k" Dec 10 11:03:43 crc kubenswrapper[4780]: I1210 11:03:43.727652 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-openshift-service-cacrt\" (UniqueName: \"kubernetes.io/configmap/8f1006aa-0bb6-451f-9096-75c6e760d2db-config-openshift-service-cacrt\") pod \"collector-mbb2k\" (UID: \"8f1006aa-0bb6-451f-9096-75c6e760d2db\") " pod="openshift-logging/collector-mbb2k" Dec 10 11:03:43 crc kubenswrapper[4780]: I1210 11:03:43.727710 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics\" (UniqueName: \"kubernetes.io/secret/8f1006aa-0bb6-451f-9096-75c6e760d2db-metrics\") pod \"collector-mbb2k\" (UID: \"8f1006aa-0bb6-451f-9096-75c6e760d2db\") " pod="openshift-logging/collector-mbb2k" Dec 10 11:03:43 crc kubenswrapper[4780]: I1210 11:03:43.727847 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sa-token\" (UniqueName: \"kubernetes.io/projected/8f1006aa-0bb6-451f-9096-75c6e760d2db-sa-token\") pod \"collector-mbb2k\" (UID: \"8f1006aa-0bb6-451f-9096-75c6e760d2db\") " pod="openshift-logging/collector-mbb2k" Dec 10 11:03:43 crc kubenswrapper[4780]: I1210 11:03:43.727971 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/8f1006aa-0bb6-451f-9096-75c6e760d2db-trusted-ca\") pod \"collector-mbb2k\" (UID: \"8f1006aa-0bb6-451f-9096-75c6e760d2db\") " pod="openshift-logging/collector-mbb2k" Dec 10 11:03:43 crc kubenswrapper[4780]: I1210 11:03:43.728008 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"collector-syslog-receiver\" (UniqueName: \"kubernetes.io/secret/8f1006aa-0bb6-451f-9096-75c6e760d2db-collector-syslog-receiver\") pod \"collector-mbb2k\" (UID: 
\"8f1006aa-0bb6-451f-9096-75c6e760d2db\") " pod="openshift-logging/collector-mbb2k" Dec 10 11:03:43 crc kubenswrapper[4780]: I1210 11:03:43.728128 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8f1006aa-0bb6-451f-9096-75c6e760d2db-config\") pod \"collector-mbb2k\" (UID: \"8f1006aa-0bb6-451f-9096-75c6e760d2db\") " pod="openshift-logging/collector-mbb2k" Dec 10 11:03:43 crc kubenswrapper[4780]: I1210 11:03:43.728191 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"collector-token\" (UniqueName: \"kubernetes.io/secret/8f1006aa-0bb6-451f-9096-75c6e760d2db-collector-token\") pod \"collector-mbb2k\" (UID: \"8f1006aa-0bb6-451f-9096-75c6e760d2db\") " pod="openshift-logging/collector-mbb2k" Dec 10 11:03:43 crc kubenswrapper[4780]: I1210 11:03:43.728284 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"datadir\" (UniqueName: \"kubernetes.io/host-path/8f1006aa-0bb6-451f-9096-75c6e760d2db-datadir\") pod \"collector-mbb2k\" (UID: \"8f1006aa-0bb6-451f-9096-75c6e760d2db\") " pod="openshift-logging/collector-mbb2k" Dec 10 11:03:43 crc kubenswrapper[4780]: I1210 11:03:43.728318 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"entrypoint\" (UniqueName: \"kubernetes.io/configmap/8f1006aa-0bb6-451f-9096-75c6e760d2db-entrypoint\") pod \"collector-mbb2k\" (UID: \"8f1006aa-0bb6-451f-9096-75c6e760d2db\") " pod="openshift-logging/collector-mbb2k" Dec 10 11:03:43 crc kubenswrapper[4780]: I1210 11:03:43.728341 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-p5szd\" (UniqueName: \"kubernetes.io/projected/8f1006aa-0bb6-451f-9096-75c6e760d2db-kube-api-access-p5szd\") pod \"collector-mbb2k\" (UID: \"8f1006aa-0bb6-451f-9096-75c6e760d2db\") " pod="openshift-logging/collector-mbb2k" Dec 10 11:03:43 crc kubenswrapper[4780]: I1210 11:03:43.829892 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"collector-token\" (UniqueName: \"kubernetes.io/secret/8f1006aa-0bb6-451f-9096-75c6e760d2db-collector-token\") pod \"collector-mbb2k\" (UID: \"8f1006aa-0bb6-451f-9096-75c6e760d2db\") " pod="openshift-logging/collector-mbb2k" Dec 10 11:03:43 crc kubenswrapper[4780]: I1210 11:03:43.830067 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"datadir\" (UniqueName: \"kubernetes.io/host-path/8f1006aa-0bb6-451f-9096-75c6e760d2db-datadir\") pod \"collector-mbb2k\" (UID: \"8f1006aa-0bb6-451f-9096-75c6e760d2db\") " pod="openshift-logging/collector-mbb2k" Dec 10 11:03:43 crc kubenswrapper[4780]: I1210 11:03:43.830103 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"entrypoint\" (UniqueName: \"kubernetes.io/configmap/8f1006aa-0bb6-451f-9096-75c6e760d2db-entrypoint\") pod \"collector-mbb2k\" (UID: \"8f1006aa-0bb6-451f-9096-75c6e760d2db\") " pod="openshift-logging/collector-mbb2k" Dec 10 11:03:43 crc kubenswrapper[4780]: I1210 11:03:43.830132 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-p5szd\" (UniqueName: \"kubernetes.io/projected/8f1006aa-0bb6-451f-9096-75c6e760d2db-kube-api-access-p5szd\") pod \"collector-mbb2k\" (UID: \"8f1006aa-0bb6-451f-9096-75c6e760d2db\") " pod="openshift-logging/collector-mbb2k" Dec 10 11:03:43 crc kubenswrapper[4780]: I1210 11:03:43.830188 4780 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/8f1006aa-0bb6-451f-9096-75c6e760d2db-tmp\") pod \"collector-mbb2k\" (UID: \"8f1006aa-0bb6-451f-9096-75c6e760d2db\") " pod="openshift-logging/collector-mbb2k" Dec 10 11:03:43 crc kubenswrapper[4780]: I1210 11:03:43.830264 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-openshift-service-cacrt\" (UniqueName: \"kubernetes.io/configmap/8f1006aa-0bb6-451f-9096-75c6e760d2db-config-openshift-service-cacrt\") pod \"collector-mbb2k\" (UID: \"8f1006aa-0bb6-451f-9096-75c6e760d2db\") " pod="openshift-logging/collector-mbb2k" Dec 10 11:03:43 crc kubenswrapper[4780]: I1210 11:03:43.830329 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics\" (UniqueName: \"kubernetes.io/secret/8f1006aa-0bb6-451f-9096-75c6e760d2db-metrics\") pod \"collector-mbb2k\" (UID: \"8f1006aa-0bb6-451f-9096-75c6e760d2db\") " pod="openshift-logging/collector-mbb2k" Dec 10 11:03:43 crc kubenswrapper[4780]: I1210 11:03:43.830383 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sa-token\" (UniqueName: \"kubernetes.io/projected/8f1006aa-0bb6-451f-9096-75c6e760d2db-sa-token\") pod \"collector-mbb2k\" (UID: \"8f1006aa-0bb6-451f-9096-75c6e760d2db\") " pod="openshift-logging/collector-mbb2k" Dec 10 11:03:43 crc kubenswrapper[4780]: I1210 11:03:43.830417 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/8f1006aa-0bb6-451f-9096-75c6e760d2db-trusted-ca\") pod \"collector-mbb2k\" (UID: \"8f1006aa-0bb6-451f-9096-75c6e760d2db\") " pod="openshift-logging/collector-mbb2k" Dec 10 11:03:43 crc kubenswrapper[4780]: I1210 11:03:43.830463 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"collector-syslog-receiver\" (UniqueName: \"kubernetes.io/secret/8f1006aa-0bb6-451f-9096-75c6e760d2db-collector-syslog-receiver\") pod \"collector-mbb2k\" (UID: \"8f1006aa-0bb6-451f-9096-75c6e760d2db\") " pod="openshift-logging/collector-mbb2k" Dec 10 11:03:43 crc kubenswrapper[4780]: I1210 11:03:43.830512 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8f1006aa-0bb6-451f-9096-75c6e760d2db-config\") pod \"collector-mbb2k\" (UID: \"8f1006aa-0bb6-451f-9096-75c6e760d2db\") " pod="openshift-logging/collector-mbb2k" Dec 10 11:03:43 crc kubenswrapper[4780]: I1210 11:03:43.831678 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"datadir\" (UniqueName: \"kubernetes.io/host-path/8f1006aa-0bb6-451f-9096-75c6e760d2db-datadir\") pod \"collector-mbb2k\" (UID: \"8f1006aa-0bb6-451f-9096-75c6e760d2db\") " pod="openshift-logging/collector-mbb2k" Dec 10 11:03:43 crc kubenswrapper[4780]: I1210 11:03:43.835349 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/8f1006aa-0bb6-451f-9096-75c6e760d2db-tmp\") pod \"collector-mbb2k\" (UID: \"8f1006aa-0bb6-451f-9096-75c6e760d2db\") " pod="openshift-logging/collector-mbb2k" Dec 10 11:03:43 crc kubenswrapper[4780]: I1210 11:03:43.836987 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"entrypoint\" (UniqueName: \"kubernetes.io/configmap/8f1006aa-0bb6-451f-9096-75c6e760d2db-entrypoint\") pod \"collector-mbb2k\" (UID: \"8f1006aa-0bb6-451f-9096-75c6e760d2db\") " pod="openshift-logging/collector-mbb2k" Dec 
10 11:03:43 crc kubenswrapper[4780]: I1210 11:03:43.837323 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8f1006aa-0bb6-451f-9096-75c6e760d2db-config\") pod \"collector-mbb2k\" (UID: \"8f1006aa-0bb6-451f-9096-75c6e760d2db\") " pod="openshift-logging/collector-mbb2k" Dec 10 11:03:43 crc kubenswrapper[4780]: I1210 11:03:43.841828 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"collector-syslog-receiver\" (UniqueName: \"kubernetes.io/secret/8f1006aa-0bb6-451f-9096-75c6e760d2db-collector-syslog-receiver\") pod \"collector-mbb2k\" (UID: \"8f1006aa-0bb6-451f-9096-75c6e760d2db\") " pod="openshift-logging/collector-mbb2k" Dec 10 11:03:43 crc kubenswrapper[4780]: I1210 11:03:43.841878 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics\" (UniqueName: \"kubernetes.io/secret/8f1006aa-0bb6-451f-9096-75c6e760d2db-metrics\") pod \"collector-mbb2k\" (UID: \"8f1006aa-0bb6-451f-9096-75c6e760d2db\") " pod="openshift-logging/collector-mbb2k" Dec 10 11:03:43 crc kubenswrapper[4780]: I1210 11:03:43.841995 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/8f1006aa-0bb6-451f-9096-75c6e760d2db-trusted-ca\") pod \"collector-mbb2k\" (UID: \"8f1006aa-0bb6-451f-9096-75c6e760d2db\") " pod="openshift-logging/collector-mbb2k" Dec 10 11:03:43 crc kubenswrapper[4780]: I1210 11:03:43.842154 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"collector-token\" (UniqueName: \"kubernetes.io/secret/8f1006aa-0bb6-451f-9096-75c6e760d2db-collector-token\") pod \"collector-mbb2k\" (UID: \"8f1006aa-0bb6-451f-9096-75c6e760d2db\") " pod="openshift-logging/collector-mbb2k" Dec 10 11:03:43 crc kubenswrapper[4780]: I1210 11:03:43.842317 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-openshift-service-cacrt\" (UniqueName: \"kubernetes.io/configmap/8f1006aa-0bb6-451f-9096-75c6e760d2db-config-openshift-service-cacrt\") pod \"collector-mbb2k\" (UID: \"8f1006aa-0bb6-451f-9096-75c6e760d2db\") " pod="openshift-logging/collector-mbb2k" Dec 10 11:03:43 crc kubenswrapper[4780]: I1210 11:03:43.853810 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sa-token\" (UniqueName: \"kubernetes.io/projected/8f1006aa-0bb6-451f-9096-75c6e760d2db-sa-token\") pod \"collector-mbb2k\" (UID: \"8f1006aa-0bb6-451f-9096-75c6e760d2db\") " pod="openshift-logging/collector-mbb2k" Dec 10 11:03:43 crc kubenswrapper[4780]: I1210 11:03:43.858786 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-p5szd\" (UniqueName: \"kubernetes.io/projected/8f1006aa-0bb6-451f-9096-75c6e760d2db-kube-api-access-p5szd\") pod \"collector-mbb2k\" (UID: \"8f1006aa-0bb6-451f-9096-75c6e760d2db\") " pod="openshift-logging/collector-mbb2k" Dec 10 11:03:43 crc kubenswrapper[4780]: I1210 11:03:43.926255 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-logging/collector-mbb2k" Dec 10 11:03:43 crc kubenswrapper[4780]: I1210 11:03:43.970393 4780 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="42d2f75b-129e-4f56-bc30-cc186ab1cc43" path="/var/lib/kubelet/pods/42d2f75b-129e-4f56-bc30-cc186ab1cc43/volumes" Dec 10 11:03:44 crc kubenswrapper[4780]: I1210 11:03:44.197372 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/collector-mbb2k"] Dec 10 11:03:44 crc kubenswrapper[4780]: I1210 11:03:44.219113 4780 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Dec 10 11:03:44 crc kubenswrapper[4780]: I1210 11:03:44.507044 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/collector-mbb2k" event={"ID":"8f1006aa-0bb6-451f-9096-75c6e760d2db","Type":"ContainerStarted","Data":"cb464c714a572021f53976f4c8307518716729fe9286e9b1a4ca49eaa9195215"} Dec 10 11:03:53 crc kubenswrapper[4780]: I1210 11:03:53.840278 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/collector-mbb2k" event={"ID":"8f1006aa-0bb6-451f-9096-75c6e760d2db","Type":"ContainerStarted","Data":"53289cd5b4531bd845864a457d46da1756225844cc383ec59acbf2f2530f528b"} Dec 10 11:03:53 crc kubenswrapper[4780]: I1210 11:03:53.875743 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-logging/collector-mbb2k" podStartSLOduration=2.030018443 podStartE2EDuration="10.875684339s" podCreationTimestamp="2025-12-10 11:03:43 +0000 UTC" firstStartedPulling="2025-12-10 11:03:44.218517432 +0000 UTC m=+1129.071910875" lastFinishedPulling="2025-12-10 11:03:53.064183328 +0000 UTC m=+1137.917576771" observedRunningTime="2025-12-10 11:03:53.866404706 +0000 UTC m=+1138.719798169" watchObservedRunningTime="2025-12-10 11:03:53.875684339 +0000 UTC m=+1138.729077782" Dec 10 11:04:27 crc kubenswrapper[4780]: I1210 11:04:27.981462 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212ftm7kp"] Dec 10 11:04:27 crc kubenswrapper[4780]: I1210 11:04:27.985940 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212ftm7kp" Dec 10 11:04:27 crc kubenswrapper[4780]: I1210 11:04:27.988468 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"default-dockercfg-vmwhc" Dec 10 11:04:28 crc kubenswrapper[4780]: I1210 11:04:28.010051 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212ftm7kp"] Dec 10 11:04:28 crc kubenswrapper[4780]: I1210 11:04:28.063355 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gr2th\" (UniqueName: \"kubernetes.io/projected/ed11f272-72d5-4d43-a169-f122ee540562-kube-api-access-gr2th\") pod \"5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212ftm7kp\" (UID: \"ed11f272-72d5-4d43-a169-f122ee540562\") " pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212ftm7kp" Dec 10 11:04:28 crc kubenswrapper[4780]: I1210 11:04:28.063873 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/ed11f272-72d5-4d43-a169-f122ee540562-bundle\") pod \"5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212ftm7kp\" (UID: \"ed11f272-72d5-4d43-a169-f122ee540562\") " pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212ftm7kp" Dec 10 11:04:28 crc kubenswrapper[4780]: I1210 11:04:28.064061 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/ed11f272-72d5-4d43-a169-f122ee540562-util\") pod \"5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212ftm7kp\" (UID: \"ed11f272-72d5-4d43-a169-f122ee540562\") " pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212ftm7kp" Dec 10 11:04:28 crc kubenswrapper[4780]: I1210 11:04:28.165333 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/ed11f272-72d5-4d43-a169-f122ee540562-util\") pod \"5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212ftm7kp\" (UID: \"ed11f272-72d5-4d43-a169-f122ee540562\") " pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212ftm7kp" Dec 10 11:04:28 crc kubenswrapper[4780]: I1210 11:04:28.165682 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gr2th\" (UniqueName: \"kubernetes.io/projected/ed11f272-72d5-4d43-a169-f122ee540562-kube-api-access-gr2th\") pod \"5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212ftm7kp\" (UID: \"ed11f272-72d5-4d43-a169-f122ee540562\") " pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212ftm7kp" Dec 10 11:04:28 crc kubenswrapper[4780]: I1210 11:04:28.165944 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/ed11f272-72d5-4d43-a169-f122ee540562-bundle\") pod \"5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212ftm7kp\" (UID: \"ed11f272-72d5-4d43-a169-f122ee540562\") " pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212ftm7kp" Dec 10 11:04:28 crc kubenswrapper[4780]: I1210 11:04:28.166065 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: 
\"kubernetes.io/empty-dir/ed11f272-72d5-4d43-a169-f122ee540562-util\") pod \"5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212ftm7kp\" (UID: \"ed11f272-72d5-4d43-a169-f122ee540562\") " pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212ftm7kp" Dec 10 11:04:28 crc kubenswrapper[4780]: I1210 11:04:28.166562 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/ed11f272-72d5-4d43-a169-f122ee540562-bundle\") pod \"5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212ftm7kp\" (UID: \"ed11f272-72d5-4d43-a169-f122ee540562\") " pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212ftm7kp" Dec 10 11:04:28 crc kubenswrapper[4780]: I1210 11:04:28.186463 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gr2th\" (UniqueName: \"kubernetes.io/projected/ed11f272-72d5-4d43-a169-f122ee540562-kube-api-access-gr2th\") pod \"5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212ftm7kp\" (UID: \"ed11f272-72d5-4d43-a169-f122ee540562\") " pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212ftm7kp" Dec 10 11:04:28 crc kubenswrapper[4780]: I1210 11:04:28.306594 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212ftm7kp" Dec 10 11:04:28 crc kubenswrapper[4780]: I1210 11:04:28.829462 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212ftm7kp"] Dec 10 11:04:29 crc kubenswrapper[4780]: I1210 11:04:29.513087 4780 generic.go:334] "Generic (PLEG): container finished" podID="ed11f272-72d5-4d43-a169-f122ee540562" containerID="c175c37b30d3be1d3fde354715af0dc2ebbc3a210372fc52be5f36ae66019948" exitCode=0 Dec 10 11:04:29 crc kubenswrapper[4780]: I1210 11:04:29.513144 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212ftm7kp" event={"ID":"ed11f272-72d5-4d43-a169-f122ee540562","Type":"ContainerDied","Data":"c175c37b30d3be1d3fde354715af0dc2ebbc3a210372fc52be5f36ae66019948"} Dec 10 11:04:29 crc kubenswrapper[4780]: I1210 11:04:29.513179 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212ftm7kp" event={"ID":"ed11f272-72d5-4d43-a169-f122ee540562","Type":"ContainerStarted","Data":"dd23dac576179ef0b370d8595e3885f8177fc6dab1881f76ae58206a238968bd"} Dec 10 11:04:32 crc kubenswrapper[4780]: I1210 11:04:32.749080 4780 generic.go:334] "Generic (PLEG): container finished" podID="ed11f272-72d5-4d43-a169-f122ee540562" containerID="35d262835886c44c1a073c599fde36d4f42da9d1a56c77515641f6c436399556" exitCode=0 Dec 10 11:04:32 crc kubenswrapper[4780]: I1210 11:04:32.749177 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212ftm7kp" event={"ID":"ed11f272-72d5-4d43-a169-f122ee540562","Type":"ContainerDied","Data":"35d262835886c44c1a073c599fde36d4f42da9d1a56c77515641f6c436399556"} Dec 10 11:04:33 crc kubenswrapper[4780]: I1210 11:04:33.764285 4780 generic.go:334] "Generic (PLEG): container finished" podID="ed11f272-72d5-4d43-a169-f122ee540562" containerID="ea4bee3bbd7cc4991a2fa1380c3220bc21660f6889a030b8d475b91408bb3210" exitCode=0 Dec 10 11:04:33 crc kubenswrapper[4780]: I1210 
11:04:33.764349 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212ftm7kp" event={"ID":"ed11f272-72d5-4d43-a169-f122ee540562","Type":"ContainerDied","Data":"ea4bee3bbd7cc4991a2fa1380c3220bc21660f6889a030b8d475b91408bb3210"} Dec 10 11:04:35 crc kubenswrapper[4780]: I1210 11:04:35.062175 4780 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212ftm7kp" Dec 10 11:04:35 crc kubenswrapper[4780]: I1210 11:04:35.357463 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/ed11f272-72d5-4d43-a169-f122ee540562-util\") pod \"ed11f272-72d5-4d43-a169-f122ee540562\" (UID: \"ed11f272-72d5-4d43-a169-f122ee540562\") " Dec 10 11:04:35 crc kubenswrapper[4780]: I1210 11:04:35.357568 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/ed11f272-72d5-4d43-a169-f122ee540562-bundle\") pod \"ed11f272-72d5-4d43-a169-f122ee540562\" (UID: \"ed11f272-72d5-4d43-a169-f122ee540562\") " Dec 10 11:04:35 crc kubenswrapper[4780]: I1210 11:04:35.357724 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gr2th\" (UniqueName: \"kubernetes.io/projected/ed11f272-72d5-4d43-a169-f122ee540562-kube-api-access-gr2th\") pod \"ed11f272-72d5-4d43-a169-f122ee540562\" (UID: \"ed11f272-72d5-4d43-a169-f122ee540562\") " Dec 10 11:04:35 crc kubenswrapper[4780]: I1210 11:04:35.360332 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ed11f272-72d5-4d43-a169-f122ee540562-bundle" (OuterVolumeSpecName: "bundle") pod "ed11f272-72d5-4d43-a169-f122ee540562" (UID: "ed11f272-72d5-4d43-a169-f122ee540562"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 11:04:35 crc kubenswrapper[4780]: I1210 11:04:35.368243 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ed11f272-72d5-4d43-a169-f122ee540562-kube-api-access-gr2th" (OuterVolumeSpecName: "kube-api-access-gr2th") pod "ed11f272-72d5-4d43-a169-f122ee540562" (UID: "ed11f272-72d5-4d43-a169-f122ee540562"). InnerVolumeSpecName "kube-api-access-gr2th". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:04:35 crc kubenswrapper[4780]: I1210 11:04:35.372565 4780 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gr2th\" (UniqueName: \"kubernetes.io/projected/ed11f272-72d5-4d43-a169-f122ee540562-kube-api-access-gr2th\") on node \"crc\" DevicePath \"\"" Dec 10 11:04:35 crc kubenswrapper[4780]: I1210 11:04:35.372605 4780 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/ed11f272-72d5-4d43-a169-f122ee540562-bundle\") on node \"crc\" DevicePath \"\"" Dec 10 11:04:35 crc kubenswrapper[4780]: I1210 11:04:35.384128 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ed11f272-72d5-4d43-a169-f122ee540562-util" (OuterVolumeSpecName: "util") pod "ed11f272-72d5-4d43-a169-f122ee540562" (UID: "ed11f272-72d5-4d43-a169-f122ee540562"). InnerVolumeSpecName "util". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 11:04:35 crc kubenswrapper[4780]: I1210 11:04:35.474444 4780 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/ed11f272-72d5-4d43-a169-f122ee540562-util\") on node \"crc\" DevicePath \"\"" Dec 10 11:04:35 crc kubenswrapper[4780]: I1210 11:04:35.781836 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212ftm7kp" event={"ID":"ed11f272-72d5-4d43-a169-f122ee540562","Type":"ContainerDied","Data":"dd23dac576179ef0b370d8595e3885f8177fc6dab1881f76ae58206a238968bd"} Dec 10 11:04:35 crc kubenswrapper[4780]: I1210 11:04:35.781907 4780 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212ftm7kp" Dec 10 11:04:35 crc kubenswrapper[4780]: I1210 11:04:35.782399 4780 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="dd23dac576179ef0b370d8595e3885f8177fc6dab1881f76ae58206a238968bd" Dec 10 11:04:40 crc kubenswrapper[4780]: I1210 11:04:40.530268 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-operator-5b5b58f5c8-jchz6"] Dec 10 11:04:40 crc kubenswrapper[4780]: E1210 11:04:40.531824 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ed11f272-72d5-4d43-a169-f122ee540562" containerName="util" Dec 10 11:04:40 crc kubenswrapper[4780]: I1210 11:04:40.531850 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="ed11f272-72d5-4d43-a169-f122ee540562" containerName="util" Dec 10 11:04:40 crc kubenswrapper[4780]: E1210 11:04:40.531865 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ed11f272-72d5-4d43-a169-f122ee540562" containerName="extract" Dec 10 11:04:40 crc kubenswrapper[4780]: I1210 11:04:40.531872 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="ed11f272-72d5-4d43-a169-f122ee540562" containerName="extract" Dec 10 11:04:40 crc kubenswrapper[4780]: E1210 11:04:40.531882 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ed11f272-72d5-4d43-a169-f122ee540562" containerName="pull" Dec 10 11:04:40 crc kubenswrapper[4780]: I1210 11:04:40.531889 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="ed11f272-72d5-4d43-a169-f122ee540562" containerName="pull" Dec 10 11:04:40 crc kubenswrapper[4780]: I1210 11:04:40.532156 4780 memory_manager.go:354] "RemoveStaleState removing state" podUID="ed11f272-72d5-4d43-a169-f122ee540562" containerName="extract" Dec 10 11:04:40 crc kubenswrapper[4780]: I1210 11:04:40.533197 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-operator-5b5b58f5c8-jchz6" Dec 10 11:04:40 crc kubenswrapper[4780]: I1210 11:04:40.535493 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"nmstate-operator-dockercfg-k8jr4" Dec 10 11:04:40 crc kubenswrapper[4780]: I1210 11:04:40.535984 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-nmstate"/"openshift-service-ca.crt" Dec 10 11:04:40 crc kubenswrapper[4780]: I1210 11:04:40.536191 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-nmstate"/"kube-root-ca.crt" Dec 10 11:04:40 crc kubenswrapper[4780]: I1210 11:04:40.552969 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-operator-5b5b58f5c8-jchz6"] Dec 10 11:04:40 crc kubenswrapper[4780]: I1210 11:04:40.726685 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-v5hgx\" (UniqueName: \"kubernetes.io/projected/3e4022f3-ad78-4495-beb0-74c4274026d5-kube-api-access-v5hgx\") pod \"nmstate-operator-5b5b58f5c8-jchz6\" (UID: \"3e4022f3-ad78-4495-beb0-74c4274026d5\") " pod="openshift-nmstate/nmstate-operator-5b5b58f5c8-jchz6" Dec 10 11:04:40 crc kubenswrapper[4780]: I1210 11:04:40.828205 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-v5hgx\" (UniqueName: \"kubernetes.io/projected/3e4022f3-ad78-4495-beb0-74c4274026d5-kube-api-access-v5hgx\") pod \"nmstate-operator-5b5b58f5c8-jchz6\" (UID: \"3e4022f3-ad78-4495-beb0-74c4274026d5\") " pod="openshift-nmstate/nmstate-operator-5b5b58f5c8-jchz6" Dec 10 11:04:40 crc kubenswrapper[4780]: I1210 11:04:40.849543 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-v5hgx\" (UniqueName: \"kubernetes.io/projected/3e4022f3-ad78-4495-beb0-74c4274026d5-kube-api-access-v5hgx\") pod \"nmstate-operator-5b5b58f5c8-jchz6\" (UID: \"3e4022f3-ad78-4495-beb0-74c4274026d5\") " pod="openshift-nmstate/nmstate-operator-5b5b58f5c8-jchz6" Dec 10 11:04:40 crc kubenswrapper[4780]: I1210 11:04:40.860181 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-operator-5b5b58f5c8-jchz6" Dec 10 11:04:41 crc kubenswrapper[4780]: I1210 11:04:41.256730 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-operator-5b5b58f5c8-jchz6"] Dec 10 11:04:41 crc kubenswrapper[4780]: I1210 11:04:41.862249 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-operator-5b5b58f5c8-jchz6" event={"ID":"3e4022f3-ad78-4495-beb0-74c4274026d5","Type":"ContainerStarted","Data":"f0b10229f593f573f5431cd35814a7524ea61909b846583e69421b922f36beab"} Dec 10 11:04:46 crc kubenswrapper[4780]: I1210 11:04:46.903095 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-operator-5b5b58f5c8-jchz6" event={"ID":"3e4022f3-ad78-4495-beb0-74c4274026d5","Type":"ContainerStarted","Data":"c046607dcb7f8766647e8a785b363e0720919317a40b6c4c0786244d963ebcd1"} Dec 10 11:04:46 crc kubenswrapper[4780]: I1210 11:04:46.926189 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-operator-5b5b58f5c8-jchz6" podStartSLOduration=2.484107443 podStartE2EDuration="6.926168463s" podCreationTimestamp="2025-12-10 11:04:40 +0000 UTC" firstStartedPulling="2025-12-10 11:04:41.270737445 +0000 UTC m=+1186.124130888" lastFinishedPulling="2025-12-10 11:04:45.712798465 +0000 UTC m=+1190.566191908" observedRunningTime="2025-12-10 11:04:46.922209484 +0000 UTC m=+1191.775602937" watchObservedRunningTime="2025-12-10 11:04:46.926168463 +0000 UTC m=+1191.779561906" Dec 10 11:04:48 crc kubenswrapper[4780]: I1210 11:04:48.376800 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-metrics-7f946cbc9-7dzr2"] Dec 10 11:04:48 crc kubenswrapper[4780]: I1210 11:04:48.378590 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-metrics-7f946cbc9-7dzr2" Dec 10 11:04:48 crc kubenswrapper[4780]: I1210 11:04:48.381775 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"nmstate-handler-dockercfg-rmplj" Dec 10 11:04:48 crc kubenswrapper[4780]: I1210 11:04:48.387984 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-webhook-5f6d4c5ccb-vqlpg"] Dec 10 11:04:48 crc kubenswrapper[4780]: I1210 11:04:48.390368 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-webhook-5f6d4c5ccb-vqlpg" Dec 10 11:04:48 crc kubenswrapper[4780]: I1210 11:04:48.394587 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"openshift-nmstate-webhook" Dec 10 11:04:48 crc kubenswrapper[4780]: I1210 11:04:48.396860 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-metrics-7f946cbc9-7dzr2"] Dec 10 11:04:48 crc kubenswrapper[4780]: I1210 11:04:48.424277 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-webhook-5f6d4c5ccb-vqlpg"] Dec 10 11:04:48 crc kubenswrapper[4780]: I1210 11:04:48.437510 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-handler-jx5fq"] Dec 10 11:04:48 crc kubenswrapper[4780]: I1210 11:04:48.442622 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-handler-jx5fq" Dec 10 11:04:48 crc kubenswrapper[4780]: I1210 11:04:48.561768 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nmstate-lock\" (UniqueName: \"kubernetes.io/host-path/e195bedf-9712-4f1f-a9f7-9f4dabdd710b-nmstate-lock\") pod \"nmstate-handler-jx5fq\" (UID: \"e195bedf-9712-4f1f-a9f7-9f4dabdd710b\") " pod="openshift-nmstate/nmstate-handler-jx5fq" Dec 10 11:04:48 crc kubenswrapper[4780]: I1210 11:04:48.562093 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jbdcj\" (UniqueName: \"kubernetes.io/projected/280c4015-3911-4b5b-b794-23e319640bd0-kube-api-access-jbdcj\") pod \"nmstate-metrics-7f946cbc9-7dzr2\" (UID: \"280c4015-3911-4b5b-b794-23e319640bd0\") " pod="openshift-nmstate/nmstate-metrics-7f946cbc9-7dzr2" Dec 10 11:04:48 crc kubenswrapper[4780]: I1210 11:04:48.562167 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovs-socket\" (UniqueName: \"kubernetes.io/host-path/e195bedf-9712-4f1f-a9f7-9f4dabdd710b-ovs-socket\") pod \"nmstate-handler-jx5fq\" (UID: \"e195bedf-9712-4f1f-a9f7-9f4dabdd710b\") " pod="openshift-nmstate/nmstate-handler-jx5fq" Dec 10 11:04:48 crc kubenswrapper[4780]: I1210 11:04:48.562209 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/52bc8018-7b47-4053-8bc1-3b686af14adf-tls-key-pair\") pod \"nmstate-webhook-5f6d4c5ccb-vqlpg\" (UID: \"52bc8018-7b47-4053-8bc1-3b686af14adf\") " pod="openshift-nmstate/nmstate-webhook-5f6d4c5ccb-vqlpg" Dec 10 11:04:48 crc kubenswrapper[4780]: I1210 11:04:48.562273 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dbus-socket\" (UniqueName: \"kubernetes.io/host-path/e195bedf-9712-4f1f-a9f7-9f4dabdd710b-dbus-socket\") pod \"nmstate-handler-jx5fq\" (UID: \"e195bedf-9712-4f1f-a9f7-9f4dabdd710b\") " pod="openshift-nmstate/nmstate-handler-jx5fq" Dec 10 11:04:48 crc kubenswrapper[4780]: I1210 11:04:48.562337 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5ghmk\" (UniqueName: \"kubernetes.io/projected/52bc8018-7b47-4053-8bc1-3b686af14adf-kube-api-access-5ghmk\") pod \"nmstate-webhook-5f6d4c5ccb-vqlpg\" (UID: \"52bc8018-7b47-4053-8bc1-3b686af14adf\") " pod="openshift-nmstate/nmstate-webhook-5f6d4c5ccb-vqlpg" Dec 10 11:04:48 crc kubenswrapper[4780]: I1210 11:04:48.562442 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jjgrp\" (UniqueName: \"kubernetes.io/projected/e195bedf-9712-4f1f-a9f7-9f4dabdd710b-kube-api-access-jjgrp\") pod \"nmstate-handler-jx5fq\" (UID: \"e195bedf-9712-4f1f-a9f7-9f4dabdd710b\") " pod="openshift-nmstate/nmstate-handler-jx5fq" Dec 10 11:04:48 crc kubenswrapper[4780]: I1210 11:04:48.592862 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-console-plugin-7fbb5f6569-b9mk2"] Dec 10 11:04:48 crc kubenswrapper[4780]: I1210 11:04:48.594006 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-console-plugin-7fbb5f6569-b9mk2" Dec 10 11:04:48 crc kubenswrapper[4780]: I1210 11:04:48.596743 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"plugin-serving-cert" Dec 10 11:04:48 crc kubenswrapper[4780]: I1210 11:04:48.596816 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"default-dockercfg-74vlv" Dec 10 11:04:48 crc kubenswrapper[4780]: I1210 11:04:48.600177 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-nmstate"/"nginx-conf" Dec 10 11:04:48 crc kubenswrapper[4780]: I1210 11:04:48.616524 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-console-plugin-7fbb5f6569-b9mk2"] Dec 10 11:04:48 crc kubenswrapper[4780]: I1210 11:04:48.665036 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jjgrp\" (UniqueName: \"kubernetes.io/projected/e195bedf-9712-4f1f-a9f7-9f4dabdd710b-kube-api-access-jjgrp\") pod \"nmstate-handler-jx5fq\" (UID: \"e195bedf-9712-4f1f-a9f7-9f4dabdd710b\") " pod="openshift-nmstate/nmstate-handler-jx5fq" Dec 10 11:04:48 crc kubenswrapper[4780]: I1210 11:04:48.665490 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nmstate-lock\" (UniqueName: \"kubernetes.io/host-path/e195bedf-9712-4f1f-a9f7-9f4dabdd710b-nmstate-lock\") pod \"nmstate-handler-jx5fq\" (UID: \"e195bedf-9712-4f1f-a9f7-9f4dabdd710b\") " pod="openshift-nmstate/nmstate-handler-jx5fq" Dec 10 11:04:48 crc kubenswrapper[4780]: I1210 11:04:48.665638 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nmstate-lock\" (UniqueName: \"kubernetes.io/host-path/e195bedf-9712-4f1f-a9f7-9f4dabdd710b-nmstate-lock\") pod \"nmstate-handler-jx5fq\" (UID: \"e195bedf-9712-4f1f-a9f7-9f4dabdd710b\") " pod="openshift-nmstate/nmstate-handler-jx5fq" Dec 10 11:04:48 crc kubenswrapper[4780]: I1210 11:04:48.665871 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jbdcj\" (UniqueName: \"kubernetes.io/projected/280c4015-3911-4b5b-b794-23e319640bd0-kube-api-access-jbdcj\") pod \"nmstate-metrics-7f946cbc9-7dzr2\" (UID: \"280c4015-3911-4b5b-b794-23e319640bd0\") " pod="openshift-nmstate/nmstate-metrics-7f946cbc9-7dzr2" Dec 10 11:04:48 crc kubenswrapper[4780]: I1210 11:04:48.665901 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovs-socket\" (UniqueName: \"kubernetes.io/host-path/e195bedf-9712-4f1f-a9f7-9f4dabdd710b-ovs-socket\") pod \"nmstate-handler-jx5fq\" (UID: \"e195bedf-9712-4f1f-a9f7-9f4dabdd710b\") " pod="openshift-nmstate/nmstate-handler-jx5fq" Dec 10 11:04:48 crc kubenswrapper[4780]: I1210 11:04:48.665955 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/52bc8018-7b47-4053-8bc1-3b686af14adf-tls-key-pair\") pod \"nmstate-webhook-5f6d4c5ccb-vqlpg\" (UID: \"52bc8018-7b47-4053-8bc1-3b686af14adf\") " pod="openshift-nmstate/nmstate-webhook-5f6d4c5ccb-vqlpg" Dec 10 11:04:48 crc kubenswrapper[4780]: I1210 11:04:48.666025 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovs-socket\" (UniqueName: \"kubernetes.io/host-path/e195bedf-9712-4f1f-a9f7-9f4dabdd710b-ovs-socket\") pod \"nmstate-handler-jx5fq\" (UID: \"e195bedf-9712-4f1f-a9f7-9f4dabdd710b\") " pod="openshift-nmstate/nmstate-handler-jx5fq" Dec 10 11:04:48 crc kubenswrapper[4780]: I1210 11:04:48.666047 
4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dbus-socket\" (UniqueName: \"kubernetes.io/host-path/e195bedf-9712-4f1f-a9f7-9f4dabdd710b-dbus-socket\") pod \"nmstate-handler-jx5fq\" (UID: \"e195bedf-9712-4f1f-a9f7-9f4dabdd710b\") " pod="openshift-nmstate/nmstate-handler-jx5fq" Dec 10 11:04:48 crc kubenswrapper[4780]: I1210 11:04:48.666073 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5ghmk\" (UniqueName: \"kubernetes.io/projected/52bc8018-7b47-4053-8bc1-3b686af14adf-kube-api-access-5ghmk\") pod \"nmstate-webhook-5f6d4c5ccb-vqlpg\" (UID: \"52bc8018-7b47-4053-8bc1-3b686af14adf\") " pod="openshift-nmstate/nmstate-webhook-5f6d4c5ccb-vqlpg" Dec 10 11:04:48 crc kubenswrapper[4780]: E1210 11:04:48.666190 4780 secret.go:188] Couldn't get secret openshift-nmstate/openshift-nmstate-webhook: secret "openshift-nmstate-webhook" not found Dec 10 11:04:48 crc kubenswrapper[4780]: E1210 11:04:48.666268 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/52bc8018-7b47-4053-8bc1-3b686af14adf-tls-key-pair podName:52bc8018-7b47-4053-8bc1-3b686af14adf nodeName:}" failed. No retries permitted until 2025-12-10 11:04:49.166236795 +0000 UTC m=+1194.019630238 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "tls-key-pair" (UniqueName: "kubernetes.io/secret/52bc8018-7b47-4053-8bc1-3b686af14adf-tls-key-pair") pod "nmstate-webhook-5f6d4c5ccb-vqlpg" (UID: "52bc8018-7b47-4053-8bc1-3b686af14adf") : secret "openshift-nmstate-webhook" not found Dec 10 11:04:48 crc kubenswrapper[4780]: I1210 11:04:48.666811 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dbus-socket\" (UniqueName: \"kubernetes.io/host-path/e195bedf-9712-4f1f-a9f7-9f4dabdd710b-dbus-socket\") pod \"nmstate-handler-jx5fq\" (UID: \"e195bedf-9712-4f1f-a9f7-9f4dabdd710b\") " pod="openshift-nmstate/nmstate-handler-jx5fq" Dec 10 11:04:48 crc kubenswrapper[4780]: I1210 11:04:48.697849 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jjgrp\" (UniqueName: \"kubernetes.io/projected/e195bedf-9712-4f1f-a9f7-9f4dabdd710b-kube-api-access-jjgrp\") pod \"nmstate-handler-jx5fq\" (UID: \"e195bedf-9712-4f1f-a9f7-9f4dabdd710b\") " pod="openshift-nmstate/nmstate-handler-jx5fq" Dec 10 11:04:48 crc kubenswrapper[4780]: I1210 11:04:48.698017 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jbdcj\" (UniqueName: \"kubernetes.io/projected/280c4015-3911-4b5b-b794-23e319640bd0-kube-api-access-jbdcj\") pod \"nmstate-metrics-7f946cbc9-7dzr2\" (UID: \"280c4015-3911-4b5b-b794-23e319640bd0\") " pod="openshift-nmstate/nmstate-metrics-7f946cbc9-7dzr2" Dec 10 11:04:48 crc kubenswrapper[4780]: I1210 11:04:48.702761 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5ghmk\" (UniqueName: \"kubernetes.io/projected/52bc8018-7b47-4053-8bc1-3b686af14adf-kube-api-access-5ghmk\") pod \"nmstate-webhook-5f6d4c5ccb-vqlpg\" (UID: \"52bc8018-7b47-4053-8bc1-3b686af14adf\") " pod="openshift-nmstate/nmstate-webhook-5f6d4c5ccb-vqlpg" Dec 10 11:04:48 crc kubenswrapper[4780]: I1210 11:04:48.705642 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-metrics-7f946cbc9-7dzr2" Dec 10 11:04:48 crc kubenswrapper[4780]: I1210 11:04:48.771512 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/ea0c76a7-e4a2-479c-8aa3-76ec59ce572b-nginx-conf\") pod \"nmstate-console-plugin-7fbb5f6569-b9mk2\" (UID: \"ea0c76a7-e4a2-479c-8aa3-76ec59ce572b\") " pod="openshift-nmstate/nmstate-console-plugin-7fbb5f6569-b9mk2" Dec 10 11:04:48 crc kubenswrapper[4780]: I1210 11:04:48.772193 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/ea0c76a7-e4a2-479c-8aa3-76ec59ce572b-plugin-serving-cert\") pod \"nmstate-console-plugin-7fbb5f6569-b9mk2\" (UID: \"ea0c76a7-e4a2-479c-8aa3-76ec59ce572b\") " pod="openshift-nmstate/nmstate-console-plugin-7fbb5f6569-b9mk2" Dec 10 11:04:48 crc kubenswrapper[4780]: I1210 11:04:48.772232 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-x9v52\" (UniqueName: \"kubernetes.io/projected/ea0c76a7-e4a2-479c-8aa3-76ec59ce572b-kube-api-access-x9v52\") pod \"nmstate-console-plugin-7fbb5f6569-b9mk2\" (UID: \"ea0c76a7-e4a2-479c-8aa3-76ec59ce572b\") " pod="openshift-nmstate/nmstate-console-plugin-7fbb5f6569-b9mk2" Dec 10 11:04:48 crc kubenswrapper[4780]: I1210 11:04:48.772694 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-handler-jx5fq" Dec 10 11:04:48 crc kubenswrapper[4780]: W1210 11:04:48.855081 4780 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pode195bedf_9712_4f1f_a9f7_9f4dabdd710b.slice/crio-243b94d52e167a7752029b9b8dd9df12ff0c111d4beee50dd4d63e4f67c4fb6a WatchSource:0}: Error finding container 243b94d52e167a7752029b9b8dd9df12ff0c111d4beee50dd4d63e4f67c4fb6a: Status 404 returned error can't find the container with id 243b94d52e167a7752029b9b8dd9df12ff0c111d4beee50dd4d63e4f67c4fb6a Dec 10 11:04:48 crc kubenswrapper[4780]: I1210 11:04:48.874439 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/ea0c76a7-e4a2-479c-8aa3-76ec59ce572b-nginx-conf\") pod \"nmstate-console-plugin-7fbb5f6569-b9mk2\" (UID: \"ea0c76a7-e4a2-479c-8aa3-76ec59ce572b\") " pod="openshift-nmstate/nmstate-console-plugin-7fbb5f6569-b9mk2" Dec 10 11:04:48 crc kubenswrapper[4780]: I1210 11:04:48.874538 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/ea0c76a7-e4a2-479c-8aa3-76ec59ce572b-plugin-serving-cert\") pod \"nmstate-console-plugin-7fbb5f6569-b9mk2\" (UID: \"ea0c76a7-e4a2-479c-8aa3-76ec59ce572b\") " pod="openshift-nmstate/nmstate-console-plugin-7fbb5f6569-b9mk2" Dec 10 11:04:48 crc kubenswrapper[4780]: I1210 11:04:48.874577 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-x9v52\" (UniqueName: \"kubernetes.io/projected/ea0c76a7-e4a2-479c-8aa3-76ec59ce572b-kube-api-access-x9v52\") pod \"nmstate-console-plugin-7fbb5f6569-b9mk2\" (UID: \"ea0c76a7-e4a2-479c-8aa3-76ec59ce572b\") " pod="openshift-nmstate/nmstate-console-plugin-7fbb5f6569-b9mk2" Dec 10 11:04:48 crc kubenswrapper[4780]: I1210 11:04:48.875788 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nginx-conf\" 
(UniqueName: \"kubernetes.io/configmap/ea0c76a7-e4a2-479c-8aa3-76ec59ce572b-nginx-conf\") pod \"nmstate-console-plugin-7fbb5f6569-b9mk2\" (UID: \"ea0c76a7-e4a2-479c-8aa3-76ec59ce572b\") " pod="openshift-nmstate/nmstate-console-plugin-7fbb5f6569-b9mk2" Dec 10 11:04:48 crc kubenswrapper[4780]: I1210 11:04:48.881613 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/console-56bd7bfcb4-rl4rr"] Dec 10 11:04:48 crc kubenswrapper[4780]: I1210 11:04:48.897873 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/ea0c76a7-e4a2-479c-8aa3-76ec59ce572b-plugin-serving-cert\") pod \"nmstate-console-plugin-7fbb5f6569-b9mk2\" (UID: \"ea0c76a7-e4a2-479c-8aa3-76ec59ce572b\") " pod="openshift-nmstate/nmstate-console-plugin-7fbb5f6569-b9mk2" Dec 10 11:04:48 crc kubenswrapper[4780]: I1210 11:04:48.902178 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-56bd7bfcb4-rl4rr" Dec 10 11:04:48 crc kubenswrapper[4780]: I1210 11:04:48.933170 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-x9v52\" (UniqueName: \"kubernetes.io/projected/ea0c76a7-e4a2-479c-8aa3-76ec59ce572b-kube-api-access-x9v52\") pod \"nmstate-console-plugin-7fbb5f6569-b9mk2\" (UID: \"ea0c76a7-e4a2-479c-8aa3-76ec59ce572b\") " pod="openshift-nmstate/nmstate-console-plugin-7fbb5f6569-b9mk2" Dec 10 11:04:48 crc kubenswrapper[4780]: I1210 11:04:48.938846 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-56bd7bfcb4-rl4rr"] Dec 10 11:04:48 crc kubenswrapper[4780]: I1210 11:04:48.992893 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/21a7de0d-bac7-4258-8062-414b665097c9-console-serving-cert\") pod \"console-56bd7bfcb4-rl4rr\" (UID: \"21a7de0d-bac7-4258-8062-414b665097c9\") " pod="openshift-console/console-56bd7bfcb4-rl4rr" Dec 10 11:04:48 crc kubenswrapper[4780]: I1210 11:04:48.993561 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/21a7de0d-bac7-4258-8062-414b665097c9-service-ca\") pod \"console-56bd7bfcb4-rl4rr\" (UID: \"21a7de0d-bac7-4258-8062-414b665097c9\") " pod="openshift-console/console-56bd7bfcb4-rl4rr" Dec 10 11:04:48 crc kubenswrapper[4780]: I1210 11:04:48.993720 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/21a7de0d-bac7-4258-8062-414b665097c9-trusted-ca-bundle\") pod \"console-56bd7bfcb4-rl4rr\" (UID: \"21a7de0d-bac7-4258-8062-414b665097c9\") " pod="openshift-console/console-56bd7bfcb4-rl4rr" Dec 10 11:04:48 crc kubenswrapper[4780]: I1210 11:04:48.993816 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/21a7de0d-bac7-4258-8062-414b665097c9-oauth-serving-cert\") pod \"console-56bd7bfcb4-rl4rr\" (UID: \"21a7de0d-bac7-4258-8062-414b665097c9\") " pod="openshift-console/console-56bd7bfcb4-rl4rr" Dec 10 11:04:48 crc kubenswrapper[4780]: I1210 11:04:48.994692 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/21a7de0d-bac7-4258-8062-414b665097c9-console-oauth-config\") pod 
\"console-56bd7bfcb4-rl4rr\" (UID: \"21a7de0d-bac7-4258-8062-414b665097c9\") " pod="openshift-console/console-56bd7bfcb4-rl4rr" Dec 10 11:04:48 crc kubenswrapper[4780]: I1210 11:04:48.994906 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/21a7de0d-bac7-4258-8062-414b665097c9-console-config\") pod \"console-56bd7bfcb4-rl4rr\" (UID: \"21a7de0d-bac7-4258-8062-414b665097c9\") " pod="openshift-console/console-56bd7bfcb4-rl4rr" Dec 10 11:04:48 crc kubenswrapper[4780]: I1210 11:04:48.996796 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-handler-jx5fq" event={"ID":"e195bedf-9712-4f1f-a9f7-9f4dabdd710b","Type":"ContainerStarted","Data":"243b94d52e167a7752029b9b8dd9df12ff0c111d4beee50dd4d63e4f67c4fb6a"} Dec 10 11:04:49 crc kubenswrapper[4780]: I1210 11:04:49.098797 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/21a7de0d-bac7-4258-8062-414b665097c9-trusted-ca-bundle\") pod \"console-56bd7bfcb4-rl4rr\" (UID: \"21a7de0d-bac7-4258-8062-414b665097c9\") " pod="openshift-console/console-56bd7bfcb4-rl4rr" Dec 10 11:04:49 crc kubenswrapper[4780]: I1210 11:04:49.098882 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/21a7de0d-bac7-4258-8062-414b665097c9-oauth-serving-cert\") pod \"console-56bd7bfcb4-rl4rr\" (UID: \"21a7de0d-bac7-4258-8062-414b665097c9\") " pod="openshift-console/console-56bd7bfcb4-rl4rr" Dec 10 11:04:49 crc kubenswrapper[4780]: I1210 11:04:49.098936 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/21a7de0d-bac7-4258-8062-414b665097c9-console-oauth-config\") pod \"console-56bd7bfcb4-rl4rr\" (UID: \"21a7de0d-bac7-4258-8062-414b665097c9\") " pod="openshift-console/console-56bd7bfcb4-rl4rr" Dec 10 11:04:49 crc kubenswrapper[4780]: I1210 11:04:49.098962 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-htdcj\" (UniqueName: \"kubernetes.io/projected/21a7de0d-bac7-4258-8062-414b665097c9-kube-api-access-htdcj\") pod \"console-56bd7bfcb4-rl4rr\" (UID: \"21a7de0d-bac7-4258-8062-414b665097c9\") " pod="openshift-console/console-56bd7bfcb4-rl4rr" Dec 10 11:04:49 crc kubenswrapper[4780]: I1210 11:04:49.099805 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/21a7de0d-bac7-4258-8062-414b665097c9-console-config\") pod \"console-56bd7bfcb4-rl4rr\" (UID: \"21a7de0d-bac7-4258-8062-414b665097c9\") " pod="openshift-console/console-56bd7bfcb4-rl4rr" Dec 10 11:04:49 crc kubenswrapper[4780]: I1210 11:04:49.099861 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/21a7de0d-bac7-4258-8062-414b665097c9-console-serving-cert\") pod \"console-56bd7bfcb4-rl4rr\" (UID: \"21a7de0d-bac7-4258-8062-414b665097c9\") " pod="openshift-console/console-56bd7bfcb4-rl4rr" Dec 10 11:04:49 crc kubenswrapper[4780]: I1210 11:04:49.100017 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/21a7de0d-bac7-4258-8062-414b665097c9-service-ca\") pod \"console-56bd7bfcb4-rl4rr\" (UID: 
\"21a7de0d-bac7-4258-8062-414b665097c9\") " pod="openshift-console/console-56bd7bfcb4-rl4rr" Dec 10 11:04:49 crc kubenswrapper[4780]: I1210 11:04:49.101470 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/21a7de0d-bac7-4258-8062-414b665097c9-oauth-serving-cert\") pod \"console-56bd7bfcb4-rl4rr\" (UID: \"21a7de0d-bac7-4258-8062-414b665097c9\") " pod="openshift-console/console-56bd7bfcb4-rl4rr" Dec 10 11:04:49 crc kubenswrapper[4780]: I1210 11:04:49.102875 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/21a7de0d-bac7-4258-8062-414b665097c9-trusted-ca-bundle\") pod \"console-56bd7bfcb4-rl4rr\" (UID: \"21a7de0d-bac7-4258-8062-414b665097c9\") " pod="openshift-console/console-56bd7bfcb4-rl4rr" Dec 10 11:04:49 crc kubenswrapper[4780]: I1210 11:04:49.103067 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/21a7de0d-bac7-4258-8062-414b665097c9-console-config\") pod \"console-56bd7bfcb4-rl4rr\" (UID: \"21a7de0d-bac7-4258-8062-414b665097c9\") " pod="openshift-console/console-56bd7bfcb4-rl4rr" Dec 10 11:04:49 crc kubenswrapper[4780]: I1210 11:04:49.103010 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/21a7de0d-bac7-4258-8062-414b665097c9-service-ca\") pod \"console-56bd7bfcb4-rl4rr\" (UID: \"21a7de0d-bac7-4258-8062-414b665097c9\") " pod="openshift-console/console-56bd7bfcb4-rl4rr" Dec 10 11:04:49 crc kubenswrapper[4780]: I1210 11:04:49.106263 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/21a7de0d-bac7-4258-8062-414b665097c9-console-serving-cert\") pod \"console-56bd7bfcb4-rl4rr\" (UID: \"21a7de0d-bac7-4258-8062-414b665097c9\") " pod="openshift-console/console-56bd7bfcb4-rl4rr" Dec 10 11:04:49 crc kubenswrapper[4780]: I1210 11:04:49.111477 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/21a7de0d-bac7-4258-8062-414b665097c9-console-oauth-config\") pod \"console-56bd7bfcb4-rl4rr\" (UID: \"21a7de0d-bac7-4258-8062-414b665097c9\") " pod="openshift-console/console-56bd7bfcb4-rl4rr" Dec 10 11:04:49 crc kubenswrapper[4780]: I1210 11:04:49.202121 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-htdcj\" (UniqueName: \"kubernetes.io/projected/21a7de0d-bac7-4258-8062-414b665097c9-kube-api-access-htdcj\") pod \"console-56bd7bfcb4-rl4rr\" (UID: \"21a7de0d-bac7-4258-8062-414b665097c9\") " pod="openshift-console/console-56bd7bfcb4-rl4rr" Dec 10 11:04:49 crc kubenswrapper[4780]: I1210 11:04:49.202245 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/52bc8018-7b47-4053-8bc1-3b686af14adf-tls-key-pair\") pod \"nmstate-webhook-5f6d4c5ccb-vqlpg\" (UID: \"52bc8018-7b47-4053-8bc1-3b686af14adf\") " pod="openshift-nmstate/nmstate-webhook-5f6d4c5ccb-vqlpg" Dec 10 11:04:49 crc kubenswrapper[4780]: I1210 11:04:49.208656 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/52bc8018-7b47-4053-8bc1-3b686af14adf-tls-key-pair\") pod \"nmstate-webhook-5f6d4c5ccb-vqlpg\" (UID: \"52bc8018-7b47-4053-8bc1-3b686af14adf\") " 
pod="openshift-nmstate/nmstate-webhook-5f6d4c5ccb-vqlpg" Dec 10 11:04:49 crc kubenswrapper[4780]: I1210 11:04:49.226324 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-htdcj\" (UniqueName: \"kubernetes.io/projected/21a7de0d-bac7-4258-8062-414b665097c9-kube-api-access-htdcj\") pod \"console-56bd7bfcb4-rl4rr\" (UID: \"21a7de0d-bac7-4258-8062-414b665097c9\") " pod="openshift-console/console-56bd7bfcb4-rl4rr" Dec 10 11:04:49 crc kubenswrapper[4780]: I1210 11:04:49.229874 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-console-plugin-7fbb5f6569-b9mk2" Dec 10 11:04:49 crc kubenswrapper[4780]: I1210 11:04:49.238894 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-56bd7bfcb4-rl4rr" Dec 10 11:04:49 crc kubenswrapper[4780]: I1210 11:04:49.374102 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-webhook-5f6d4c5ccb-vqlpg" Dec 10 11:04:49 crc kubenswrapper[4780]: I1210 11:04:49.457785 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-metrics-7f946cbc9-7dzr2"] Dec 10 11:04:49 crc kubenswrapper[4780]: I1210 11:04:49.896650 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-console-plugin-7fbb5f6569-b9mk2"] Dec 10 11:04:49 crc kubenswrapper[4780]: W1210 11:04:49.901216 4780 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podea0c76a7_e4a2_479c_8aa3_76ec59ce572b.slice/crio-9a2827dbc3a1ec6a36002b16f98172885aefa4cba0cd195a420bab8c66f4430b WatchSource:0}: Error finding container 9a2827dbc3a1ec6a36002b16f98172885aefa4cba0cd195a420bab8c66f4430b: Status 404 returned error can't find the container with id 9a2827dbc3a1ec6a36002b16f98172885aefa4cba0cd195a420bab8c66f4430b Dec 10 11:04:50 crc kubenswrapper[4780]: I1210 11:04:50.006530 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-console-plugin-7fbb5f6569-b9mk2" event={"ID":"ea0c76a7-e4a2-479c-8aa3-76ec59ce572b","Type":"ContainerStarted","Data":"9a2827dbc3a1ec6a36002b16f98172885aefa4cba0cd195a420bab8c66f4430b"} Dec 10 11:04:50 crc kubenswrapper[4780]: I1210 11:04:50.008190 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-metrics-7f946cbc9-7dzr2" event={"ID":"280c4015-3911-4b5b-b794-23e319640bd0","Type":"ContainerStarted","Data":"6d4113a7d109e111bf88a2900d8b1f37295eb4bbe521d13e300615d0adf5eac1"} Dec 10 11:04:50 crc kubenswrapper[4780]: I1210 11:04:50.009506 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-56bd7bfcb4-rl4rr"] Dec 10 11:04:50 crc kubenswrapper[4780]: W1210 11:04:50.014328 4780 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod21a7de0d_bac7_4258_8062_414b665097c9.slice/crio-2c7b4c866e626ba5957056aa05c26c06f996890c5c81c1eea95d8e61f92a99d0 WatchSource:0}: Error finding container 2c7b4c866e626ba5957056aa05c26c06f996890c5c81c1eea95d8e61f92a99d0: Status 404 returned error can't find the container with id 2c7b4c866e626ba5957056aa05c26c06f996890c5c81c1eea95d8e61f92a99d0 Dec 10 11:04:50 crc kubenswrapper[4780]: I1210 11:04:50.071702 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-webhook-5f6d4c5ccb-vqlpg"] Dec 10 11:04:50 crc kubenswrapper[4780]: W1210 11:04:50.077520 4780 
manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod52bc8018_7b47_4053_8bc1_3b686af14adf.slice/crio-e378bb99ddf1c3008c95138a68429d935bd63b4a79f438f3f26a8b6e2fb2d111 WatchSource:0}: Error finding container e378bb99ddf1c3008c95138a68429d935bd63b4a79f438f3f26a8b6e2fb2d111: Status 404 returned error can't find the container with id e378bb99ddf1c3008c95138a68429d935bd63b4a79f438f3f26a8b6e2fb2d111 Dec 10 11:04:51 crc kubenswrapper[4780]: I1210 11:04:51.347299 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-56bd7bfcb4-rl4rr" event={"ID":"21a7de0d-bac7-4258-8062-414b665097c9","Type":"ContainerStarted","Data":"4b6362f6257a92f65062a8984f23eab11e5d58078c4b6f95a3e1d67bea700d65"} Dec 10 11:04:51 crc kubenswrapper[4780]: I1210 11:04:51.347755 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-56bd7bfcb4-rl4rr" event={"ID":"21a7de0d-bac7-4258-8062-414b665097c9","Type":"ContainerStarted","Data":"2c7b4c866e626ba5957056aa05c26c06f996890c5c81c1eea95d8e61f92a99d0"} Dec 10 11:04:51 crc kubenswrapper[4780]: I1210 11:04:51.356987 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-webhook-5f6d4c5ccb-vqlpg" event={"ID":"52bc8018-7b47-4053-8bc1-3b686af14adf","Type":"ContainerStarted","Data":"e378bb99ddf1c3008c95138a68429d935bd63b4a79f438f3f26a8b6e2fb2d111"} Dec 10 11:04:51 crc kubenswrapper[4780]: I1210 11:04:51.390879 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/console-56bd7bfcb4-rl4rr" podStartSLOduration=3.390847758 podStartE2EDuration="3.390847758s" podCreationTimestamp="2025-12-10 11:04:48 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 11:04:51.371911745 +0000 UTC m=+1196.225305208" watchObservedRunningTime="2025-12-10 11:04:51.390847758 +0000 UTC m=+1196.244241201" Dec 10 11:04:59 crc kubenswrapper[4780]: I1210 11:04:59.240208 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/console-56bd7bfcb4-rl4rr" Dec 10 11:04:59 crc kubenswrapper[4780]: I1210 11:04:59.240977 4780 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-console/console-56bd7bfcb4-rl4rr" Dec 10 11:04:59 crc kubenswrapper[4780]: I1210 11:04:59.248129 4780 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-console/console-56bd7bfcb4-rl4rr" Dec 10 11:04:59 crc kubenswrapper[4780]: I1210 11:04:59.795524 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/console-56bd7bfcb4-rl4rr" Dec 10 11:04:59 crc kubenswrapper[4780]: I1210 11:04:59.865727 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-console/console-5db8778c9-c5z2p"] Dec 10 11:05:10 crc kubenswrapper[4780]: I1210 11:05:10.025977 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-webhook-5f6d4c5ccb-vqlpg" event={"ID":"52bc8018-7b47-4053-8bc1-3b686af14adf","Type":"ContainerStarted","Data":"279d04e5f78101837c0360d95700df8b631a12cd31f7076b795516ed5de17bba"} Dec 10 11:05:10 crc kubenswrapper[4780]: I1210 11:05:10.026586 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-nmstate/nmstate-webhook-5f6d4c5ccb-vqlpg" Dec 10 11:05:10 crc kubenswrapper[4780]: I1210 11:05:10.035956 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-nmstate/nmstate-console-plugin-7fbb5f6569-b9mk2" event={"ID":"ea0c76a7-e4a2-479c-8aa3-76ec59ce572b","Type":"ContainerStarted","Data":"fc39ecb1474ee523672b2e6d51df13c0539052461238320d04b95a0fbd1b8f12"} Dec 10 11:05:10 crc kubenswrapper[4780]: I1210 11:05:10.039225 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-handler-jx5fq" event={"ID":"e195bedf-9712-4f1f-a9f7-9f4dabdd710b","Type":"ContainerStarted","Data":"4bc2e7ae58914c1d4f0be252a075580094202e899c530c541de947bbb005763f"} Dec 10 11:05:10 crc kubenswrapper[4780]: I1210 11:05:10.039745 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-nmstate/nmstate-handler-jx5fq" Dec 10 11:05:10 crc kubenswrapper[4780]: I1210 11:05:10.042789 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-metrics-7f946cbc9-7dzr2" event={"ID":"280c4015-3911-4b5b-b794-23e319640bd0","Type":"ContainerStarted","Data":"b300e35d7dc7001672341728382a91befa1a51f50df90758716776b68f773818"} Dec 10 11:05:10 crc kubenswrapper[4780]: I1210 11:05:10.047249 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-webhook-5f6d4c5ccb-vqlpg" podStartSLOduration=3.046274834 podStartE2EDuration="22.047230564s" podCreationTimestamp="2025-12-10 11:04:48 +0000 UTC" firstStartedPulling="2025-12-10 11:04:50.079363963 +0000 UTC m=+1194.932757406" lastFinishedPulling="2025-12-10 11:05:09.080319693 +0000 UTC m=+1213.933713136" observedRunningTime="2025-12-10 11:05:10.044603808 +0000 UTC m=+1214.897997251" watchObservedRunningTime="2025-12-10 11:05:10.047230564 +0000 UTC m=+1214.900624007" Dec 10 11:05:10 crc kubenswrapper[4780]: I1210 11:05:10.164963 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-handler-jx5fq" podStartSLOduration=2.119187551 podStartE2EDuration="22.16494182s" podCreationTimestamp="2025-12-10 11:04:48 +0000 UTC" firstStartedPulling="2025-12-10 11:04:48.861592765 +0000 UTC m=+1193.714986208" lastFinishedPulling="2025-12-10 11:05:08.907347034 +0000 UTC m=+1213.760740477" observedRunningTime="2025-12-10 11:05:10.161665768 +0000 UTC m=+1215.015059221" watchObservedRunningTime="2025-12-10 11:05:10.16494182 +0000 UTC m=+1215.018335263" Dec 10 11:05:10 crc kubenswrapper[4780]: I1210 11:05:10.193452 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-console-plugin-7fbb5f6569-b9mk2" podStartSLOduration=3.188636428 podStartE2EDuration="22.193421203s" podCreationTimestamp="2025-12-10 11:04:48 +0000 UTC" firstStartedPulling="2025-12-10 11:04:49.903303357 +0000 UTC m=+1194.756696800" lastFinishedPulling="2025-12-10 11:05:08.908088132 +0000 UTC m=+1213.761481575" observedRunningTime="2025-12-10 11:05:10.17971425 +0000 UTC m=+1215.033107693" watchObservedRunningTime="2025-12-10 11:05:10.193421203 +0000 UTC m=+1215.046814646" Dec 10 11:05:18 crc kubenswrapper[4780]: I1210 11:05:18.798124 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-nmstate/nmstate-handler-jx5fq" Dec 10 11:05:19 crc kubenswrapper[4780]: I1210 11:05:19.121718 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-metrics-7f946cbc9-7dzr2" event={"ID":"280c4015-3911-4b5b-b794-23e319640bd0","Type":"ContainerStarted","Data":"f36d27e28059824a320ab6836c94bce80b55a517e289735d49f3601d8e43dc68"} Dec 10 11:05:19 crc kubenswrapper[4780]: I1210 11:05:19.140344 4780 
pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-metrics-7f946cbc9-7dzr2" podStartSLOduration=2.966033116 podStartE2EDuration="31.140320082s" podCreationTimestamp="2025-12-10 11:04:48 +0000 UTC" firstStartedPulling="2025-12-10 11:04:49.598496439 +0000 UTC m=+1194.451889882" lastFinishedPulling="2025-12-10 11:05:17.772783405 +0000 UTC m=+1222.626176848" observedRunningTime="2025-12-10 11:05:19.137907782 +0000 UTC m=+1223.991301225" watchObservedRunningTime="2025-12-10 11:05:19.140320082 +0000 UTC m=+1223.993713525" Dec 10 11:05:19 crc kubenswrapper[4780]: I1210 11:05:19.381335 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-nmstate/nmstate-webhook-5f6d4c5ccb-vqlpg" Dec 10 11:05:24 crc kubenswrapper[4780]: I1210 11:05:24.972270 4780 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-console/console-5db8778c9-c5z2p" podUID="ff1667d0-453d-4b4f-bced-0452da294f98" containerName="console" containerID="cri-o://1d77a9bacf7497f45a701d126816fd8b00288797839a1f4043e23167335196c0" gracePeriod=15 Dec 10 11:05:25 crc kubenswrapper[4780]: I1210 11:05:25.179346 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-console_console-5db8778c9-c5z2p_ff1667d0-453d-4b4f-bced-0452da294f98/console/0.log" Dec 10 11:05:25 crc kubenswrapper[4780]: I1210 11:05:25.179413 4780 generic.go:334] "Generic (PLEG): container finished" podID="ff1667d0-453d-4b4f-bced-0452da294f98" containerID="1d77a9bacf7497f45a701d126816fd8b00288797839a1f4043e23167335196c0" exitCode=2 Dec 10 11:05:25 crc kubenswrapper[4780]: I1210 11:05:25.179455 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-5db8778c9-c5z2p" event={"ID":"ff1667d0-453d-4b4f-bced-0452da294f98","Type":"ContainerDied","Data":"1d77a9bacf7497f45a701d126816fd8b00288797839a1f4043e23167335196c0"} Dec 10 11:05:26 crc kubenswrapper[4780]: I1210 11:05:26.008093 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-console_console-5db8778c9-c5z2p_ff1667d0-453d-4b4f-bced-0452da294f98/console/0.log" Dec 10 11:05:26 crc kubenswrapper[4780]: I1210 11:05:26.008510 4780 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/console-5db8778c9-c5z2p" Dec 10 11:05:26 crc kubenswrapper[4780]: I1210 11:05:26.103120 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/ff1667d0-453d-4b4f-bced-0452da294f98-oauth-serving-cert\") pod \"ff1667d0-453d-4b4f-bced-0452da294f98\" (UID: \"ff1667d0-453d-4b4f-bced-0452da294f98\") " Dec 10 11:05:26 crc kubenswrapper[4780]: I1210 11:05:26.103234 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/ff1667d0-453d-4b4f-bced-0452da294f98-service-ca\") pod \"ff1667d0-453d-4b4f-bced-0452da294f98\" (UID: \"ff1667d0-453d-4b4f-bced-0452da294f98\") " Dec 10 11:05:26 crc kubenswrapper[4780]: I1210 11:05:26.103295 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9rvj5\" (UniqueName: \"kubernetes.io/projected/ff1667d0-453d-4b4f-bced-0452da294f98-kube-api-access-9rvj5\") pod \"ff1667d0-453d-4b4f-bced-0452da294f98\" (UID: \"ff1667d0-453d-4b4f-bced-0452da294f98\") " Dec 10 11:05:26 crc kubenswrapper[4780]: I1210 11:05:26.103322 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/ff1667d0-453d-4b4f-bced-0452da294f98-console-oauth-config\") pod \"ff1667d0-453d-4b4f-bced-0452da294f98\" (UID: \"ff1667d0-453d-4b4f-bced-0452da294f98\") " Dec 10 11:05:26 crc kubenswrapper[4780]: I1210 11:05:26.103349 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/ff1667d0-453d-4b4f-bced-0452da294f98-console-config\") pod \"ff1667d0-453d-4b4f-bced-0452da294f98\" (UID: \"ff1667d0-453d-4b4f-bced-0452da294f98\") " Dec 10 11:05:26 crc kubenswrapper[4780]: I1210 11:05:26.103399 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/ff1667d0-453d-4b4f-bced-0452da294f98-console-serving-cert\") pod \"ff1667d0-453d-4b4f-bced-0452da294f98\" (UID: \"ff1667d0-453d-4b4f-bced-0452da294f98\") " Dec 10 11:05:26 crc kubenswrapper[4780]: I1210 11:05:26.103416 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/ff1667d0-453d-4b4f-bced-0452da294f98-trusted-ca-bundle\") pod \"ff1667d0-453d-4b4f-bced-0452da294f98\" (UID: \"ff1667d0-453d-4b4f-bced-0452da294f98\") " Dec 10 11:05:26 crc kubenswrapper[4780]: I1210 11:05:26.104641 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ff1667d0-453d-4b4f-bced-0452da294f98-oauth-serving-cert" (OuterVolumeSpecName: "oauth-serving-cert") pod "ff1667d0-453d-4b4f-bced-0452da294f98" (UID: "ff1667d0-453d-4b4f-bced-0452da294f98"). InnerVolumeSpecName "oauth-serving-cert". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 11:05:26 crc kubenswrapper[4780]: I1210 11:05:26.104664 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ff1667d0-453d-4b4f-bced-0452da294f98-service-ca" (OuterVolumeSpecName: "service-ca") pod "ff1667d0-453d-4b4f-bced-0452da294f98" (UID: "ff1667d0-453d-4b4f-bced-0452da294f98"). InnerVolumeSpecName "service-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 11:05:26 crc kubenswrapper[4780]: I1210 11:05:26.106055 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ff1667d0-453d-4b4f-bced-0452da294f98-console-config" (OuterVolumeSpecName: "console-config") pod "ff1667d0-453d-4b4f-bced-0452da294f98" (UID: "ff1667d0-453d-4b4f-bced-0452da294f98"). InnerVolumeSpecName "console-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 11:05:26 crc kubenswrapper[4780]: I1210 11:05:26.106284 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ff1667d0-453d-4b4f-bced-0452da294f98-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "ff1667d0-453d-4b4f-bced-0452da294f98" (UID: "ff1667d0-453d-4b4f-bced-0452da294f98"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 11:05:26 crc kubenswrapper[4780]: I1210 11:05:26.110842 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ff1667d0-453d-4b4f-bced-0452da294f98-kube-api-access-9rvj5" (OuterVolumeSpecName: "kube-api-access-9rvj5") pod "ff1667d0-453d-4b4f-bced-0452da294f98" (UID: "ff1667d0-453d-4b4f-bced-0452da294f98"). InnerVolumeSpecName "kube-api-access-9rvj5". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:05:26 crc kubenswrapper[4780]: I1210 11:05:26.111064 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ff1667d0-453d-4b4f-bced-0452da294f98-console-oauth-config" (OuterVolumeSpecName: "console-oauth-config") pod "ff1667d0-453d-4b4f-bced-0452da294f98" (UID: "ff1667d0-453d-4b4f-bced-0452da294f98"). InnerVolumeSpecName "console-oauth-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:05:26 crc kubenswrapper[4780]: I1210 11:05:26.111833 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ff1667d0-453d-4b4f-bced-0452da294f98-console-serving-cert" (OuterVolumeSpecName: "console-serving-cert") pod "ff1667d0-453d-4b4f-bced-0452da294f98" (UID: "ff1667d0-453d-4b4f-bced-0452da294f98"). InnerVolumeSpecName "console-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:05:26 crc kubenswrapper[4780]: I1210 11:05:26.190226 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-console_console-5db8778c9-c5z2p_ff1667d0-453d-4b4f-bced-0452da294f98/console/0.log" Dec 10 11:05:26 crc kubenswrapper[4780]: I1210 11:05:26.190395 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-5db8778c9-c5z2p" event={"ID":"ff1667d0-453d-4b4f-bced-0452da294f98","Type":"ContainerDied","Data":"62b3cba350ecfc7bf41e054269b20869c16efce0e9b65ec93818587a78e1d17b"} Dec 10 11:05:26 crc kubenswrapper[4780]: I1210 11:05:26.190479 4780 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/console-5db8778c9-c5z2p" Dec 10 11:05:26 crc kubenswrapper[4780]: I1210 11:05:26.190637 4780 scope.go:117] "RemoveContainer" containerID="1d77a9bacf7497f45a701d126816fd8b00288797839a1f4043e23167335196c0" Dec 10 11:05:26 crc kubenswrapper[4780]: I1210 11:05:26.207339 4780 reconciler_common.go:293] "Volume detached for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/ff1667d0-453d-4b4f-bced-0452da294f98-oauth-serving-cert\") on node \"crc\" DevicePath \"\"" Dec 10 11:05:26 crc kubenswrapper[4780]: I1210 11:05:26.207411 4780 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/ff1667d0-453d-4b4f-bced-0452da294f98-service-ca\") on node \"crc\" DevicePath \"\"" Dec 10 11:05:26 crc kubenswrapper[4780]: I1210 11:05:26.207431 4780 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9rvj5\" (UniqueName: \"kubernetes.io/projected/ff1667d0-453d-4b4f-bced-0452da294f98-kube-api-access-9rvj5\") on node \"crc\" DevicePath \"\"" Dec 10 11:05:26 crc kubenswrapper[4780]: I1210 11:05:26.207454 4780 reconciler_common.go:293] "Volume detached for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/ff1667d0-453d-4b4f-bced-0452da294f98-console-oauth-config\") on node \"crc\" DevicePath \"\"" Dec 10 11:05:26 crc kubenswrapper[4780]: I1210 11:05:26.207473 4780 reconciler_common.go:293] "Volume detached for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/ff1667d0-453d-4b4f-bced-0452da294f98-console-config\") on node \"crc\" DevicePath \"\"" Dec 10 11:05:26 crc kubenswrapper[4780]: I1210 11:05:26.207492 4780 reconciler_common.go:293] "Volume detached for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/ff1667d0-453d-4b4f-bced-0452da294f98-console-serving-cert\") on node \"crc\" DevicePath \"\"" Dec 10 11:05:26 crc kubenswrapper[4780]: I1210 11:05:26.207509 4780 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/ff1667d0-453d-4b4f-bced-0452da294f98-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 10 11:05:26 crc kubenswrapper[4780]: I1210 11:05:26.233475 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-console/console-5db8778c9-c5z2p"] Dec 10 11:05:26 crc kubenswrapper[4780]: I1210 11:05:26.240069 4780 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-console/console-5db8778c9-c5z2p"] Dec 10 11:05:27 crc kubenswrapper[4780]: I1210 11:05:27.476351 4780 patch_prober.go:28] interesting pod/machine-config-daemon-xhdr5 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 10 11:05:27 crc kubenswrapper[4780]: I1210 11:05:27.476766 4780 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" podUID="6bf1dca1-b191-4796-b326-baac53e84045" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 10 11:05:27 crc kubenswrapper[4780]: I1210 11:05:27.968582 4780 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ff1667d0-453d-4b4f-bced-0452da294f98" path="/var/lib/kubelet/pods/ff1667d0-453d-4b4f-bced-0452da294f98/volumes" Dec 10 11:05:40 crc kubenswrapper[4780]: I1210 
11:05:40.611179 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83swhtx"] Dec 10 11:05:40 crc kubenswrapper[4780]: E1210 11:05:40.612129 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ff1667d0-453d-4b4f-bced-0452da294f98" containerName="console" Dec 10 11:05:40 crc kubenswrapper[4780]: I1210 11:05:40.612146 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="ff1667d0-453d-4b4f-bced-0452da294f98" containerName="console" Dec 10 11:05:40 crc kubenswrapper[4780]: I1210 11:05:40.612441 4780 memory_manager.go:354] "RemoveStaleState removing state" podUID="ff1667d0-453d-4b4f-bced-0452da294f98" containerName="console" Dec 10 11:05:40 crc kubenswrapper[4780]: I1210 11:05:40.614021 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83swhtx" Dec 10 11:05:40 crc kubenswrapper[4780]: I1210 11:05:40.616330 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"default-dockercfg-vmwhc" Dec 10 11:05:40 crc kubenswrapper[4780]: I1210 11:05:40.620778 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83swhtx"] Dec 10 11:05:40 crc kubenswrapper[4780]: I1210 11:05:40.694224 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mxvmg\" (UniqueName: \"kubernetes.io/projected/a919dc65-d37b-4f09-b396-7dc2aa7ea03a-kube-api-access-mxvmg\") pod \"af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83swhtx\" (UID: \"a919dc65-d37b-4f09-b396-7dc2aa7ea03a\") " pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83swhtx" Dec 10 11:05:40 crc kubenswrapper[4780]: I1210 11:05:40.694283 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/a919dc65-d37b-4f09-b396-7dc2aa7ea03a-util\") pod \"af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83swhtx\" (UID: \"a919dc65-d37b-4f09-b396-7dc2aa7ea03a\") " pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83swhtx" Dec 10 11:05:40 crc kubenswrapper[4780]: I1210 11:05:40.694370 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/a919dc65-d37b-4f09-b396-7dc2aa7ea03a-bundle\") pod \"af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83swhtx\" (UID: \"a919dc65-d37b-4f09-b396-7dc2aa7ea03a\") " pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83swhtx" Dec 10 11:05:40 crc kubenswrapper[4780]: I1210 11:05:40.795118 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mxvmg\" (UniqueName: \"kubernetes.io/projected/a919dc65-d37b-4f09-b396-7dc2aa7ea03a-kube-api-access-mxvmg\") pod \"af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83swhtx\" (UID: \"a919dc65-d37b-4f09-b396-7dc2aa7ea03a\") " pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83swhtx" Dec 10 11:05:40 crc kubenswrapper[4780]: I1210 11:05:40.795190 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/a919dc65-d37b-4f09-b396-7dc2aa7ea03a-util\") pod 
\"af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83swhtx\" (UID: \"a919dc65-d37b-4f09-b396-7dc2aa7ea03a\") " pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83swhtx" Dec 10 11:05:40 crc kubenswrapper[4780]: I1210 11:05:40.795262 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/a919dc65-d37b-4f09-b396-7dc2aa7ea03a-bundle\") pod \"af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83swhtx\" (UID: \"a919dc65-d37b-4f09-b396-7dc2aa7ea03a\") " pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83swhtx" Dec 10 11:05:40 crc kubenswrapper[4780]: I1210 11:05:40.795799 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/a919dc65-d37b-4f09-b396-7dc2aa7ea03a-util\") pod \"af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83swhtx\" (UID: \"a919dc65-d37b-4f09-b396-7dc2aa7ea03a\") " pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83swhtx" Dec 10 11:05:40 crc kubenswrapper[4780]: I1210 11:05:40.795813 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/a919dc65-d37b-4f09-b396-7dc2aa7ea03a-bundle\") pod \"af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83swhtx\" (UID: \"a919dc65-d37b-4f09-b396-7dc2aa7ea03a\") " pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83swhtx" Dec 10 11:05:40 crc kubenswrapper[4780]: I1210 11:05:40.822879 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mxvmg\" (UniqueName: \"kubernetes.io/projected/a919dc65-d37b-4f09-b396-7dc2aa7ea03a-kube-api-access-mxvmg\") pod \"af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83swhtx\" (UID: \"a919dc65-d37b-4f09-b396-7dc2aa7ea03a\") " pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83swhtx" Dec 10 11:05:40 crc kubenswrapper[4780]: I1210 11:05:40.948838 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83swhtx" Dec 10 11:05:41 crc kubenswrapper[4780]: I1210 11:05:41.396347 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83swhtx"] Dec 10 11:05:42 crc kubenswrapper[4780]: I1210 11:05:42.326435 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83swhtx" event={"ID":"a919dc65-d37b-4f09-b396-7dc2aa7ea03a","Type":"ContainerStarted","Data":"a4595df11ffae70a2dfccc319fd0e3a7931d8d7c5a9b454d019ca5a098e764f7"} Dec 10 11:05:42 crc kubenswrapper[4780]: I1210 11:05:42.326800 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83swhtx" event={"ID":"a919dc65-d37b-4f09-b396-7dc2aa7ea03a","Type":"ContainerStarted","Data":"438cbcb2b44b3b9516e42a1a807e73206980de045a757f1391d63eb30c9007de"} Dec 10 11:05:43 crc kubenswrapper[4780]: I1210 11:05:43.336129 4780 generic.go:334] "Generic (PLEG): container finished" podID="a919dc65-d37b-4f09-b396-7dc2aa7ea03a" containerID="a4595df11ffae70a2dfccc319fd0e3a7931d8d7c5a9b454d019ca5a098e764f7" exitCode=0 Dec 10 11:05:43 crc kubenswrapper[4780]: I1210 11:05:43.336192 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83swhtx" event={"ID":"a919dc65-d37b-4f09-b396-7dc2aa7ea03a","Type":"ContainerDied","Data":"a4595df11ffae70a2dfccc319fd0e3a7931d8d7c5a9b454d019ca5a098e764f7"} Dec 10 11:05:50 crc kubenswrapper[4780]: I1210 11:05:50.398279 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83swhtx" event={"ID":"a919dc65-d37b-4f09-b396-7dc2aa7ea03a","Type":"ContainerStarted","Data":"f6b38ecd7be2ffa53b2aac0b564ce5e112d88c1fc05df24f6aa37b207e924749"} Dec 10 11:05:51 crc kubenswrapper[4780]: I1210 11:05:51.412114 4780 generic.go:334] "Generic (PLEG): container finished" podID="a919dc65-d37b-4f09-b396-7dc2aa7ea03a" containerID="f6b38ecd7be2ffa53b2aac0b564ce5e112d88c1fc05df24f6aa37b207e924749" exitCode=0 Dec 10 11:05:51 crc kubenswrapper[4780]: I1210 11:05:51.412202 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83swhtx" event={"ID":"a919dc65-d37b-4f09-b396-7dc2aa7ea03a","Type":"ContainerDied","Data":"f6b38ecd7be2ffa53b2aac0b564ce5e112d88c1fc05df24f6aa37b207e924749"} Dec 10 11:05:52 crc kubenswrapper[4780]: I1210 11:05:52.424822 4780 generic.go:334] "Generic (PLEG): container finished" podID="a919dc65-d37b-4f09-b396-7dc2aa7ea03a" containerID="e67ff93f73bee2862c061c2d74be7ff86593f0bbe13e0850a199ac7764adcb23" exitCode=0 Dec 10 11:05:52 crc kubenswrapper[4780]: I1210 11:05:52.424978 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83swhtx" event={"ID":"a919dc65-d37b-4f09-b396-7dc2aa7ea03a","Type":"ContainerDied","Data":"e67ff93f73bee2862c061c2d74be7ff86593f0bbe13e0850a199ac7764adcb23"} Dec 10 11:05:53 crc kubenswrapper[4780]: I1210 11:05:53.734427 4780 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83swhtx" Dec 10 11:05:53 crc kubenswrapper[4780]: I1210 11:05:53.799231 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mxvmg\" (UniqueName: \"kubernetes.io/projected/a919dc65-d37b-4f09-b396-7dc2aa7ea03a-kube-api-access-mxvmg\") pod \"a919dc65-d37b-4f09-b396-7dc2aa7ea03a\" (UID: \"a919dc65-d37b-4f09-b396-7dc2aa7ea03a\") " Dec 10 11:05:53 crc kubenswrapper[4780]: I1210 11:05:53.799341 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/a919dc65-d37b-4f09-b396-7dc2aa7ea03a-bundle\") pod \"a919dc65-d37b-4f09-b396-7dc2aa7ea03a\" (UID: \"a919dc65-d37b-4f09-b396-7dc2aa7ea03a\") " Dec 10 11:05:53 crc kubenswrapper[4780]: I1210 11:05:53.799510 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/a919dc65-d37b-4f09-b396-7dc2aa7ea03a-util\") pod \"a919dc65-d37b-4f09-b396-7dc2aa7ea03a\" (UID: \"a919dc65-d37b-4f09-b396-7dc2aa7ea03a\") " Dec 10 11:05:53 crc kubenswrapper[4780]: I1210 11:05:53.801620 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a919dc65-d37b-4f09-b396-7dc2aa7ea03a-bundle" (OuterVolumeSpecName: "bundle") pod "a919dc65-d37b-4f09-b396-7dc2aa7ea03a" (UID: "a919dc65-d37b-4f09-b396-7dc2aa7ea03a"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 11:05:53 crc kubenswrapper[4780]: I1210 11:05:53.809232 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a919dc65-d37b-4f09-b396-7dc2aa7ea03a-kube-api-access-mxvmg" (OuterVolumeSpecName: "kube-api-access-mxvmg") pod "a919dc65-d37b-4f09-b396-7dc2aa7ea03a" (UID: "a919dc65-d37b-4f09-b396-7dc2aa7ea03a"). InnerVolumeSpecName "kube-api-access-mxvmg". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:05:53 crc kubenswrapper[4780]: I1210 11:05:53.815392 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a919dc65-d37b-4f09-b396-7dc2aa7ea03a-util" (OuterVolumeSpecName: "util") pod "a919dc65-d37b-4f09-b396-7dc2aa7ea03a" (UID: "a919dc65-d37b-4f09-b396-7dc2aa7ea03a"). InnerVolumeSpecName "util". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 11:05:53 crc kubenswrapper[4780]: I1210 11:05:53.902209 4780 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mxvmg\" (UniqueName: \"kubernetes.io/projected/a919dc65-d37b-4f09-b396-7dc2aa7ea03a-kube-api-access-mxvmg\") on node \"crc\" DevicePath \"\"" Dec 10 11:05:53 crc kubenswrapper[4780]: I1210 11:05:53.902268 4780 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/a919dc65-d37b-4f09-b396-7dc2aa7ea03a-bundle\") on node \"crc\" DevicePath \"\"" Dec 10 11:05:53 crc kubenswrapper[4780]: I1210 11:05:53.902283 4780 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/a919dc65-d37b-4f09-b396-7dc2aa7ea03a-util\") on node \"crc\" DevicePath \"\"" Dec 10 11:05:54 crc kubenswrapper[4780]: I1210 11:05:54.442219 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83swhtx" event={"ID":"a919dc65-d37b-4f09-b396-7dc2aa7ea03a","Type":"ContainerDied","Data":"438cbcb2b44b3b9516e42a1a807e73206980de045a757f1391d63eb30c9007de"} Dec 10 11:05:54 crc kubenswrapper[4780]: I1210 11:05:54.442275 4780 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83swhtx" Dec 10 11:05:54 crc kubenswrapper[4780]: I1210 11:05:54.442288 4780 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="438cbcb2b44b3b9516e42a1a807e73206980de045a757f1391d63eb30c9007de" Dec 10 11:05:57 crc kubenswrapper[4780]: I1210 11:05:57.475960 4780 patch_prober.go:28] interesting pod/machine-config-daemon-xhdr5 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 10 11:05:57 crc kubenswrapper[4780]: I1210 11:05:57.477129 4780 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" podUID="6bf1dca1-b191-4796-b326-baac53e84045" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 10 11:06:04 crc kubenswrapper[4780]: I1210 11:06:04.260621 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/metallb-operator-controller-manager-6fb7f5d8bb-v59tk"] Dec 10 11:06:04 crc kubenswrapper[4780]: E1210 11:06:04.265165 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a919dc65-d37b-4f09-b396-7dc2aa7ea03a" containerName="pull" Dec 10 11:06:04 crc kubenswrapper[4780]: I1210 11:06:04.265192 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="a919dc65-d37b-4f09-b396-7dc2aa7ea03a" containerName="pull" Dec 10 11:06:04 crc kubenswrapper[4780]: E1210 11:06:04.265220 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a919dc65-d37b-4f09-b396-7dc2aa7ea03a" containerName="util" Dec 10 11:06:04 crc kubenswrapper[4780]: I1210 11:06:04.265227 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="a919dc65-d37b-4f09-b396-7dc2aa7ea03a" containerName="util" Dec 10 11:06:04 crc kubenswrapper[4780]: E1210 11:06:04.265259 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a919dc65-d37b-4f09-b396-7dc2aa7ea03a" containerName="extract" Dec 10 11:06:04 
crc kubenswrapper[4780]: I1210 11:06:04.265266 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="a919dc65-d37b-4f09-b396-7dc2aa7ea03a" containerName="extract" Dec 10 11:06:04 crc kubenswrapper[4780]: I1210 11:06:04.265476 4780 memory_manager.go:354] "RemoveStaleState removing state" podUID="a919dc65-d37b-4f09-b396-7dc2aa7ea03a" containerName="extract" Dec 10 11:06:04 crc kubenswrapper[4780]: I1210 11:06:04.266336 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/metallb-operator-controller-manager-6fb7f5d8bb-v59tk" Dec 10 11:06:04 crc kubenswrapper[4780]: I1210 11:06:04.268423 4780 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"manager-account-dockercfg-gtvvv" Dec 10 11:06:04 crc kubenswrapper[4780]: I1210 11:06:04.268808 4780 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-controller-manager-service-cert" Dec 10 11:06:04 crc kubenswrapper[4780]: I1210 11:06:04.269417 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"openshift-service-ca.crt" Dec 10 11:06:04 crc kubenswrapper[4780]: I1210 11:06:04.269596 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"kube-root-ca.crt" Dec 10 11:06:04 crc kubenswrapper[4780]: I1210 11:06:04.274434 4780 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-webhook-server-cert" Dec 10 11:06:04 crc kubenswrapper[4780]: I1210 11:06:04.297998 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-controller-manager-6fb7f5d8bb-v59tk"] Dec 10 11:06:04 crc kubenswrapper[4780]: I1210 11:06:04.319126 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zvdlb\" (UniqueName: \"kubernetes.io/projected/d811e5e5-930d-403f-92a3-0fb26a063acc-kube-api-access-zvdlb\") pod \"metallb-operator-controller-manager-6fb7f5d8bb-v59tk\" (UID: \"d811e5e5-930d-403f-92a3-0fb26a063acc\") " pod="metallb-system/metallb-operator-controller-manager-6fb7f5d8bb-v59tk" Dec 10 11:06:04 crc kubenswrapper[4780]: I1210 11:06:04.319614 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/d811e5e5-930d-403f-92a3-0fb26a063acc-apiservice-cert\") pod \"metallb-operator-controller-manager-6fb7f5d8bb-v59tk\" (UID: \"d811e5e5-930d-403f-92a3-0fb26a063acc\") " pod="metallb-system/metallb-operator-controller-manager-6fb7f5d8bb-v59tk" Dec 10 11:06:04 crc kubenswrapper[4780]: I1210 11:06:04.319669 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/d811e5e5-930d-403f-92a3-0fb26a063acc-webhook-cert\") pod \"metallb-operator-controller-manager-6fb7f5d8bb-v59tk\" (UID: \"d811e5e5-930d-403f-92a3-0fb26a063acc\") " pod="metallb-system/metallb-operator-controller-manager-6fb7f5d8bb-v59tk" Dec 10 11:06:04 crc kubenswrapper[4780]: I1210 11:06:04.421622 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zvdlb\" (UniqueName: \"kubernetes.io/projected/d811e5e5-930d-403f-92a3-0fb26a063acc-kube-api-access-zvdlb\") pod \"metallb-operator-controller-manager-6fb7f5d8bb-v59tk\" (UID: \"d811e5e5-930d-403f-92a3-0fb26a063acc\") " pod="metallb-system/metallb-operator-controller-manager-6fb7f5d8bb-v59tk" Dec 10 11:06:04 crc 
kubenswrapper[4780]: I1210 11:06:04.421728 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/d811e5e5-930d-403f-92a3-0fb26a063acc-apiservice-cert\") pod \"metallb-operator-controller-manager-6fb7f5d8bb-v59tk\" (UID: \"d811e5e5-930d-403f-92a3-0fb26a063acc\") " pod="metallb-system/metallb-operator-controller-manager-6fb7f5d8bb-v59tk" Dec 10 11:06:04 crc kubenswrapper[4780]: I1210 11:06:04.421760 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/d811e5e5-930d-403f-92a3-0fb26a063acc-webhook-cert\") pod \"metallb-operator-controller-manager-6fb7f5d8bb-v59tk\" (UID: \"d811e5e5-930d-403f-92a3-0fb26a063acc\") " pod="metallb-system/metallb-operator-controller-manager-6fb7f5d8bb-v59tk" Dec 10 11:06:04 crc kubenswrapper[4780]: I1210 11:06:04.444559 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/d811e5e5-930d-403f-92a3-0fb26a063acc-apiservice-cert\") pod \"metallb-operator-controller-manager-6fb7f5d8bb-v59tk\" (UID: \"d811e5e5-930d-403f-92a3-0fb26a063acc\") " pod="metallb-system/metallb-operator-controller-manager-6fb7f5d8bb-v59tk" Dec 10 11:06:04 crc kubenswrapper[4780]: I1210 11:06:04.444568 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/d811e5e5-930d-403f-92a3-0fb26a063acc-webhook-cert\") pod \"metallb-operator-controller-manager-6fb7f5d8bb-v59tk\" (UID: \"d811e5e5-930d-403f-92a3-0fb26a063acc\") " pod="metallb-system/metallb-operator-controller-manager-6fb7f5d8bb-v59tk" Dec 10 11:06:04 crc kubenswrapper[4780]: I1210 11:06:04.452733 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zvdlb\" (UniqueName: \"kubernetes.io/projected/d811e5e5-930d-403f-92a3-0fb26a063acc-kube-api-access-zvdlb\") pod \"metallb-operator-controller-manager-6fb7f5d8bb-v59tk\" (UID: \"d811e5e5-930d-403f-92a3-0fb26a063acc\") " pod="metallb-system/metallb-operator-controller-manager-6fb7f5d8bb-v59tk" Dec 10 11:06:04 crc kubenswrapper[4780]: I1210 11:06:04.555814 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/metallb-operator-webhook-server-b44d54f55-86z5t"] Dec 10 11:06:04 crc kubenswrapper[4780]: I1210 11:06:04.557018 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/metallb-operator-webhook-server-b44d54f55-86z5t" Dec 10 11:06:04 crc kubenswrapper[4780]: I1210 11:06:04.561404 4780 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-webhook-server-service-cert" Dec 10 11:06:04 crc kubenswrapper[4780]: I1210 11:06:04.561404 4780 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-webhook-cert" Dec 10 11:06:04 crc kubenswrapper[4780]: I1210 11:06:04.561673 4780 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"controller-dockercfg-24fmn" Dec 10 11:06:04 crc kubenswrapper[4780]: I1210 11:06:04.586387 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-webhook-server-b44d54f55-86z5t"] Dec 10 11:06:04 crc kubenswrapper[4780]: I1210 11:06:04.596737 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/metallb-operator-controller-manager-6fb7f5d8bb-v59tk" Dec 10 11:06:04 crc kubenswrapper[4780]: I1210 11:06:04.726442 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/2a8c4e94-9c27-48fa-aec9-21a3cd0af0a7-apiservice-cert\") pod \"metallb-operator-webhook-server-b44d54f55-86z5t\" (UID: \"2a8c4e94-9c27-48fa-aec9-21a3cd0af0a7\") " pod="metallb-system/metallb-operator-webhook-server-b44d54f55-86z5t" Dec 10 11:06:04 crc kubenswrapper[4780]: I1210 11:06:04.726601 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/2a8c4e94-9c27-48fa-aec9-21a3cd0af0a7-webhook-cert\") pod \"metallb-operator-webhook-server-b44d54f55-86z5t\" (UID: \"2a8c4e94-9c27-48fa-aec9-21a3cd0af0a7\") " pod="metallb-system/metallb-operator-webhook-server-b44d54f55-86z5t" Dec 10 11:06:04 crc kubenswrapper[4780]: I1210 11:06:04.726732 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-c5q69\" (UniqueName: \"kubernetes.io/projected/2a8c4e94-9c27-48fa-aec9-21a3cd0af0a7-kube-api-access-c5q69\") pod \"metallb-operator-webhook-server-b44d54f55-86z5t\" (UID: \"2a8c4e94-9c27-48fa-aec9-21a3cd0af0a7\") " pod="metallb-system/metallb-operator-webhook-server-b44d54f55-86z5t" Dec 10 11:06:04 crc kubenswrapper[4780]: I1210 11:06:04.828896 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-c5q69\" (UniqueName: \"kubernetes.io/projected/2a8c4e94-9c27-48fa-aec9-21a3cd0af0a7-kube-api-access-c5q69\") pod \"metallb-operator-webhook-server-b44d54f55-86z5t\" (UID: \"2a8c4e94-9c27-48fa-aec9-21a3cd0af0a7\") " pod="metallb-system/metallb-operator-webhook-server-b44d54f55-86z5t" Dec 10 11:06:04 crc kubenswrapper[4780]: I1210 11:06:04.828983 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/2a8c4e94-9c27-48fa-aec9-21a3cd0af0a7-apiservice-cert\") pod \"metallb-operator-webhook-server-b44d54f55-86z5t\" (UID: \"2a8c4e94-9c27-48fa-aec9-21a3cd0af0a7\") " pod="metallb-system/metallb-operator-webhook-server-b44d54f55-86z5t" Dec 10 11:06:04 crc kubenswrapper[4780]: I1210 11:06:04.829051 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/2a8c4e94-9c27-48fa-aec9-21a3cd0af0a7-webhook-cert\") pod \"metallb-operator-webhook-server-b44d54f55-86z5t\" (UID: \"2a8c4e94-9c27-48fa-aec9-21a3cd0af0a7\") " pod="metallb-system/metallb-operator-webhook-server-b44d54f55-86z5t" Dec 10 11:06:04 crc kubenswrapper[4780]: I1210 11:06:04.833891 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/2a8c4e94-9c27-48fa-aec9-21a3cd0af0a7-apiservice-cert\") pod \"metallb-operator-webhook-server-b44d54f55-86z5t\" (UID: \"2a8c4e94-9c27-48fa-aec9-21a3cd0af0a7\") " pod="metallb-system/metallb-operator-webhook-server-b44d54f55-86z5t" Dec 10 11:06:04 crc kubenswrapper[4780]: I1210 11:06:04.838527 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/2a8c4e94-9c27-48fa-aec9-21a3cd0af0a7-webhook-cert\") pod \"metallb-operator-webhook-server-b44d54f55-86z5t\" (UID: \"2a8c4e94-9c27-48fa-aec9-21a3cd0af0a7\") " 
pod="metallb-system/metallb-operator-webhook-server-b44d54f55-86z5t" Dec 10 11:06:04 crc kubenswrapper[4780]: I1210 11:06:04.847211 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-c5q69\" (UniqueName: \"kubernetes.io/projected/2a8c4e94-9c27-48fa-aec9-21a3cd0af0a7-kube-api-access-c5q69\") pod \"metallb-operator-webhook-server-b44d54f55-86z5t\" (UID: \"2a8c4e94-9c27-48fa-aec9-21a3cd0af0a7\") " pod="metallb-system/metallb-operator-webhook-server-b44d54f55-86z5t" Dec 10 11:06:04 crc kubenswrapper[4780]: I1210 11:06:04.876098 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/metallb-operator-webhook-server-b44d54f55-86z5t" Dec 10 11:06:05 crc kubenswrapper[4780]: I1210 11:06:05.151548 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-controller-manager-6fb7f5d8bb-v59tk"] Dec 10 11:06:05 crc kubenswrapper[4780]: W1210 11:06:05.392898 4780 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd811e5e5_930d_403f_92a3_0fb26a063acc.slice/crio-75f9275a9e9133832348399b58b8678e78967461af63d1ebfbd5e71d5bbc3c29 WatchSource:0}: Error finding container 75f9275a9e9133832348399b58b8678e78967461af63d1ebfbd5e71d5bbc3c29: Status 404 returned error can't find the container with id 75f9275a9e9133832348399b58b8678e78967461af63d1ebfbd5e71d5bbc3c29 Dec 10 11:06:05 crc kubenswrapper[4780]: I1210 11:06:05.441379 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-webhook-server-b44d54f55-86z5t"] Dec 10 11:06:05 crc kubenswrapper[4780]: I1210 11:06:05.570886 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-webhook-server-b44d54f55-86z5t" event={"ID":"2a8c4e94-9c27-48fa-aec9-21a3cd0af0a7","Type":"ContainerStarted","Data":"9fbb2ac04909907a7d6fa0f1634d0775df451d71785cc9a201afb4710e9774b3"} Dec 10 11:06:05 crc kubenswrapper[4780]: I1210 11:06:05.584575 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-controller-manager-6fb7f5d8bb-v59tk" event={"ID":"d811e5e5-930d-403f-92a3-0fb26a063acc","Type":"ContainerStarted","Data":"75f9275a9e9133832348399b58b8678e78967461af63d1ebfbd5e71d5bbc3c29"} Dec 10 11:06:24 crc kubenswrapper[4780]: I1210 11:06:24.172838 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-controller-manager-6fb7f5d8bb-v59tk" event={"ID":"d811e5e5-930d-403f-92a3-0fb26a063acc","Type":"ContainerStarted","Data":"09cdd506523172697ded0db62e849f0920cb2884fa16521396de08faa0f58ff0"} Dec 10 11:06:24 crc kubenswrapper[4780]: I1210 11:06:24.173614 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/metallb-operator-controller-manager-6fb7f5d8bb-v59tk" Dec 10 11:06:24 crc kubenswrapper[4780]: I1210 11:06:24.202484 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/metallb-operator-controller-manager-6fb7f5d8bb-v59tk" podStartSLOduration=2.133939622 podStartE2EDuration="20.202434503s" podCreationTimestamp="2025-12-10 11:06:04 +0000 UTC" firstStartedPulling="2025-12-10 11:06:05.414005352 +0000 UTC m=+1270.267398795" lastFinishedPulling="2025-12-10 11:06:23.482500233 +0000 UTC m=+1288.335893676" observedRunningTime="2025-12-10 11:06:24.200969716 +0000 UTC m=+1289.054363169" watchObservedRunningTime="2025-12-10 11:06:24.202434503 +0000 UTC m=+1289.055827946" Dec 10 11:06:25 crc 
kubenswrapper[4780]: I1210 11:06:25.183591 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-webhook-server-b44d54f55-86z5t" event={"ID":"2a8c4e94-9c27-48fa-aec9-21a3cd0af0a7","Type":"ContainerStarted","Data":"da54deba4f029eff19568420a8c9b1a298332e6c53d173c43709f5dfc531ed5b"} Dec 10 11:06:25 crc kubenswrapper[4780]: I1210 11:06:25.421404 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/metallb-operator-webhook-server-b44d54f55-86z5t" podStartSLOduration=2.353879266 podStartE2EDuration="21.421376541s" podCreationTimestamp="2025-12-10 11:06:04 +0000 UTC" firstStartedPulling="2025-12-10 11:06:05.47303478 +0000 UTC m=+1270.326428223" lastFinishedPulling="2025-12-10 11:06:24.540532055 +0000 UTC m=+1289.393925498" observedRunningTime="2025-12-10 11:06:25.215693073 +0000 UTC m=+1290.069086516" watchObservedRunningTime="2025-12-10 11:06:25.421376541 +0000 UTC m=+1290.274769984" Dec 10 11:06:26 crc kubenswrapper[4780]: I1210 11:06:26.191070 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/metallb-operator-webhook-server-b44d54f55-86z5t" Dec 10 11:06:27 crc kubenswrapper[4780]: I1210 11:06:27.475908 4780 patch_prober.go:28] interesting pod/machine-config-daemon-xhdr5 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 10 11:06:27 crc kubenswrapper[4780]: I1210 11:06:27.476052 4780 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" podUID="6bf1dca1-b191-4796-b326-baac53e84045" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 10 11:06:27 crc kubenswrapper[4780]: I1210 11:06:27.476115 4780 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" Dec 10 11:06:27 crc kubenswrapper[4780]: I1210 11:06:27.477024 4780 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"2a20f929d413d862ac186ee1144a2d1d554405829efe199ddb0dfa3f0f9ae340"} pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Dec 10 11:06:27 crc kubenswrapper[4780]: I1210 11:06:27.477142 4780 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" podUID="6bf1dca1-b191-4796-b326-baac53e84045" containerName="machine-config-daemon" containerID="cri-o://2a20f929d413d862ac186ee1144a2d1d554405829efe199ddb0dfa3f0f9ae340" gracePeriod=600 Dec 10 11:06:28 crc kubenswrapper[4780]: I1210 11:06:28.209529 4780 generic.go:334] "Generic (PLEG): container finished" podID="6bf1dca1-b191-4796-b326-baac53e84045" containerID="2a20f929d413d862ac186ee1144a2d1d554405829efe199ddb0dfa3f0f9ae340" exitCode=0 Dec 10 11:06:28 crc kubenswrapper[4780]: I1210 11:06:28.209868 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" event={"ID":"6bf1dca1-b191-4796-b326-baac53e84045","Type":"ContainerDied","Data":"2a20f929d413d862ac186ee1144a2d1d554405829efe199ddb0dfa3f0f9ae340"} Dec 10 
11:06:28 crc kubenswrapper[4780]: I1210 11:06:28.209962 4780 scope.go:117] "RemoveContainer" containerID="ed771cb9f33bfa44077ddff43b64d4340b6f781baf12fbbaaac2b0023588cc1c" Dec 10 11:06:29 crc kubenswrapper[4780]: I1210 11:06:29.264169 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" event={"ID":"6bf1dca1-b191-4796-b326-baac53e84045","Type":"ContainerStarted","Data":"c3c4f9f16910550f67c3bdc81fc9c721bc946d7793a35038605a3c1b6eb79b3b"} Dec 10 11:06:34 crc kubenswrapper[4780]: I1210 11:06:34.883942 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/metallb-operator-webhook-server-b44d54f55-86z5t" Dec 10 11:06:54 crc kubenswrapper[4780]: I1210 11:06:54.600222 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/metallb-operator-controller-manager-6fb7f5d8bb-v59tk" Dec 10 11:06:55 crc kubenswrapper[4780]: I1210 11:06:55.347355 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/frr-k8s-webhook-server-7fcb986d4-cllk5"] Dec 10 11:06:55 crc kubenswrapper[4780]: I1210 11:06:55.349348 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/frr-k8s-webhook-server-7fcb986d4-cllk5" Dec 10 11:06:55 crc kubenswrapper[4780]: I1210 11:06:55.361613 4780 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-daemon-dockercfg-q795k" Dec 10 11:06:55 crc kubenswrapper[4780]: I1210 11:06:55.372454 4780 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-webhook-server-cert" Dec 10 11:06:55 crc kubenswrapper[4780]: I1210 11:06:55.374334 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/frr-k8s-jcfnl"] Dec 10 11:06:55 crc kubenswrapper[4780]: I1210 11:06:55.378819 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/frr-k8s-jcfnl" Dec 10 11:06:55 crc kubenswrapper[4780]: I1210 11:06:55.381714 4780 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-certs-secret" Dec 10 11:06:55 crc kubenswrapper[4780]: I1210 11:06:55.382501 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"frr-startup" Dec 10 11:06:55 crc kubenswrapper[4780]: I1210 11:06:55.446089 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/6974b1ff-a49a-4211-90a1-802e36919842-metrics-certs\") pod \"frr-k8s-jcfnl\" (UID: \"6974b1ff-a49a-4211-90a1-802e36919842\") " pod="metallb-system/frr-k8s-jcfnl" Dec 10 11:06:55 crc kubenswrapper[4780]: I1210 11:06:55.446141 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/frr-k8s-webhook-server-7fcb986d4-cllk5"] Dec 10 11:06:55 crc kubenswrapper[4780]: I1210 11:06:55.446186 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/b32ab25c-5e31-4efb-b8d0-0ae92e4e0165-cert\") pod \"frr-k8s-webhook-server-7fcb986d4-cllk5\" (UID: \"b32ab25c-5e31-4efb-b8d0-0ae92e4e0165\") " pod="metallb-system/frr-k8s-webhook-server-7fcb986d4-cllk5" Dec 10 11:06:55 crc kubenswrapper[4780]: I1210 11:06:55.446277 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"frr-conf\" (UniqueName: \"kubernetes.io/empty-dir/6974b1ff-a49a-4211-90a1-802e36919842-frr-conf\") pod \"frr-k8s-jcfnl\" (UID: \"6974b1ff-a49a-4211-90a1-802e36919842\") " pod="metallb-system/frr-k8s-jcfnl" Dec 10 11:06:55 crc kubenswrapper[4780]: I1210 11:06:55.446409 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"reloader\" (UniqueName: \"kubernetes.io/empty-dir/6974b1ff-a49a-4211-90a1-802e36919842-reloader\") pod \"frr-k8s-jcfnl\" (UID: \"6974b1ff-a49a-4211-90a1-802e36919842\") " pod="metallb-system/frr-k8s-jcfnl" Dec 10 11:06:55 crc kubenswrapper[4780]: I1210 11:06:55.446731 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qgvn4\" (UniqueName: \"kubernetes.io/projected/b32ab25c-5e31-4efb-b8d0-0ae92e4e0165-kube-api-access-qgvn4\") pod \"frr-k8s-webhook-server-7fcb986d4-cllk5\" (UID: \"b32ab25c-5e31-4efb-b8d0-0ae92e4e0165\") " pod="metallb-system/frr-k8s-webhook-server-7fcb986d4-cllk5" Dec 10 11:06:55 crc kubenswrapper[4780]: I1210 11:06:55.446799 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dp2qt\" (UniqueName: \"kubernetes.io/projected/6974b1ff-a49a-4211-90a1-802e36919842-kube-api-access-dp2qt\") pod \"frr-k8s-jcfnl\" (UID: \"6974b1ff-a49a-4211-90a1-802e36919842\") " pod="metallb-system/frr-k8s-jcfnl" Dec 10 11:06:55 crc kubenswrapper[4780]: I1210 11:06:55.446870 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics\" (UniqueName: \"kubernetes.io/empty-dir/6974b1ff-a49a-4211-90a1-802e36919842-metrics\") pod \"frr-k8s-jcfnl\" (UID: \"6974b1ff-a49a-4211-90a1-802e36919842\") " pod="metallb-system/frr-k8s-jcfnl" Dec 10 11:06:55 crc kubenswrapper[4780]: I1210 11:06:55.446976 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"frr-sockets\" (UniqueName: 
\"kubernetes.io/empty-dir/6974b1ff-a49a-4211-90a1-802e36919842-frr-sockets\") pod \"frr-k8s-jcfnl\" (UID: \"6974b1ff-a49a-4211-90a1-802e36919842\") " pod="metallb-system/frr-k8s-jcfnl" Dec 10 11:06:55 crc kubenswrapper[4780]: I1210 11:06:55.447024 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"frr-startup\" (UniqueName: \"kubernetes.io/configmap/6974b1ff-a49a-4211-90a1-802e36919842-frr-startup\") pod \"frr-k8s-jcfnl\" (UID: \"6974b1ff-a49a-4211-90a1-802e36919842\") " pod="metallb-system/frr-k8s-jcfnl" Dec 10 11:06:55 crc kubenswrapper[4780]: I1210 11:06:55.477823 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/speaker-hb4wd"] Dec 10 11:06:55 crc kubenswrapper[4780]: I1210 11:06:55.483053 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/speaker-hb4wd" Dec 10 11:06:55 crc kubenswrapper[4780]: I1210 11:06:55.490546 4780 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-memberlist" Dec 10 11:06:55 crc kubenswrapper[4780]: I1210 11:06:55.490690 4780 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"speaker-dockercfg-jrfxl" Dec 10 11:06:55 crc kubenswrapper[4780]: I1210 11:06:55.490575 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"metallb-excludel2" Dec 10 11:06:55 crc kubenswrapper[4780]: I1210 11:06:55.491213 4780 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"speaker-certs-secret" Dec 10 11:06:55 crc kubenswrapper[4780]: I1210 11:06:55.505122 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/controller-f8648f98b-xs4hm"] Dec 10 11:06:55 crc kubenswrapper[4780]: I1210 11:06:55.509794 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/controller-f8648f98b-xs4hm" Dec 10 11:06:55 crc kubenswrapper[4780]: I1210 11:06:55.516596 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/controller-f8648f98b-xs4hm"] Dec 10 11:06:55 crc kubenswrapper[4780]: I1210 11:06:55.517270 4780 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"controller-certs-secret" Dec 10 11:06:55 crc kubenswrapper[4780]: I1210 11:06:55.548804 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/6974b1ff-a49a-4211-90a1-802e36919842-metrics-certs\") pod \"frr-k8s-jcfnl\" (UID: \"6974b1ff-a49a-4211-90a1-802e36919842\") " pod="metallb-system/frr-k8s-jcfnl" Dec 10 11:06:55 crc kubenswrapper[4780]: I1210 11:06:55.548887 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/b32ab25c-5e31-4efb-b8d0-0ae92e4e0165-cert\") pod \"frr-k8s-webhook-server-7fcb986d4-cllk5\" (UID: \"b32ab25c-5e31-4efb-b8d0-0ae92e4e0165\") " pod="metallb-system/frr-k8s-webhook-server-7fcb986d4-cllk5" Dec 10 11:06:55 crc kubenswrapper[4780]: I1210 11:06:55.548911 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"frr-conf\" (UniqueName: \"kubernetes.io/empty-dir/6974b1ff-a49a-4211-90a1-802e36919842-frr-conf\") pod \"frr-k8s-jcfnl\" (UID: \"6974b1ff-a49a-4211-90a1-802e36919842\") " pod="metallb-system/frr-k8s-jcfnl" Dec 10 11:06:55 crc kubenswrapper[4780]: I1210 11:06:55.548966 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"reloader\" (UniqueName: \"kubernetes.io/empty-dir/6974b1ff-a49a-4211-90a1-802e36919842-reloader\") pod \"frr-k8s-jcfnl\" (UID: \"6974b1ff-a49a-4211-90a1-802e36919842\") " pod="metallb-system/frr-k8s-jcfnl" Dec 10 11:06:55 crc kubenswrapper[4780]: I1210 11:06:55.549094 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qgvn4\" (UniqueName: \"kubernetes.io/projected/b32ab25c-5e31-4efb-b8d0-0ae92e4e0165-kube-api-access-qgvn4\") pod \"frr-k8s-webhook-server-7fcb986d4-cllk5\" (UID: \"b32ab25c-5e31-4efb-b8d0-0ae92e4e0165\") " pod="metallb-system/frr-k8s-webhook-server-7fcb986d4-cllk5" Dec 10 11:06:55 crc kubenswrapper[4780]: I1210 11:06:55.549117 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dp2qt\" (UniqueName: \"kubernetes.io/projected/6974b1ff-a49a-4211-90a1-802e36919842-kube-api-access-dp2qt\") pod \"frr-k8s-jcfnl\" (UID: \"6974b1ff-a49a-4211-90a1-802e36919842\") " pod="metallb-system/frr-k8s-jcfnl" Dec 10 11:06:55 crc kubenswrapper[4780]: I1210 11:06:55.549146 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics\" (UniqueName: \"kubernetes.io/empty-dir/6974b1ff-a49a-4211-90a1-802e36919842-metrics\") pod \"frr-k8s-jcfnl\" (UID: \"6974b1ff-a49a-4211-90a1-802e36919842\") " pod="metallb-system/frr-k8s-jcfnl" Dec 10 11:06:55 crc kubenswrapper[4780]: E1210 11:06:55.549156 4780 secret.go:188] Couldn't get secret metallb-system/frr-k8s-certs-secret: secret "frr-k8s-certs-secret" not found Dec 10 11:06:55 crc kubenswrapper[4780]: E1210 11:06:55.549374 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/6974b1ff-a49a-4211-90a1-802e36919842-metrics-certs podName:6974b1ff-a49a-4211-90a1-802e36919842 nodeName:}" failed. 
No retries permitted until 2025-12-10 11:06:56.049311674 +0000 UTC m=+1320.902705117 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/6974b1ff-a49a-4211-90a1-802e36919842-metrics-certs") pod "frr-k8s-jcfnl" (UID: "6974b1ff-a49a-4211-90a1-802e36919842") : secret "frr-k8s-certs-secret" not found Dec 10 11:06:55 crc kubenswrapper[4780]: I1210 11:06:55.549185 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"frr-sockets\" (UniqueName: \"kubernetes.io/empty-dir/6974b1ff-a49a-4211-90a1-802e36919842-frr-sockets\") pod \"frr-k8s-jcfnl\" (UID: \"6974b1ff-a49a-4211-90a1-802e36919842\") " pod="metallb-system/frr-k8s-jcfnl" Dec 10 11:06:55 crc kubenswrapper[4780]: I1210 11:06:55.549541 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"frr-startup\" (UniqueName: \"kubernetes.io/configmap/6974b1ff-a49a-4211-90a1-802e36919842-frr-startup\") pod \"frr-k8s-jcfnl\" (UID: \"6974b1ff-a49a-4211-90a1-802e36919842\") " pod="metallb-system/frr-k8s-jcfnl" Dec 10 11:06:55 crc kubenswrapper[4780]: E1210 11:06:55.549839 4780 secret.go:188] Couldn't get secret metallb-system/frr-k8s-webhook-server-cert: secret "frr-k8s-webhook-server-cert" not found Dec 10 11:06:55 crc kubenswrapper[4780]: E1210 11:06:55.550041 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/b32ab25c-5e31-4efb-b8d0-0ae92e4e0165-cert podName:b32ab25c-5e31-4efb-b8d0-0ae92e4e0165 nodeName:}" failed. No retries permitted until 2025-12-10 11:06:56.050011321 +0000 UTC m=+1320.903404764 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/b32ab25c-5e31-4efb-b8d0-0ae92e4e0165-cert") pod "frr-k8s-webhook-server-7fcb986d4-cllk5" (UID: "b32ab25c-5e31-4efb-b8d0-0ae92e4e0165") : secret "frr-k8s-webhook-server-cert" not found Dec 10 11:06:55 crc kubenswrapper[4780]: I1210 11:06:55.549908 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"frr-sockets\" (UniqueName: \"kubernetes.io/empty-dir/6974b1ff-a49a-4211-90a1-802e36919842-frr-sockets\") pod \"frr-k8s-jcfnl\" (UID: \"6974b1ff-a49a-4211-90a1-802e36919842\") " pod="metallb-system/frr-k8s-jcfnl" Dec 10 11:06:55 crc kubenswrapper[4780]: I1210 11:06:55.550205 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics\" (UniqueName: \"kubernetes.io/empty-dir/6974b1ff-a49a-4211-90a1-802e36919842-metrics\") pod \"frr-k8s-jcfnl\" (UID: \"6974b1ff-a49a-4211-90a1-802e36919842\") " pod="metallb-system/frr-k8s-jcfnl" Dec 10 11:06:55 crc kubenswrapper[4780]: I1210 11:06:55.550275 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"frr-conf\" (UniqueName: \"kubernetes.io/empty-dir/6974b1ff-a49a-4211-90a1-802e36919842-frr-conf\") pod \"frr-k8s-jcfnl\" (UID: \"6974b1ff-a49a-4211-90a1-802e36919842\") " pod="metallb-system/frr-k8s-jcfnl" Dec 10 11:06:55 crc kubenswrapper[4780]: I1210 11:06:55.550578 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"reloader\" (UniqueName: \"kubernetes.io/empty-dir/6974b1ff-a49a-4211-90a1-802e36919842-reloader\") pod \"frr-k8s-jcfnl\" (UID: \"6974b1ff-a49a-4211-90a1-802e36919842\") " pod="metallb-system/frr-k8s-jcfnl" Dec 10 11:06:55 crc kubenswrapper[4780]: I1210 11:06:55.552907 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"frr-startup\" (UniqueName: \"kubernetes.io/configmap/6974b1ff-a49a-4211-90a1-802e36919842-frr-startup\") pod 
\"frr-k8s-jcfnl\" (UID: \"6974b1ff-a49a-4211-90a1-802e36919842\") " pod="metallb-system/frr-k8s-jcfnl" Dec 10 11:06:55 crc kubenswrapper[4780]: I1210 11:06:55.574004 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qgvn4\" (UniqueName: \"kubernetes.io/projected/b32ab25c-5e31-4efb-b8d0-0ae92e4e0165-kube-api-access-qgvn4\") pod \"frr-k8s-webhook-server-7fcb986d4-cllk5\" (UID: \"b32ab25c-5e31-4efb-b8d0-0ae92e4e0165\") " pod="metallb-system/frr-k8s-webhook-server-7fcb986d4-cllk5" Dec 10 11:06:55 crc kubenswrapper[4780]: I1210 11:06:55.574124 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dp2qt\" (UniqueName: \"kubernetes.io/projected/6974b1ff-a49a-4211-90a1-802e36919842-kube-api-access-dp2qt\") pod \"frr-k8s-jcfnl\" (UID: \"6974b1ff-a49a-4211-90a1-802e36919842\") " pod="metallb-system/frr-k8s-jcfnl" Dec 10 11:06:55 crc kubenswrapper[4780]: I1210 11:06:55.651907 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/2b6296fd-1a9f-4737-b9dd-332a1db07171-memberlist\") pod \"speaker-hb4wd\" (UID: \"2b6296fd-1a9f-4737-b9dd-332a1db07171\") " pod="metallb-system/speaker-hb4wd" Dec 10 11:06:55 crc kubenswrapper[4780]: I1210 11:06:55.652022 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/563406a8-97e3-4591-8448-a666a0ccaacc-metrics-certs\") pod \"controller-f8648f98b-xs4hm\" (UID: \"563406a8-97e3-4591-8448-a666a0ccaacc\") " pod="metallb-system/controller-f8648f98b-xs4hm" Dec 10 11:06:55 crc kubenswrapper[4780]: I1210 11:06:55.652100 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metallb-excludel2\" (UniqueName: \"kubernetes.io/configmap/2b6296fd-1a9f-4737-b9dd-332a1db07171-metallb-excludel2\") pod \"speaker-hb4wd\" (UID: \"2b6296fd-1a9f-4737-b9dd-332a1db07171\") " pod="metallb-system/speaker-hb4wd" Dec 10 11:06:55 crc kubenswrapper[4780]: I1210 11:06:55.652128 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/563406a8-97e3-4591-8448-a666a0ccaacc-cert\") pod \"controller-f8648f98b-xs4hm\" (UID: \"563406a8-97e3-4591-8448-a666a0ccaacc\") " pod="metallb-system/controller-f8648f98b-xs4hm" Dec 10 11:06:55 crc kubenswrapper[4780]: I1210 11:06:55.652224 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vsv5p\" (UniqueName: \"kubernetes.io/projected/2b6296fd-1a9f-4737-b9dd-332a1db07171-kube-api-access-vsv5p\") pod \"speaker-hb4wd\" (UID: \"2b6296fd-1a9f-4737-b9dd-332a1db07171\") " pod="metallb-system/speaker-hb4wd" Dec 10 11:06:55 crc kubenswrapper[4780]: I1210 11:06:55.652269 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6gvzx\" (UniqueName: \"kubernetes.io/projected/563406a8-97e3-4591-8448-a666a0ccaacc-kube-api-access-6gvzx\") pod \"controller-f8648f98b-xs4hm\" (UID: \"563406a8-97e3-4591-8448-a666a0ccaacc\") " pod="metallb-system/controller-f8648f98b-xs4hm" Dec 10 11:06:55 crc kubenswrapper[4780]: I1210 11:06:55.652288 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/2b6296fd-1a9f-4737-b9dd-332a1db07171-metrics-certs\") pod 
\"speaker-hb4wd\" (UID: \"2b6296fd-1a9f-4737-b9dd-332a1db07171\") " pod="metallb-system/speaker-hb4wd" Dec 10 11:06:55 crc kubenswrapper[4780]: I1210 11:06:55.754283 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/2b6296fd-1a9f-4737-b9dd-332a1db07171-memberlist\") pod \"speaker-hb4wd\" (UID: \"2b6296fd-1a9f-4737-b9dd-332a1db07171\") " pod="metallb-system/speaker-hb4wd" Dec 10 11:06:55 crc kubenswrapper[4780]: I1210 11:06:55.754676 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/563406a8-97e3-4591-8448-a666a0ccaacc-metrics-certs\") pod \"controller-f8648f98b-xs4hm\" (UID: \"563406a8-97e3-4591-8448-a666a0ccaacc\") " pod="metallb-system/controller-f8648f98b-xs4hm" Dec 10 11:06:55 crc kubenswrapper[4780]: I1210 11:06:55.754840 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metallb-excludel2\" (UniqueName: \"kubernetes.io/configmap/2b6296fd-1a9f-4737-b9dd-332a1db07171-metallb-excludel2\") pod \"speaker-hb4wd\" (UID: \"2b6296fd-1a9f-4737-b9dd-332a1db07171\") " pod="metallb-system/speaker-hb4wd" Dec 10 11:06:55 crc kubenswrapper[4780]: I1210 11:06:55.755007 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/563406a8-97e3-4591-8448-a666a0ccaacc-cert\") pod \"controller-f8648f98b-xs4hm\" (UID: \"563406a8-97e3-4591-8448-a666a0ccaacc\") " pod="metallb-system/controller-f8648f98b-xs4hm" Dec 10 11:06:55 crc kubenswrapper[4780]: I1210 11:06:55.755137 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vsv5p\" (UniqueName: \"kubernetes.io/projected/2b6296fd-1a9f-4737-b9dd-332a1db07171-kube-api-access-vsv5p\") pod \"speaker-hb4wd\" (UID: \"2b6296fd-1a9f-4737-b9dd-332a1db07171\") " pod="metallb-system/speaker-hb4wd" Dec 10 11:06:55 crc kubenswrapper[4780]: I1210 11:06:55.755236 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6gvzx\" (UniqueName: \"kubernetes.io/projected/563406a8-97e3-4591-8448-a666a0ccaacc-kube-api-access-6gvzx\") pod \"controller-f8648f98b-xs4hm\" (UID: \"563406a8-97e3-4591-8448-a666a0ccaacc\") " pod="metallb-system/controller-f8648f98b-xs4hm" Dec 10 11:06:55 crc kubenswrapper[4780]: I1210 11:06:55.755328 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/2b6296fd-1a9f-4737-b9dd-332a1db07171-metrics-certs\") pod \"speaker-hb4wd\" (UID: \"2b6296fd-1a9f-4737-b9dd-332a1db07171\") " pod="metallb-system/speaker-hb4wd" Dec 10 11:06:55 crc kubenswrapper[4780]: I1210 11:06:55.755790 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metallb-excludel2\" (UniqueName: \"kubernetes.io/configmap/2b6296fd-1a9f-4737-b9dd-332a1db07171-metallb-excludel2\") pod \"speaker-hb4wd\" (UID: \"2b6296fd-1a9f-4737-b9dd-332a1db07171\") " pod="metallb-system/speaker-hb4wd" Dec 10 11:06:55 crc kubenswrapper[4780]: E1210 11:06:55.754529 4780 secret.go:188] Couldn't get secret metallb-system/metallb-memberlist: secret "metallb-memberlist" not found Dec 10 11:06:55 crc kubenswrapper[4780]: E1210 11:06:55.754788 4780 secret.go:188] Couldn't get secret metallb-system/controller-certs-secret: secret "controller-certs-secret" not found Dec 10 11:06:55 crc kubenswrapper[4780]: E1210 11:06:55.757139 4780 nestedpendingoperations.go:348] Operation 
for "{volumeName:kubernetes.io/secret/563406a8-97e3-4591-8448-a666a0ccaacc-metrics-certs podName:563406a8-97e3-4591-8448-a666a0ccaacc nodeName:}" failed. No retries permitted until 2025-12-10 11:06:56.257094784 +0000 UTC m=+1321.110488287 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/563406a8-97e3-4591-8448-a666a0ccaacc-metrics-certs") pod "controller-f8648f98b-xs4hm" (UID: "563406a8-97e3-4591-8448-a666a0ccaacc") : secret "controller-certs-secret" not found Dec 10 11:06:55 crc kubenswrapper[4780]: E1210 11:06:55.755586 4780 secret.go:188] Couldn't get secret metallb-system/speaker-certs-secret: secret "speaker-certs-secret" not found Dec 10 11:06:55 crc kubenswrapper[4780]: E1210 11:06:55.757408 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/2b6296fd-1a9f-4737-b9dd-332a1db07171-metrics-certs podName:2b6296fd-1a9f-4737-b9dd-332a1db07171 nodeName:}" failed. No retries permitted until 2025-12-10 11:06:56.257397792 +0000 UTC m=+1321.110791235 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/2b6296fd-1a9f-4737-b9dd-332a1db07171-metrics-certs") pod "speaker-hb4wd" (UID: "2b6296fd-1a9f-4737-b9dd-332a1db07171") : secret "speaker-certs-secret" not found Dec 10 11:06:55 crc kubenswrapper[4780]: E1210 11:06:55.757486 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/2b6296fd-1a9f-4737-b9dd-332a1db07171-memberlist podName:2b6296fd-1a9f-4737-b9dd-332a1db07171 nodeName:}" failed. No retries permitted until 2025-12-10 11:06:56.257469564 +0000 UTC m=+1321.110863097 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "memberlist" (UniqueName: "kubernetes.io/secret/2b6296fd-1a9f-4737-b9dd-332a1db07171-memberlist") pod "speaker-hb4wd" (UID: "2b6296fd-1a9f-4737-b9dd-332a1db07171") : secret "metallb-memberlist" not found Dec 10 11:06:55 crc kubenswrapper[4780]: I1210 11:06:55.762427 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/563406a8-97e3-4591-8448-a666a0ccaacc-cert\") pod \"controller-f8648f98b-xs4hm\" (UID: \"563406a8-97e3-4591-8448-a666a0ccaacc\") " pod="metallb-system/controller-f8648f98b-xs4hm" Dec 10 11:06:55 crc kubenswrapper[4780]: I1210 11:06:55.776892 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6gvzx\" (UniqueName: \"kubernetes.io/projected/563406a8-97e3-4591-8448-a666a0ccaacc-kube-api-access-6gvzx\") pod \"controller-f8648f98b-xs4hm\" (UID: \"563406a8-97e3-4591-8448-a666a0ccaacc\") " pod="metallb-system/controller-f8648f98b-xs4hm" Dec 10 11:06:55 crc kubenswrapper[4780]: I1210 11:06:55.778524 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vsv5p\" (UniqueName: \"kubernetes.io/projected/2b6296fd-1a9f-4737-b9dd-332a1db07171-kube-api-access-vsv5p\") pod \"speaker-hb4wd\" (UID: \"2b6296fd-1a9f-4737-b9dd-332a1db07171\") " pod="metallb-system/speaker-hb4wd" Dec 10 11:06:56 crc kubenswrapper[4780]: I1210 11:06:56.062003 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/6974b1ff-a49a-4211-90a1-802e36919842-metrics-certs\") pod \"frr-k8s-jcfnl\" (UID: \"6974b1ff-a49a-4211-90a1-802e36919842\") " pod="metallb-system/frr-k8s-jcfnl" Dec 10 11:06:56 crc kubenswrapper[4780]: I1210 11:06:56.062848 4780 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/b32ab25c-5e31-4efb-b8d0-0ae92e4e0165-cert\") pod \"frr-k8s-webhook-server-7fcb986d4-cllk5\" (UID: \"b32ab25c-5e31-4efb-b8d0-0ae92e4e0165\") " pod="metallb-system/frr-k8s-webhook-server-7fcb986d4-cllk5" Dec 10 11:06:56 crc kubenswrapper[4780]: I1210 11:06:56.072199 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/b32ab25c-5e31-4efb-b8d0-0ae92e4e0165-cert\") pod \"frr-k8s-webhook-server-7fcb986d4-cllk5\" (UID: \"b32ab25c-5e31-4efb-b8d0-0ae92e4e0165\") " pod="metallb-system/frr-k8s-webhook-server-7fcb986d4-cllk5" Dec 10 11:06:56 crc kubenswrapper[4780]: I1210 11:06:56.089623 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/6974b1ff-a49a-4211-90a1-802e36919842-metrics-certs\") pod \"frr-k8s-jcfnl\" (UID: \"6974b1ff-a49a-4211-90a1-802e36919842\") " pod="metallb-system/frr-k8s-jcfnl" Dec 10 11:06:56 crc kubenswrapper[4780]: I1210 11:06:56.266615 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/2b6296fd-1a9f-4737-b9dd-332a1db07171-metrics-certs\") pod \"speaker-hb4wd\" (UID: \"2b6296fd-1a9f-4737-b9dd-332a1db07171\") " pod="metallb-system/speaker-hb4wd" Dec 10 11:06:56 crc kubenswrapper[4780]: I1210 11:06:56.266747 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/2b6296fd-1a9f-4737-b9dd-332a1db07171-memberlist\") pod \"speaker-hb4wd\" (UID: \"2b6296fd-1a9f-4737-b9dd-332a1db07171\") " pod="metallb-system/speaker-hb4wd" Dec 10 11:06:56 crc kubenswrapper[4780]: I1210 11:06:56.266791 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/563406a8-97e3-4591-8448-a666a0ccaacc-metrics-certs\") pod \"controller-f8648f98b-xs4hm\" (UID: \"563406a8-97e3-4591-8448-a666a0ccaacc\") " pod="metallb-system/controller-f8648f98b-xs4hm" Dec 10 11:06:56 crc kubenswrapper[4780]: E1210 11:06:56.267281 4780 secret.go:188] Couldn't get secret metallb-system/metallb-memberlist: secret "metallb-memberlist" not found Dec 10 11:06:56 crc kubenswrapper[4780]: E1210 11:06:56.267376 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/2b6296fd-1a9f-4737-b9dd-332a1db07171-memberlist podName:2b6296fd-1a9f-4737-b9dd-332a1db07171 nodeName:}" failed. No retries permitted until 2025-12-10 11:06:57.267357396 +0000 UTC m=+1322.120750839 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "memberlist" (UniqueName: "kubernetes.io/secret/2b6296fd-1a9f-4737-b9dd-332a1db07171-memberlist") pod "speaker-hb4wd" (UID: "2b6296fd-1a9f-4737-b9dd-332a1db07171") : secret "metallb-memberlist" not found Dec 10 11:06:56 crc kubenswrapper[4780]: I1210 11:06:56.270062 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/2b6296fd-1a9f-4737-b9dd-332a1db07171-metrics-certs\") pod \"speaker-hb4wd\" (UID: \"2b6296fd-1a9f-4737-b9dd-332a1db07171\") " pod="metallb-system/speaker-hb4wd" Dec 10 11:06:56 crc kubenswrapper[4780]: I1210 11:06:56.270403 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/563406a8-97e3-4591-8448-a666a0ccaacc-metrics-certs\") pod \"controller-f8648f98b-xs4hm\" (UID: \"563406a8-97e3-4591-8448-a666a0ccaacc\") " pod="metallb-system/controller-f8648f98b-xs4hm" Dec 10 11:06:56 crc kubenswrapper[4780]: I1210 11:06:56.281377 4780 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-daemon-dockercfg-q795k" Dec 10 11:06:56 crc kubenswrapper[4780]: I1210 11:06:56.290228 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/frr-k8s-webhook-server-7fcb986d4-cllk5" Dec 10 11:06:56 crc kubenswrapper[4780]: I1210 11:06:56.306272 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/frr-k8s-jcfnl" Dec 10 11:06:56 crc kubenswrapper[4780]: I1210 11:06:56.430483 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/controller-f8648f98b-xs4hm" Dec 10 11:06:56 crc kubenswrapper[4780]: I1210 11:06:56.775872 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/frr-k8s-webhook-server-7fcb986d4-cllk5"] Dec 10 11:06:56 crc kubenswrapper[4780]: I1210 11:06:56.922899 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/controller-f8648f98b-xs4hm"] Dec 10 11:06:56 crc kubenswrapper[4780]: W1210 11:06:56.927718 4780 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod563406a8_97e3_4591_8448_a666a0ccaacc.slice/crio-596a132c22f53a83c80f7ea5b68faba979b8b8001453a53740dfa908b2b16171 WatchSource:0}: Error finding container 596a132c22f53a83c80f7ea5b68faba979b8b8001453a53740dfa908b2b16171: Status 404 returned error can't find the container with id 596a132c22f53a83c80f7ea5b68faba979b8b8001453a53740dfa908b2b16171 Dec 10 11:06:57 crc kubenswrapper[4780]: I1210 11:06:57.283090 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/2b6296fd-1a9f-4737-b9dd-332a1db07171-memberlist\") pod \"speaker-hb4wd\" (UID: \"2b6296fd-1a9f-4737-b9dd-332a1db07171\") " pod="metallb-system/speaker-hb4wd" Dec 10 11:06:57 crc kubenswrapper[4780]: I1210 11:06:57.290596 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/2b6296fd-1a9f-4737-b9dd-332a1db07171-memberlist\") pod \"speaker-hb4wd\" (UID: \"2b6296fd-1a9f-4737-b9dd-332a1db07171\") " pod="metallb-system/speaker-hb4wd" Dec 10 11:06:57 crc kubenswrapper[4780]: I1210 11:06:57.306737 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/controller-f8648f98b-xs4hm" 
event={"ID":"563406a8-97e3-4591-8448-a666a0ccaacc","Type":"ContainerStarted","Data":"3d3c4092d328e8db36a5dca98a160a6eb28d900b619944cc0cc5b6ff2d4a51ba"} Dec 10 11:06:57 crc kubenswrapper[4780]: I1210 11:06:57.306966 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/controller-f8648f98b-xs4hm" event={"ID":"563406a8-97e3-4591-8448-a666a0ccaacc","Type":"ContainerStarted","Data":"596a132c22f53a83c80f7ea5b68faba979b8b8001453a53740dfa908b2b16171"} Dec 10 11:06:57 crc kubenswrapper[4780]: I1210 11:06:57.308549 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-jcfnl" event={"ID":"6974b1ff-a49a-4211-90a1-802e36919842","Type":"ContainerStarted","Data":"4410df74cf666f4862f18aa53ca5f8d7b736da2900705d708703823a04bea3fc"} Dec 10 11:06:57 crc kubenswrapper[4780]: I1210 11:06:57.309637 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-webhook-server-7fcb986d4-cllk5" event={"ID":"b32ab25c-5e31-4efb-b8d0-0ae92e4e0165","Type":"ContainerStarted","Data":"f7ce512844873e4af7233d90d796c1bda09ef246e6ff36d16f7b3a4c91eefd89"} Dec 10 11:06:57 crc kubenswrapper[4780]: I1210 11:06:57.315659 4780 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"speaker-dockercfg-jrfxl" Dec 10 11:06:57 crc kubenswrapper[4780]: I1210 11:06:57.324386 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/speaker-hb4wd" Dec 10 11:06:57 crc kubenswrapper[4780]: W1210 11:06:57.370799 4780 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod2b6296fd_1a9f_4737_b9dd_332a1db07171.slice/crio-d37e36420e61a6fcecbc4843f0d5a7865778bd5ef1b213bf4834e75dbbf423f4 WatchSource:0}: Error finding container d37e36420e61a6fcecbc4843f0d5a7865778bd5ef1b213bf4834e75dbbf423f4: Status 404 returned error can't find the container with id d37e36420e61a6fcecbc4843f0d5a7865778bd5ef1b213bf4834e75dbbf423f4 Dec 10 11:06:58 crc kubenswrapper[4780]: I1210 11:06:58.482252 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-hb4wd" event={"ID":"2b6296fd-1a9f-4737-b9dd-332a1db07171","Type":"ContainerStarted","Data":"37424daa76096fd7019fa2bb3463116f17d489162ea0e70005f0aaa1148634db"} Dec 10 11:06:58 crc kubenswrapper[4780]: I1210 11:06:58.483798 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-hb4wd" event={"ID":"2b6296fd-1a9f-4737-b9dd-332a1db07171","Type":"ContainerStarted","Data":"d37e36420e61a6fcecbc4843f0d5a7865778bd5ef1b213bf4834e75dbbf423f4"} Dec 10 11:06:58 crc kubenswrapper[4780]: I1210 11:06:58.509747 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/controller-f8648f98b-xs4hm" event={"ID":"563406a8-97e3-4591-8448-a666a0ccaacc","Type":"ContainerStarted","Data":"487365d5642dad239ff1abcb7fd7db344d277e236d328510196df9b5940a28cd"} Dec 10 11:06:58 crc kubenswrapper[4780]: I1210 11:06:58.510561 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/controller-f8648f98b-xs4hm" Dec 10 11:06:58 crc kubenswrapper[4780]: I1210 11:06:58.537017 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/controller-f8648f98b-xs4hm" podStartSLOduration=3.536987821 podStartE2EDuration="3.536987821s" podCreationTimestamp="2025-12-10 11:06:55 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 
11:06:58.533202627 +0000 UTC m=+1323.386596070" watchObservedRunningTime="2025-12-10 11:06:58.536987821 +0000 UTC m=+1323.390381264" Dec 10 11:06:59 crc kubenswrapper[4780]: I1210 11:06:59.525330 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-hb4wd" event={"ID":"2b6296fd-1a9f-4737-b9dd-332a1db07171","Type":"ContainerStarted","Data":"ee2dd6b36b10744745404ba2c455f9a713c7c9ec5c7a56ad177c66fe99e56e67"} Dec 10 11:07:00 crc kubenswrapper[4780]: I1210 11:07:00.540821 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/speaker-hb4wd" Dec 10 11:07:06 crc kubenswrapper[4780]: I1210 11:07:06.063210 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/speaker-hb4wd" podStartSLOduration=11.06306993 podStartE2EDuration="11.06306993s" podCreationTimestamp="2025-12-10 11:06:55 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 11:06:59.555439932 +0000 UTC m=+1324.408833385" watchObservedRunningTime="2025-12-10 11:07:06.06306993 +0000 UTC m=+1330.916463383" Dec 10 11:07:07 crc kubenswrapper[4780]: I1210 11:07:07.332054 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/speaker-hb4wd" Dec 10 11:07:09 crc kubenswrapper[4780]: I1210 11:07:09.884686 4780 generic.go:334] "Generic (PLEG): container finished" podID="6974b1ff-a49a-4211-90a1-802e36919842" containerID="d4893495525800509cbe5de1d98c79c5a48d64cb69169abdc2048c7042c4fdba" exitCode=0 Dec 10 11:07:09 crc kubenswrapper[4780]: I1210 11:07:09.884831 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-jcfnl" event={"ID":"6974b1ff-a49a-4211-90a1-802e36919842","Type":"ContainerDied","Data":"d4893495525800509cbe5de1d98c79c5a48d64cb69169abdc2048c7042c4fdba"} Dec 10 11:07:09 crc kubenswrapper[4780]: I1210 11:07:09.889505 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-webhook-server-7fcb986d4-cllk5" event={"ID":"b32ab25c-5e31-4efb-b8d0-0ae92e4e0165","Type":"ContainerStarted","Data":"321757b78d8731b39dd543ce15061c4882a7e3778e9c483a1b813e1e0c81c75f"} Dec 10 11:07:09 crc kubenswrapper[4780]: I1210 11:07:09.889796 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/frr-k8s-webhook-server-7fcb986d4-cllk5" Dec 10 11:07:10 crc kubenswrapper[4780]: I1210 11:07:10.176446 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/frr-k8s-webhook-server-7fcb986d4-cllk5" podStartSLOduration=3.035124631 podStartE2EDuration="15.176397531s" podCreationTimestamp="2025-12-10 11:06:55 +0000 UTC" firstStartedPulling="2025-12-10 11:06:56.788665044 +0000 UTC m=+1321.642058487" lastFinishedPulling="2025-12-10 11:07:08.929937944 +0000 UTC m=+1333.783331387" observedRunningTime="2025-12-10 11:07:10.168955455 +0000 UTC m=+1335.022348898" watchObservedRunningTime="2025-12-10 11:07:10.176397531 +0000 UTC m=+1335.029790974" Dec 10 11:07:10 crc kubenswrapper[4780]: I1210 11:07:10.445343 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-index-8qhzf"] Dec 10 11:07:10 crc kubenswrapper[4780]: I1210 11:07:10.446818 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-index-8qhzf" Dec 10 11:07:10 crc kubenswrapper[4780]: I1210 11:07:10.450064 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack-operators"/"kube-root-ca.crt" Dec 10 11:07:10 crc kubenswrapper[4780]: I1210 11:07:10.450382 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-index-dockercfg-7hvkx" Dec 10 11:07:10 crc kubenswrapper[4780]: I1210 11:07:10.454657 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack-operators"/"openshift-service-ca.crt" Dec 10 11:07:10 crc kubenswrapper[4780]: I1210 11:07:10.489007 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-8qhzf"] Dec 10 11:07:10 crc kubenswrapper[4780]: I1210 11:07:10.612256 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-92vz9\" (UniqueName: \"kubernetes.io/projected/29e7c71f-7673-4198-97a0-fecf6cf69393-kube-api-access-92vz9\") pod \"openstack-operator-index-8qhzf\" (UID: \"29e7c71f-7673-4198-97a0-fecf6cf69393\") " pod="openstack-operators/openstack-operator-index-8qhzf" Dec 10 11:07:10 crc kubenswrapper[4780]: I1210 11:07:10.714203 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-92vz9\" (UniqueName: \"kubernetes.io/projected/29e7c71f-7673-4198-97a0-fecf6cf69393-kube-api-access-92vz9\") pod \"openstack-operator-index-8qhzf\" (UID: \"29e7c71f-7673-4198-97a0-fecf6cf69393\") " pod="openstack-operators/openstack-operator-index-8qhzf" Dec 10 11:07:10 crc kubenswrapper[4780]: I1210 11:07:10.737018 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-92vz9\" (UniqueName: \"kubernetes.io/projected/29e7c71f-7673-4198-97a0-fecf6cf69393-kube-api-access-92vz9\") pod \"openstack-operator-index-8qhzf\" (UID: \"29e7c71f-7673-4198-97a0-fecf6cf69393\") " pod="openstack-operators/openstack-operator-index-8qhzf" Dec 10 11:07:10 crc kubenswrapper[4780]: I1210 11:07:10.771987 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-index-8qhzf" Dec 10 11:07:11 crc kubenswrapper[4780]: I1210 11:07:11.053621 4780 generic.go:334] "Generic (PLEG): container finished" podID="6974b1ff-a49a-4211-90a1-802e36919842" containerID="f752d4fae4fc2e0e9324a598ab80af9615895279a5f235a923bee155c1e249c7" exitCode=0 Dec 10 11:07:11 crc kubenswrapper[4780]: I1210 11:07:11.055232 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-jcfnl" event={"ID":"6974b1ff-a49a-4211-90a1-802e36919842","Type":"ContainerDied","Data":"f752d4fae4fc2e0e9324a598ab80af9615895279a5f235a923bee155c1e249c7"} Dec 10 11:07:11 crc kubenswrapper[4780]: I1210 11:07:11.506615 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-8qhzf"] Dec 10 11:07:12 crc kubenswrapper[4780]: I1210 11:07:12.063818 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-8qhzf" event={"ID":"29e7c71f-7673-4198-97a0-fecf6cf69393","Type":"ContainerStarted","Data":"02e2ddc40c9684427610e703a76fa6809258302a329538dc32805823819829fe"} Dec 10 11:07:12 crc kubenswrapper[4780]: I1210 11:07:12.066457 4780 generic.go:334] "Generic (PLEG): container finished" podID="6974b1ff-a49a-4211-90a1-802e36919842" containerID="a7ae7abb234cd9c634155a25ded00fedf11593ea41b49b5b9e47a9037de67a7d" exitCode=0 Dec 10 11:07:12 crc kubenswrapper[4780]: I1210 11:07:12.066508 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-jcfnl" event={"ID":"6974b1ff-a49a-4211-90a1-802e36919842","Type":"ContainerDied","Data":"a7ae7abb234cd9c634155a25ded00fedf11593ea41b49b5b9e47a9037de67a7d"} Dec 10 11:07:13 crc kubenswrapper[4780]: I1210 11:07:13.082865 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-jcfnl" event={"ID":"6974b1ff-a49a-4211-90a1-802e36919842","Type":"ContainerStarted","Data":"d60efd18526a2b4cc9b909b1738162376417e0e2d99eca06285adecde74fa46c"} Dec 10 11:07:13 crc kubenswrapper[4780]: I1210 11:07:13.820219 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack-operators/openstack-operator-index-8qhzf"] Dec 10 11:07:14 crc kubenswrapper[4780]: I1210 11:07:14.100061 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-jcfnl" event={"ID":"6974b1ff-a49a-4211-90a1-802e36919842","Type":"ContainerStarted","Data":"d9c759bfc98e16fd1bf859bbaf26997d1a670cf875eb1afacb1fc571ee2d0512"} Dec 10 11:07:14 crc kubenswrapper[4780]: I1210 11:07:14.426504 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-index-7lvcb"] Dec 10 11:07:14 crc kubenswrapper[4780]: I1210 11:07:14.429465 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-index-7lvcb" Dec 10 11:07:14 crc kubenswrapper[4780]: I1210 11:07:14.433644 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-7lvcb"] Dec 10 11:07:14 crc kubenswrapper[4780]: I1210 11:07:14.549833 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wbz2n\" (UniqueName: \"kubernetes.io/projected/d8ff2ff1-ecd6-4464-8366-1579f94d99d8-kube-api-access-wbz2n\") pod \"openstack-operator-index-7lvcb\" (UID: \"d8ff2ff1-ecd6-4464-8366-1579f94d99d8\") " pod="openstack-operators/openstack-operator-index-7lvcb" Dec 10 11:07:14 crc kubenswrapper[4780]: I1210 11:07:14.653502 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wbz2n\" (UniqueName: \"kubernetes.io/projected/d8ff2ff1-ecd6-4464-8366-1579f94d99d8-kube-api-access-wbz2n\") pod \"openstack-operator-index-7lvcb\" (UID: \"d8ff2ff1-ecd6-4464-8366-1579f94d99d8\") " pod="openstack-operators/openstack-operator-index-7lvcb" Dec 10 11:07:14 crc kubenswrapper[4780]: I1210 11:07:14.674779 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wbz2n\" (UniqueName: \"kubernetes.io/projected/d8ff2ff1-ecd6-4464-8366-1579f94d99d8-kube-api-access-wbz2n\") pod \"openstack-operator-index-7lvcb\" (UID: \"d8ff2ff1-ecd6-4464-8366-1579f94d99d8\") " pod="openstack-operators/openstack-operator-index-7lvcb" Dec 10 11:07:14 crc kubenswrapper[4780]: I1210 11:07:14.758192 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-index-7lvcb" Dec 10 11:07:15 crc kubenswrapper[4780]: I1210 11:07:15.114255 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-jcfnl" event={"ID":"6974b1ff-a49a-4211-90a1-802e36919842","Type":"ContainerStarted","Data":"a446c3c5e0bd885e2249ee7c0c3c9ee59a40ca0811b98fe66d8bcf9d5b665de4"} Dec 10 11:07:15 crc kubenswrapper[4780]: I1210 11:07:15.210682 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-7lvcb"] Dec 10 11:07:16 crc kubenswrapper[4780]: I1210 11:07:16.235007 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-jcfnl" event={"ID":"6974b1ff-a49a-4211-90a1-802e36919842","Type":"ContainerStarted","Data":"0697631c7bf4cbacec6db0fd0ad75f4870e13c0c8e4cafd2efd0de3ecaed1cad"} Dec 10 11:07:16 crc kubenswrapper[4780]: I1210 11:07:16.235883 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-jcfnl" event={"ID":"6974b1ff-a49a-4211-90a1-802e36919842","Type":"ContainerStarted","Data":"ff750c306d27dd9dbede016d176214582d494ad18e30da05f0f1b08541c1528f"} Dec 10 11:07:16 crc kubenswrapper[4780]: I1210 11:07:16.236507 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-7lvcb" event={"ID":"d8ff2ff1-ecd6-4464-8366-1579f94d99d8","Type":"ContainerStarted","Data":"dbecb7752573023f6b44c71f687e2552b04e8f2dabd689609d003cedc5c2e8f3"} Dec 10 11:07:16 crc kubenswrapper[4780]: I1210 11:07:16.437233 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/controller-f8648f98b-xs4hm" Dec 10 11:07:17 crc kubenswrapper[4780]: I1210 11:07:17.253261 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-jcfnl" 
event={"ID":"6974b1ff-a49a-4211-90a1-802e36919842","Type":"ContainerStarted","Data":"1f249b413d487f3fe82bacf1f3bbfafa7315231733968c0d38f53359804b5b44"} Dec 10 11:07:17 crc kubenswrapper[4780]: I1210 11:07:17.253697 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/frr-k8s-jcfnl" Dec 10 11:07:17 crc kubenswrapper[4780]: I1210 11:07:17.297224 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/frr-k8s-jcfnl" podStartSLOduration=10.077577624 podStartE2EDuration="22.297179865s" podCreationTimestamp="2025-12-10 11:06:55 +0000 UTC" firstStartedPulling="2025-12-10 11:06:56.682364203 +0000 UTC m=+1321.535757646" lastFinishedPulling="2025-12-10 11:07:08.901966444 +0000 UTC m=+1333.755359887" observedRunningTime="2025-12-10 11:07:17.285945054 +0000 UTC m=+1342.139338497" watchObservedRunningTime="2025-12-10 11:07:17.297179865 +0000 UTC m=+1342.150573308" Dec 10 11:07:21 crc kubenswrapper[4780]: I1210 11:07:21.307445 4780 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="metallb-system/frr-k8s-jcfnl" Dec 10 11:07:21 crc kubenswrapper[4780]: I1210 11:07:21.363704 4780 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="metallb-system/frr-k8s-jcfnl" Dec 10 11:07:22 crc kubenswrapper[4780]: I1210 11:07:22.298002 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/frr-k8s-jcfnl" Dec 10 11:07:26 crc kubenswrapper[4780]: I1210 11:07:26.296646 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/frr-k8s-webhook-server-7fcb986d4-cllk5" Dec 10 11:07:54 crc kubenswrapper[4780]: I1210 11:07:54.809599 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-7lvcb" event={"ID":"d8ff2ff1-ecd6-4464-8366-1579f94d99d8","Type":"ContainerStarted","Data":"628e33ce5d4952beb61bf485c6a01a18d610d59af90baa883bb8fb2376d27efb"} Dec 10 11:07:54 crc kubenswrapper[4780]: I1210 11:07:54.837465 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-index-7lvcb" podStartSLOduration=2.87901856 podStartE2EDuration="40.837415389s" podCreationTimestamp="2025-12-10 11:07:14 +0000 UTC" firstStartedPulling="2025-12-10 11:07:15.226845108 +0000 UTC m=+1340.080238551" lastFinishedPulling="2025-12-10 11:07:53.185241897 +0000 UTC m=+1378.038635380" observedRunningTime="2025-12-10 11:07:54.829471949 +0000 UTC m=+1379.682865412" watchObservedRunningTime="2025-12-10 11:07:54.837415389 +0000 UTC m=+1379.690808832" Dec 10 11:07:55 crc kubenswrapper[4780]: I1210 11:07:55.826100 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-8qhzf" event={"ID":"29e7c71f-7673-4198-97a0-fecf6cf69393","Type":"ContainerStarted","Data":"20a9338578be119fe669695f7889af0501daaf02b966aad74e39da70c7592297"} Dec 10 11:07:55 crc kubenswrapper[4780]: I1210 11:07:55.826200 4780 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack-operators/openstack-operator-index-8qhzf" podUID="29e7c71f-7673-4198-97a0-fecf6cf69393" containerName="registry-server" containerID="cri-o://20a9338578be119fe669695f7889af0501daaf02b966aad74e39da70c7592297" gracePeriod=2 Dec 10 11:07:55 crc kubenswrapper[4780]: I1210 11:07:55.865421 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-index-8qhzf" podStartSLOduration=2.497827978 
podStartE2EDuration="45.86538063s" podCreationTimestamp="2025-12-10 11:07:10 +0000 UTC" firstStartedPulling="2025-12-10 11:07:11.527630271 +0000 UTC m=+1336.381023714" lastFinishedPulling="2025-12-10 11:07:54.895182923 +0000 UTC m=+1379.748576366" observedRunningTime="2025-12-10 11:07:55.853707266 +0000 UTC m=+1380.707100739" watchObservedRunningTime="2025-12-10 11:07:55.86538063 +0000 UTC m=+1380.718774073" Dec 10 11:07:56 crc kubenswrapper[4780]: I1210 11:07:56.542396 4780 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-index-8qhzf" Dec 10 11:07:56 crc kubenswrapper[4780]: I1210 11:07:56.673165 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-92vz9\" (UniqueName: \"kubernetes.io/projected/29e7c71f-7673-4198-97a0-fecf6cf69393-kube-api-access-92vz9\") pod \"29e7c71f-7673-4198-97a0-fecf6cf69393\" (UID: \"29e7c71f-7673-4198-97a0-fecf6cf69393\") " Dec 10 11:07:56 crc kubenswrapper[4780]: I1210 11:07:56.678287 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/29e7c71f-7673-4198-97a0-fecf6cf69393-kube-api-access-92vz9" (OuterVolumeSpecName: "kube-api-access-92vz9") pod "29e7c71f-7673-4198-97a0-fecf6cf69393" (UID: "29e7c71f-7673-4198-97a0-fecf6cf69393"). InnerVolumeSpecName "kube-api-access-92vz9". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:07:56 crc kubenswrapper[4780]: I1210 11:07:56.776155 4780 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-92vz9\" (UniqueName: \"kubernetes.io/projected/29e7c71f-7673-4198-97a0-fecf6cf69393-kube-api-access-92vz9\") on node \"crc\" DevicePath \"\"" Dec 10 11:07:56 crc kubenswrapper[4780]: I1210 11:07:56.835623 4780 generic.go:334] "Generic (PLEG): container finished" podID="29e7c71f-7673-4198-97a0-fecf6cf69393" containerID="20a9338578be119fe669695f7889af0501daaf02b966aad74e39da70c7592297" exitCode=0 Dec 10 11:07:56 crc kubenswrapper[4780]: I1210 11:07:56.835685 4780 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-index-8qhzf" Dec 10 11:07:56 crc kubenswrapper[4780]: I1210 11:07:56.835704 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-8qhzf" event={"ID":"29e7c71f-7673-4198-97a0-fecf6cf69393","Type":"ContainerDied","Data":"20a9338578be119fe669695f7889af0501daaf02b966aad74e39da70c7592297"} Dec 10 11:07:56 crc kubenswrapper[4780]: I1210 11:07:56.835765 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-8qhzf" event={"ID":"29e7c71f-7673-4198-97a0-fecf6cf69393","Type":"ContainerDied","Data":"02e2ddc40c9684427610e703a76fa6809258302a329538dc32805823819829fe"} Dec 10 11:07:56 crc kubenswrapper[4780]: I1210 11:07:56.835826 4780 scope.go:117] "RemoveContainer" containerID="20a9338578be119fe669695f7889af0501daaf02b966aad74e39da70c7592297" Dec 10 11:07:56 crc kubenswrapper[4780]: I1210 11:07:56.858729 4780 scope.go:117] "RemoveContainer" containerID="20a9338578be119fe669695f7889af0501daaf02b966aad74e39da70c7592297" Dec 10 11:07:56 crc kubenswrapper[4780]: E1210 11:07:56.859296 4780 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"20a9338578be119fe669695f7889af0501daaf02b966aad74e39da70c7592297\": container with ID starting with 20a9338578be119fe669695f7889af0501daaf02b966aad74e39da70c7592297 not found: ID does not exist" containerID="20a9338578be119fe669695f7889af0501daaf02b966aad74e39da70c7592297" Dec 10 11:07:56 crc kubenswrapper[4780]: I1210 11:07:56.859370 4780 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"20a9338578be119fe669695f7889af0501daaf02b966aad74e39da70c7592297"} err="failed to get container status \"20a9338578be119fe669695f7889af0501daaf02b966aad74e39da70c7592297\": rpc error: code = NotFound desc = could not find container \"20a9338578be119fe669695f7889af0501daaf02b966aad74e39da70c7592297\": container with ID starting with 20a9338578be119fe669695f7889af0501daaf02b966aad74e39da70c7592297 not found: ID does not exist" Dec 10 11:07:56 crc kubenswrapper[4780]: I1210 11:07:56.873051 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack-operators/openstack-operator-index-8qhzf"] Dec 10 11:07:56 crc kubenswrapper[4780]: I1210 11:07:56.884102 4780 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack-operators/openstack-operator-index-8qhzf"] Dec 10 11:07:57 crc kubenswrapper[4780]: I1210 11:07:57.973990 4780 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="29e7c71f-7673-4198-97a0-fecf6cf69393" path="/var/lib/kubelet/pods/29e7c71f-7673-4198-97a0-fecf6cf69393/volumes" Dec 10 11:08:04 crc kubenswrapper[4780]: I1210 11:08:04.759067 4780 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack-operators/openstack-operator-index-7lvcb" Dec 10 11:08:04 crc kubenswrapper[4780]: I1210 11:08:04.761012 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-index-7lvcb" Dec 10 11:08:04 crc kubenswrapper[4780]: I1210 11:08:04.788738 4780 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack-operators/openstack-operator-index-7lvcb" Dec 10 11:08:04 crc kubenswrapper[4780]: I1210 11:08:04.941774 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-operator-index-7lvcb" Dec 10 11:08:34 crc 
kubenswrapper[4780]: I1210 11:08:34.442233 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/0d3e900286c6ddceb3920f95bb3713bec0f74f6fff2c5179e680d841e29kh58"] Dec 10 11:08:34 crc kubenswrapper[4780]: E1210 11:08:34.443592 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="29e7c71f-7673-4198-97a0-fecf6cf69393" containerName="registry-server" Dec 10 11:08:34 crc kubenswrapper[4780]: I1210 11:08:34.443627 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="29e7c71f-7673-4198-97a0-fecf6cf69393" containerName="registry-server" Dec 10 11:08:34 crc kubenswrapper[4780]: I1210 11:08:34.443939 4780 memory_manager.go:354] "RemoveStaleState removing state" podUID="29e7c71f-7673-4198-97a0-fecf6cf69393" containerName="registry-server" Dec 10 11:08:34 crc kubenswrapper[4780]: I1210 11:08:34.461718 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/0d3e900286c6ddceb3920f95bb3713bec0f74f6fff2c5179e680d841e29kh58" Dec 10 11:08:34 crc kubenswrapper[4780]: I1210 11:08:34.468992 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"default-dockercfg-pf27m" Dec 10 11:08:34 crc kubenswrapper[4780]: I1210 11:08:34.495073 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/0d3e900286c6ddceb3920f95bb3713bec0f74f6fff2c5179e680d841e29kh58"] Dec 10 11:08:34 crc kubenswrapper[4780]: I1210 11:08:34.629838 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/d1bfcf03-5ce1-40ab-b3bd-072420cc36f7-util\") pod \"0d3e900286c6ddceb3920f95bb3713bec0f74f6fff2c5179e680d841e29kh58\" (UID: \"d1bfcf03-5ce1-40ab-b3bd-072420cc36f7\") " pod="openstack-operators/0d3e900286c6ddceb3920f95bb3713bec0f74f6fff2c5179e680d841e29kh58" Dec 10 11:08:34 crc kubenswrapper[4780]: I1210 11:08:34.631178 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/d1bfcf03-5ce1-40ab-b3bd-072420cc36f7-bundle\") pod \"0d3e900286c6ddceb3920f95bb3713bec0f74f6fff2c5179e680d841e29kh58\" (UID: \"d1bfcf03-5ce1-40ab-b3bd-072420cc36f7\") " pod="openstack-operators/0d3e900286c6ddceb3920f95bb3713bec0f74f6fff2c5179e680d841e29kh58" Dec 10 11:08:34 crc kubenswrapper[4780]: I1210 11:08:34.631400 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-l57wq\" (UniqueName: \"kubernetes.io/projected/d1bfcf03-5ce1-40ab-b3bd-072420cc36f7-kube-api-access-l57wq\") pod \"0d3e900286c6ddceb3920f95bb3713bec0f74f6fff2c5179e680d841e29kh58\" (UID: \"d1bfcf03-5ce1-40ab-b3bd-072420cc36f7\") " pod="openstack-operators/0d3e900286c6ddceb3920f95bb3713bec0f74f6fff2c5179e680d841e29kh58" Dec 10 11:08:34 crc kubenswrapper[4780]: I1210 11:08:34.734300 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/d1bfcf03-5ce1-40ab-b3bd-072420cc36f7-util\") pod \"0d3e900286c6ddceb3920f95bb3713bec0f74f6fff2c5179e680d841e29kh58\" (UID: \"d1bfcf03-5ce1-40ab-b3bd-072420cc36f7\") " pod="openstack-operators/0d3e900286c6ddceb3920f95bb3713bec0f74f6fff2c5179e680d841e29kh58" Dec 10 11:08:34 crc kubenswrapper[4780]: I1210 11:08:34.735099 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/d1bfcf03-5ce1-40ab-b3bd-072420cc36f7-util\") pod 
\"0d3e900286c6ddceb3920f95bb3713bec0f74f6fff2c5179e680d841e29kh58\" (UID: \"d1bfcf03-5ce1-40ab-b3bd-072420cc36f7\") " pod="openstack-operators/0d3e900286c6ddceb3920f95bb3713bec0f74f6fff2c5179e680d841e29kh58" Dec 10 11:08:34 crc kubenswrapper[4780]: I1210 11:08:34.735687 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/d1bfcf03-5ce1-40ab-b3bd-072420cc36f7-bundle\") pod \"0d3e900286c6ddceb3920f95bb3713bec0f74f6fff2c5179e680d841e29kh58\" (UID: \"d1bfcf03-5ce1-40ab-b3bd-072420cc36f7\") " pod="openstack-operators/0d3e900286c6ddceb3920f95bb3713bec0f74f6fff2c5179e680d841e29kh58" Dec 10 11:08:34 crc kubenswrapper[4780]: I1210 11:08:34.736028 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/d1bfcf03-5ce1-40ab-b3bd-072420cc36f7-bundle\") pod \"0d3e900286c6ddceb3920f95bb3713bec0f74f6fff2c5179e680d841e29kh58\" (UID: \"d1bfcf03-5ce1-40ab-b3bd-072420cc36f7\") " pod="openstack-operators/0d3e900286c6ddceb3920f95bb3713bec0f74f6fff2c5179e680d841e29kh58" Dec 10 11:08:34 crc kubenswrapper[4780]: I1210 11:08:34.736145 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-l57wq\" (UniqueName: \"kubernetes.io/projected/d1bfcf03-5ce1-40ab-b3bd-072420cc36f7-kube-api-access-l57wq\") pod \"0d3e900286c6ddceb3920f95bb3713bec0f74f6fff2c5179e680d841e29kh58\" (UID: \"d1bfcf03-5ce1-40ab-b3bd-072420cc36f7\") " pod="openstack-operators/0d3e900286c6ddceb3920f95bb3713bec0f74f6fff2c5179e680d841e29kh58" Dec 10 11:08:34 crc kubenswrapper[4780]: I1210 11:08:34.776897 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-l57wq\" (UniqueName: \"kubernetes.io/projected/d1bfcf03-5ce1-40ab-b3bd-072420cc36f7-kube-api-access-l57wq\") pod \"0d3e900286c6ddceb3920f95bb3713bec0f74f6fff2c5179e680d841e29kh58\" (UID: \"d1bfcf03-5ce1-40ab-b3bd-072420cc36f7\") " pod="openstack-operators/0d3e900286c6ddceb3920f95bb3713bec0f74f6fff2c5179e680d841e29kh58" Dec 10 11:08:34 crc kubenswrapper[4780]: I1210 11:08:34.803686 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/0d3e900286c6ddceb3920f95bb3713bec0f74f6fff2c5179e680d841e29kh58" Dec 10 11:08:35 crc kubenswrapper[4780]: I1210 11:08:35.520851 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/0d3e900286c6ddceb3920f95bb3713bec0f74f6fff2c5179e680d841e29kh58"] Dec 10 11:08:36 crc kubenswrapper[4780]: I1210 11:08:36.359857 4780 generic.go:334] "Generic (PLEG): container finished" podID="d1bfcf03-5ce1-40ab-b3bd-072420cc36f7" containerID="3e26165e65946119781c51bf277bb22e351a10c7e1d291da6b7627bb18942d6b" exitCode=0 Dec 10 11:08:36 crc kubenswrapper[4780]: I1210 11:08:36.359967 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/0d3e900286c6ddceb3920f95bb3713bec0f74f6fff2c5179e680d841e29kh58" event={"ID":"d1bfcf03-5ce1-40ab-b3bd-072420cc36f7","Type":"ContainerDied","Data":"3e26165e65946119781c51bf277bb22e351a10c7e1d291da6b7627bb18942d6b"} Dec 10 11:08:36 crc kubenswrapper[4780]: I1210 11:08:36.360326 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/0d3e900286c6ddceb3920f95bb3713bec0f74f6fff2c5179e680d841e29kh58" event={"ID":"d1bfcf03-5ce1-40ab-b3bd-072420cc36f7","Type":"ContainerStarted","Data":"e81bfe50a6e5002324c5a7f5bb90b79eabe9b708cd23390566cf591f7d8682ea"} Dec 10 11:08:39 crc kubenswrapper[4780]: I1210 11:08:39.934773 4780 generic.go:334] "Generic (PLEG): container finished" podID="d1bfcf03-5ce1-40ab-b3bd-072420cc36f7" containerID="752414e3cd37633f79919c5fb4141b1ff59a7d1dd6ac06f552fb90c042e88a82" exitCode=0 Dec 10 11:08:39 crc kubenswrapper[4780]: I1210 11:08:39.935591 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/0d3e900286c6ddceb3920f95bb3713bec0f74f6fff2c5179e680d841e29kh58" event={"ID":"d1bfcf03-5ce1-40ab-b3bd-072420cc36f7","Type":"ContainerDied","Data":"752414e3cd37633f79919c5fb4141b1ff59a7d1dd6ac06f552fb90c042e88a82"} Dec 10 11:08:40 crc kubenswrapper[4780]: I1210 11:08:40.949758 4780 generic.go:334] "Generic (PLEG): container finished" podID="d1bfcf03-5ce1-40ab-b3bd-072420cc36f7" containerID="88b48368bbb971d0bb3da22e8d09e367a69aa60b942c73731df883cd21568b1e" exitCode=0 Dec 10 11:08:40 crc kubenswrapper[4780]: I1210 11:08:40.949821 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/0d3e900286c6ddceb3920f95bb3713bec0f74f6fff2c5179e680d841e29kh58" event={"ID":"d1bfcf03-5ce1-40ab-b3bd-072420cc36f7","Type":"ContainerDied","Data":"88b48368bbb971d0bb3da22e8d09e367a69aa60b942c73731df883cd21568b1e"} Dec 10 11:08:42 crc kubenswrapper[4780]: I1210 11:08:42.854563 4780 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/0d3e900286c6ddceb3920f95bb3713bec0f74f6fff2c5179e680d841e29kh58" Dec 10 11:08:42 crc kubenswrapper[4780]: I1210 11:08:42.962790 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/d1bfcf03-5ce1-40ab-b3bd-072420cc36f7-util\") pod \"d1bfcf03-5ce1-40ab-b3bd-072420cc36f7\" (UID: \"d1bfcf03-5ce1-40ab-b3bd-072420cc36f7\") " Dec 10 11:08:42 crc kubenswrapper[4780]: I1210 11:08:42.962990 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-l57wq\" (UniqueName: \"kubernetes.io/projected/d1bfcf03-5ce1-40ab-b3bd-072420cc36f7-kube-api-access-l57wq\") pod \"d1bfcf03-5ce1-40ab-b3bd-072420cc36f7\" (UID: \"d1bfcf03-5ce1-40ab-b3bd-072420cc36f7\") " Dec 10 11:08:42 crc kubenswrapper[4780]: I1210 11:08:42.963034 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/d1bfcf03-5ce1-40ab-b3bd-072420cc36f7-bundle\") pod \"d1bfcf03-5ce1-40ab-b3bd-072420cc36f7\" (UID: \"d1bfcf03-5ce1-40ab-b3bd-072420cc36f7\") " Dec 10 11:08:42 crc kubenswrapper[4780]: I1210 11:08:42.964235 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d1bfcf03-5ce1-40ab-b3bd-072420cc36f7-bundle" (OuterVolumeSpecName: "bundle") pod "d1bfcf03-5ce1-40ab-b3bd-072420cc36f7" (UID: "d1bfcf03-5ce1-40ab-b3bd-072420cc36f7"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 11:08:42 crc kubenswrapper[4780]: I1210 11:08:42.971968 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d1bfcf03-5ce1-40ab-b3bd-072420cc36f7-kube-api-access-l57wq" (OuterVolumeSpecName: "kube-api-access-l57wq") pod "d1bfcf03-5ce1-40ab-b3bd-072420cc36f7" (UID: "d1bfcf03-5ce1-40ab-b3bd-072420cc36f7"). InnerVolumeSpecName "kube-api-access-l57wq". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:08:42 crc kubenswrapper[4780]: I1210 11:08:42.973398 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/0d3e900286c6ddceb3920f95bb3713bec0f74f6fff2c5179e680d841e29kh58" event={"ID":"d1bfcf03-5ce1-40ab-b3bd-072420cc36f7","Type":"ContainerDied","Data":"e81bfe50a6e5002324c5a7f5bb90b79eabe9b708cd23390566cf591f7d8682ea"} Dec 10 11:08:42 crc kubenswrapper[4780]: I1210 11:08:42.973648 4780 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e81bfe50a6e5002324c5a7f5bb90b79eabe9b708cd23390566cf591f7d8682ea" Dec 10 11:08:42 crc kubenswrapper[4780]: I1210 11:08:42.973541 4780 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/0d3e900286c6ddceb3920f95bb3713bec0f74f6fff2c5179e680d841e29kh58" Dec 10 11:08:42 crc kubenswrapper[4780]: I1210 11:08:42.975271 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d1bfcf03-5ce1-40ab-b3bd-072420cc36f7-util" (OuterVolumeSpecName: "util") pod "d1bfcf03-5ce1-40ab-b3bd-072420cc36f7" (UID: "d1bfcf03-5ce1-40ab-b3bd-072420cc36f7"). InnerVolumeSpecName "util". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 11:08:43 crc kubenswrapper[4780]: I1210 11:08:43.164974 4780 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/d1bfcf03-5ce1-40ab-b3bd-072420cc36f7-util\") on node \"crc\" DevicePath \"\"" Dec 10 11:08:43 crc kubenswrapper[4780]: I1210 11:08:43.165083 4780 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-l57wq\" (UniqueName: \"kubernetes.io/projected/d1bfcf03-5ce1-40ab-b3bd-072420cc36f7-kube-api-access-l57wq\") on node \"crc\" DevicePath \"\"" Dec 10 11:08:43 crc kubenswrapper[4780]: I1210 11:08:43.165180 4780 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/d1bfcf03-5ce1-40ab-b3bd-072420cc36f7-bundle\") on node \"crc\" DevicePath \"\"" Dec 10 11:08:48 crc kubenswrapper[4780]: I1210 11:08:48.757014 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-controller-operator-7b77d4dbbf-qd7x4"] Dec 10 11:08:48 crc kubenswrapper[4780]: E1210 11:08:48.758773 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d1bfcf03-5ce1-40ab-b3bd-072420cc36f7" containerName="util" Dec 10 11:08:48 crc kubenswrapper[4780]: I1210 11:08:48.758805 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="d1bfcf03-5ce1-40ab-b3bd-072420cc36f7" containerName="util" Dec 10 11:08:48 crc kubenswrapper[4780]: E1210 11:08:48.758823 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d1bfcf03-5ce1-40ab-b3bd-072420cc36f7" containerName="pull" Dec 10 11:08:48 crc kubenswrapper[4780]: I1210 11:08:48.758830 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="d1bfcf03-5ce1-40ab-b3bd-072420cc36f7" containerName="pull" Dec 10 11:08:48 crc kubenswrapper[4780]: E1210 11:08:48.758856 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d1bfcf03-5ce1-40ab-b3bd-072420cc36f7" containerName="extract" Dec 10 11:08:48 crc kubenswrapper[4780]: I1210 11:08:48.758868 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="d1bfcf03-5ce1-40ab-b3bd-072420cc36f7" containerName="extract" Dec 10 11:08:48 crc kubenswrapper[4780]: I1210 11:08:48.759319 4780 memory_manager.go:354] "RemoveStaleState removing state" podUID="d1bfcf03-5ce1-40ab-b3bd-072420cc36f7" containerName="extract" Dec 10 11:08:48 crc kubenswrapper[4780]: I1210 11:08:48.760583 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-controller-operator-7b77d4dbbf-qd7x4" Dec 10 11:08:48 crc kubenswrapper[4780]: I1210 11:08:48.771320 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-controller-operator-dockercfg-22sq7" Dec 10 11:08:48 crc kubenswrapper[4780]: I1210 11:08:48.808112 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-operator-7b77d4dbbf-qd7x4"] Dec 10 11:08:48 crc kubenswrapper[4780]: I1210 11:08:48.939384 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hx8sg\" (UniqueName: \"kubernetes.io/projected/033217b3-d237-4cd5-8aae-f99edb1dec27-kube-api-access-hx8sg\") pod \"openstack-operator-controller-operator-7b77d4dbbf-qd7x4\" (UID: \"033217b3-d237-4cd5-8aae-f99edb1dec27\") " pod="openstack-operators/openstack-operator-controller-operator-7b77d4dbbf-qd7x4" Dec 10 11:08:49 crc kubenswrapper[4780]: I1210 11:08:49.042131 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hx8sg\" (UniqueName: \"kubernetes.io/projected/033217b3-d237-4cd5-8aae-f99edb1dec27-kube-api-access-hx8sg\") pod \"openstack-operator-controller-operator-7b77d4dbbf-qd7x4\" (UID: \"033217b3-d237-4cd5-8aae-f99edb1dec27\") " pod="openstack-operators/openstack-operator-controller-operator-7b77d4dbbf-qd7x4" Dec 10 11:08:49 crc kubenswrapper[4780]: I1210 11:08:49.077240 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hx8sg\" (UniqueName: \"kubernetes.io/projected/033217b3-d237-4cd5-8aae-f99edb1dec27-kube-api-access-hx8sg\") pod \"openstack-operator-controller-operator-7b77d4dbbf-qd7x4\" (UID: \"033217b3-d237-4cd5-8aae-f99edb1dec27\") " pod="openstack-operators/openstack-operator-controller-operator-7b77d4dbbf-qd7x4" Dec 10 11:08:49 crc kubenswrapper[4780]: I1210 11:08:49.103741 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-controller-operator-7b77d4dbbf-qd7x4" Dec 10 11:08:49 crc kubenswrapper[4780]: I1210 11:08:49.824775 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-operator-7b77d4dbbf-qd7x4"] Dec 10 11:08:49 crc kubenswrapper[4780]: I1210 11:08:49.838089 4780 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Dec 10 11:08:50 crc kubenswrapper[4780]: I1210 11:08:50.444865 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-operator-7b77d4dbbf-qd7x4" event={"ID":"033217b3-d237-4cd5-8aae-f99edb1dec27","Type":"ContainerStarted","Data":"64cf349de58254ae1bb15ec95e79864fdb327ad95f441c8f00106602da5b1374"} Dec 10 11:08:57 crc kubenswrapper[4780]: I1210 11:08:57.476295 4780 patch_prober.go:28] interesting pod/machine-config-daemon-xhdr5 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 10 11:08:57 crc kubenswrapper[4780]: I1210 11:08:57.477144 4780 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" podUID="6bf1dca1-b191-4796-b326-baac53e84045" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 10 11:08:58 crc kubenswrapper[4780]: I1210 11:08:58.698239 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-operator-7b77d4dbbf-qd7x4" event={"ID":"033217b3-d237-4cd5-8aae-f99edb1dec27","Type":"ContainerStarted","Data":"02b99a15fde8cee96dfc322178edc97a8492f5cc32d2f730c499ff9ee03be8ee"} Dec 10 11:08:58 crc kubenswrapper[4780]: I1210 11:08:58.698655 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-controller-operator-7b77d4dbbf-qd7x4" Dec 10 11:08:58 crc kubenswrapper[4780]: I1210 11:08:58.727360 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-controller-operator-7b77d4dbbf-qd7x4" podStartSLOduration=2.369661718 podStartE2EDuration="10.727300733s" podCreationTimestamp="2025-12-10 11:08:48 +0000 UTC" firstStartedPulling="2025-12-10 11:08:49.837741601 +0000 UTC m=+1434.691135044" lastFinishedPulling="2025-12-10 11:08:58.195380616 +0000 UTC m=+1443.048774059" observedRunningTime="2025-12-10 11:08:58.726142954 +0000 UTC m=+1443.579536397" watchObservedRunningTime="2025-12-10 11:08:58.727300733 +0000 UTC m=+1443.580694166" Dec 10 11:09:03 crc kubenswrapper[4780]: I1210 11:09:03.124814 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-stnbb"] Dec 10 11:09:03 crc kubenswrapper[4780]: I1210 11:09:03.141112 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-stnbb"] Dec 10 11:09:03 crc kubenswrapper[4780]: I1210 11:09:03.141261 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-stnbb" Dec 10 11:09:03 crc kubenswrapper[4780]: I1210 11:09:03.187827 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/85f0c8ff-ca53-4096-9edc-9a42cbed9ced-catalog-content\") pod \"redhat-operators-stnbb\" (UID: \"85f0c8ff-ca53-4096-9edc-9a42cbed9ced\") " pod="openshift-marketplace/redhat-operators-stnbb" Dec 10 11:09:03 crc kubenswrapper[4780]: I1210 11:09:03.187943 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/85f0c8ff-ca53-4096-9edc-9a42cbed9ced-utilities\") pod \"redhat-operators-stnbb\" (UID: \"85f0c8ff-ca53-4096-9edc-9a42cbed9ced\") " pod="openshift-marketplace/redhat-operators-stnbb" Dec 10 11:09:03 crc kubenswrapper[4780]: I1210 11:09:03.188186 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-t4bjf\" (UniqueName: \"kubernetes.io/projected/85f0c8ff-ca53-4096-9edc-9a42cbed9ced-kube-api-access-t4bjf\") pod \"redhat-operators-stnbb\" (UID: \"85f0c8ff-ca53-4096-9edc-9a42cbed9ced\") " pod="openshift-marketplace/redhat-operators-stnbb" Dec 10 11:09:03 crc kubenswrapper[4780]: I1210 11:09:03.291398 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/85f0c8ff-ca53-4096-9edc-9a42cbed9ced-catalog-content\") pod \"redhat-operators-stnbb\" (UID: \"85f0c8ff-ca53-4096-9edc-9a42cbed9ced\") " pod="openshift-marketplace/redhat-operators-stnbb" Dec 10 11:09:03 crc kubenswrapper[4780]: I1210 11:09:03.291530 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/85f0c8ff-ca53-4096-9edc-9a42cbed9ced-utilities\") pod \"redhat-operators-stnbb\" (UID: \"85f0c8ff-ca53-4096-9edc-9a42cbed9ced\") " pod="openshift-marketplace/redhat-operators-stnbb" Dec 10 11:09:03 crc kubenswrapper[4780]: I1210 11:09:03.291616 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-t4bjf\" (UniqueName: \"kubernetes.io/projected/85f0c8ff-ca53-4096-9edc-9a42cbed9ced-kube-api-access-t4bjf\") pod \"redhat-operators-stnbb\" (UID: \"85f0c8ff-ca53-4096-9edc-9a42cbed9ced\") " pod="openshift-marketplace/redhat-operators-stnbb" Dec 10 11:09:03 crc kubenswrapper[4780]: I1210 11:09:03.292405 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/85f0c8ff-ca53-4096-9edc-9a42cbed9ced-catalog-content\") pod \"redhat-operators-stnbb\" (UID: \"85f0c8ff-ca53-4096-9edc-9a42cbed9ced\") " pod="openshift-marketplace/redhat-operators-stnbb" Dec 10 11:09:03 crc kubenswrapper[4780]: I1210 11:09:03.292474 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/85f0c8ff-ca53-4096-9edc-9a42cbed9ced-utilities\") pod \"redhat-operators-stnbb\" (UID: \"85f0c8ff-ca53-4096-9edc-9a42cbed9ced\") " pod="openshift-marketplace/redhat-operators-stnbb" Dec 10 11:09:03 crc kubenswrapper[4780]: I1210 11:09:03.318912 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-t4bjf\" (UniqueName: \"kubernetes.io/projected/85f0c8ff-ca53-4096-9edc-9a42cbed9ced-kube-api-access-t4bjf\") pod \"redhat-operators-stnbb\" (UID: 
\"85f0c8ff-ca53-4096-9edc-9a42cbed9ced\") " pod="openshift-marketplace/redhat-operators-stnbb" Dec 10 11:09:03 crc kubenswrapper[4780]: I1210 11:09:03.470330 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-stnbb" Dec 10 11:09:04 crc kubenswrapper[4780]: I1210 11:09:04.026233 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-stnbb"] Dec 10 11:09:04 crc kubenswrapper[4780]: I1210 11:09:04.759344 4780 generic.go:334] "Generic (PLEG): container finished" podID="85f0c8ff-ca53-4096-9edc-9a42cbed9ced" containerID="a24ee3d54b6e86d5c4e2b562ce861fbc357612f21ed2cc2e11a61236751f4e6a" exitCode=0 Dec 10 11:09:04 crc kubenswrapper[4780]: I1210 11:09:04.759662 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-stnbb" event={"ID":"85f0c8ff-ca53-4096-9edc-9a42cbed9ced","Type":"ContainerDied","Data":"a24ee3d54b6e86d5c4e2b562ce861fbc357612f21ed2cc2e11a61236751f4e6a"} Dec 10 11:09:04 crc kubenswrapper[4780]: I1210 11:09:04.759696 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-stnbb" event={"ID":"85f0c8ff-ca53-4096-9edc-9a42cbed9ced","Type":"ContainerStarted","Data":"6c766d999dff0af88edd6fe1a68647fac3ebe9e69bbeb55323095c95693f1d6c"} Dec 10 11:09:05 crc kubenswrapper[4780]: I1210 11:09:05.782157 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-stnbb" event={"ID":"85f0c8ff-ca53-4096-9edc-9a42cbed9ced","Type":"ContainerStarted","Data":"f000ff675066cbdda25c49264f01ccdb17023aa804e9d1831dd3cf3cc228f4a5"} Dec 10 11:09:08 crc kubenswrapper[4780]: I1210 11:09:08.825008 4780 generic.go:334] "Generic (PLEG): container finished" podID="85f0c8ff-ca53-4096-9edc-9a42cbed9ced" containerID="f000ff675066cbdda25c49264f01ccdb17023aa804e9d1831dd3cf3cc228f4a5" exitCode=0 Dec 10 11:09:08 crc kubenswrapper[4780]: I1210 11:09:08.825645 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-stnbb" event={"ID":"85f0c8ff-ca53-4096-9edc-9a42cbed9ced","Type":"ContainerDied","Data":"f000ff675066cbdda25c49264f01ccdb17023aa804e9d1831dd3cf3cc228f4a5"} Dec 10 11:09:09 crc kubenswrapper[4780]: I1210 11:09:09.108038 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-operator-controller-operator-7b77d4dbbf-qd7x4" Dec 10 11:09:09 crc kubenswrapper[4780]: I1210 11:09:09.838983 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-stnbb" event={"ID":"85f0c8ff-ca53-4096-9edc-9a42cbed9ced","Type":"ContainerStarted","Data":"ba18a73b3ae3b4d63a49cf3e4aa3b3dd2b052d3266c656650a7ae474a1a293f1"} Dec 10 11:09:09 crc kubenswrapper[4780]: I1210 11:09:09.869117 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-stnbb" podStartSLOduration=2.124660711 podStartE2EDuration="6.869078278s" podCreationTimestamp="2025-12-10 11:09:03 +0000 UTC" firstStartedPulling="2025-12-10 11:09:04.761554422 +0000 UTC m=+1449.614947865" lastFinishedPulling="2025-12-10 11:09:09.505971989 +0000 UTC m=+1454.359365432" observedRunningTime="2025-12-10 11:09:09.860243625 +0000 UTC m=+1454.713637078" watchObservedRunningTime="2025-12-10 11:09:09.869078278 +0000 UTC m=+1454.722471721" Dec 10 11:09:13 crc kubenswrapper[4780]: I1210 11:09:13.472198 4780 kubelet.go:2542] "SyncLoop (probe)" probe="startup" 
status="unhealthy" pod="openshift-marketplace/redhat-operators-stnbb" Dec 10 11:09:13 crc kubenswrapper[4780]: I1210 11:09:13.472761 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-stnbb" Dec 10 11:09:14 crc kubenswrapper[4780]: I1210 11:09:14.529295 4780 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-stnbb" podUID="85f0c8ff-ca53-4096-9edc-9a42cbed9ced" containerName="registry-server" probeResult="failure" output=< Dec 10 11:09:14 crc kubenswrapper[4780]: timeout: failed to connect service ":50051" within 1s Dec 10 11:09:14 crc kubenswrapper[4780]: > Dec 10 11:09:23 crc kubenswrapper[4780]: I1210 11:09:23.605019 4780 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-stnbb" Dec 10 11:09:23 crc kubenswrapper[4780]: I1210 11:09:23.682376 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-stnbb" Dec 10 11:09:27 crc kubenswrapper[4780]: I1210 11:09:27.475244 4780 patch_prober.go:28] interesting pod/machine-config-daemon-xhdr5 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 10 11:09:27 crc kubenswrapper[4780]: I1210 11:09:27.475727 4780 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" podUID="6bf1dca1-b191-4796-b326-baac53e84045" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 10 11:09:28 crc kubenswrapper[4780]: I1210 11:09:28.235695 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-stnbb"] Dec 10 11:09:28 crc kubenswrapper[4780]: I1210 11:09:28.240425 4780 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-stnbb" podUID="85f0c8ff-ca53-4096-9edc-9a42cbed9ced" containerName="registry-server" containerID="cri-o://ba18a73b3ae3b4d63a49cf3e4aa3b3dd2b052d3266c656650a7ae474a1a293f1" gracePeriod=2 Dec 10 11:09:29 crc kubenswrapper[4780]: I1210 11:09:29.026973 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-stnbb" event={"ID":"85f0c8ff-ca53-4096-9edc-9a42cbed9ced","Type":"ContainerDied","Data":"ba18a73b3ae3b4d63a49cf3e4aa3b3dd2b052d3266c656650a7ae474a1a293f1"} Dec 10 11:09:29 crc kubenswrapper[4780]: I1210 11:09:29.026970 4780 generic.go:334] "Generic (PLEG): container finished" podID="85f0c8ff-ca53-4096-9edc-9a42cbed9ced" containerID="ba18a73b3ae3b4d63a49cf3e4aa3b3dd2b052d3266c656650a7ae474a1a293f1" exitCode=0 Dec 10 11:09:29 crc kubenswrapper[4780]: I1210 11:09:29.412861 4780 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-stnbb" Dec 10 11:09:29 crc kubenswrapper[4780]: I1210 11:09:29.643685 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/85f0c8ff-ca53-4096-9edc-9a42cbed9ced-catalog-content\") pod \"85f0c8ff-ca53-4096-9edc-9a42cbed9ced\" (UID: \"85f0c8ff-ca53-4096-9edc-9a42cbed9ced\") " Dec 10 11:09:29 crc kubenswrapper[4780]: I1210 11:09:29.644220 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/85f0c8ff-ca53-4096-9edc-9a42cbed9ced-utilities\") pod \"85f0c8ff-ca53-4096-9edc-9a42cbed9ced\" (UID: \"85f0c8ff-ca53-4096-9edc-9a42cbed9ced\") " Dec 10 11:09:29 crc kubenswrapper[4780]: I1210 11:09:29.644477 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-t4bjf\" (UniqueName: \"kubernetes.io/projected/85f0c8ff-ca53-4096-9edc-9a42cbed9ced-kube-api-access-t4bjf\") pod \"85f0c8ff-ca53-4096-9edc-9a42cbed9ced\" (UID: \"85f0c8ff-ca53-4096-9edc-9a42cbed9ced\") " Dec 10 11:09:29 crc kubenswrapper[4780]: I1210 11:09:29.649069 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/85f0c8ff-ca53-4096-9edc-9a42cbed9ced-utilities" (OuterVolumeSpecName: "utilities") pod "85f0c8ff-ca53-4096-9edc-9a42cbed9ced" (UID: "85f0c8ff-ca53-4096-9edc-9a42cbed9ced"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 11:09:29 crc kubenswrapper[4780]: I1210 11:09:29.665433 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/85f0c8ff-ca53-4096-9edc-9a42cbed9ced-kube-api-access-t4bjf" (OuterVolumeSpecName: "kube-api-access-t4bjf") pod "85f0c8ff-ca53-4096-9edc-9a42cbed9ced" (UID: "85f0c8ff-ca53-4096-9edc-9a42cbed9ced"). InnerVolumeSpecName "kube-api-access-t4bjf". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:09:29 crc kubenswrapper[4780]: I1210 11:09:29.755608 4780 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/85f0c8ff-ca53-4096-9edc-9a42cbed9ced-utilities\") on node \"crc\" DevicePath \"\"" Dec 10 11:09:29 crc kubenswrapper[4780]: I1210 11:09:29.755665 4780 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-t4bjf\" (UniqueName: \"kubernetes.io/projected/85f0c8ff-ca53-4096-9edc-9a42cbed9ced-kube-api-access-t4bjf\") on node \"crc\" DevicePath \"\"" Dec 10 11:09:29 crc kubenswrapper[4780]: I1210 11:09:29.816838 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/85f0c8ff-ca53-4096-9edc-9a42cbed9ced-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "85f0c8ff-ca53-4096-9edc-9a42cbed9ced" (UID: "85f0c8ff-ca53-4096-9edc-9a42cbed9ced"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 11:09:29 crc kubenswrapper[4780]: I1210 11:09:29.858052 4780 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/85f0c8ff-ca53-4096-9edc-9a42cbed9ced-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 10 11:09:30 crc kubenswrapper[4780]: I1210 11:09:30.045304 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-stnbb" event={"ID":"85f0c8ff-ca53-4096-9edc-9a42cbed9ced","Type":"ContainerDied","Data":"6c766d999dff0af88edd6fe1a68647fac3ebe9e69bbeb55323095c95693f1d6c"} Dec 10 11:09:30 crc kubenswrapper[4780]: I1210 11:09:30.045548 4780 scope.go:117] "RemoveContainer" containerID="ba18a73b3ae3b4d63a49cf3e4aa3b3dd2b052d3266c656650a7ae474a1a293f1" Dec 10 11:09:30 crc kubenswrapper[4780]: I1210 11:09:30.045862 4780 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-stnbb" Dec 10 11:09:30 crc kubenswrapper[4780]: I1210 11:09:30.079707 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-stnbb"] Dec 10 11:09:30 crc kubenswrapper[4780]: I1210 11:09:30.086170 4780 scope.go:117] "RemoveContainer" containerID="f000ff675066cbdda25c49264f01ccdb17023aa804e9d1831dd3cf3cc228f4a5" Dec 10 11:09:30 crc kubenswrapper[4780]: I1210 11:09:30.091324 4780 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-stnbb"] Dec 10 11:09:30 crc kubenswrapper[4780]: I1210 11:09:30.119768 4780 scope.go:117] "RemoveContainer" containerID="a24ee3d54b6e86d5c4e2b562ce861fbc357612f21ed2cc2e11a61236751f4e6a" Dec 10 11:09:32 crc kubenswrapper[4780]: I1210 11:09:32.156228 4780 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="85f0c8ff-ca53-4096-9edc-9a42cbed9ced" path="/var/lib/kubelet/pods/85f0c8ff-ca53-4096-9edc-9a42cbed9ced/volumes" Dec 10 11:09:33 crc kubenswrapper[4780]: I1210 11:09:33.754649 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/barbican-operator-controller-manager-7d9dfd778-rnfbd"] Dec 10 11:09:33 crc kubenswrapper[4780]: E1210 11:09:33.756242 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="85f0c8ff-ca53-4096-9edc-9a42cbed9ced" containerName="registry-server" Dec 10 11:09:33 crc kubenswrapper[4780]: I1210 11:09:33.756299 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="85f0c8ff-ca53-4096-9edc-9a42cbed9ced" containerName="registry-server" Dec 10 11:09:33 crc kubenswrapper[4780]: E1210 11:09:33.756344 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="85f0c8ff-ca53-4096-9edc-9a42cbed9ced" containerName="extract-content" Dec 10 11:09:33 crc kubenswrapper[4780]: I1210 11:09:33.756352 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="85f0c8ff-ca53-4096-9edc-9a42cbed9ced" containerName="extract-content" Dec 10 11:09:33 crc kubenswrapper[4780]: E1210 11:09:33.756401 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="85f0c8ff-ca53-4096-9edc-9a42cbed9ced" containerName="extract-utilities" Dec 10 11:09:33 crc kubenswrapper[4780]: I1210 11:09:33.756411 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="85f0c8ff-ca53-4096-9edc-9a42cbed9ced" containerName="extract-utilities" Dec 10 11:09:33 crc kubenswrapper[4780]: I1210 11:09:33.756750 4780 memory_manager.go:354] "RemoveStaleState removing state" podUID="85f0c8ff-ca53-4096-9edc-9a42cbed9ced" 
containerName="registry-server" Dec 10 11:09:33 crc kubenswrapper[4780]: I1210 11:09:33.758731 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/barbican-operator-controller-manager-7d9dfd778-rnfbd" Dec 10 11:09:33 crc kubenswrapper[4780]: I1210 11:09:33.763825 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/cinder-operator-controller-manager-6c677c69b-nkdgw"] Dec 10 11:09:33 crc kubenswrapper[4780]: I1210 11:09:33.764555 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"barbican-operator-controller-manager-dockercfg-54qzm" Dec 10 11:09:33 crc kubenswrapper[4780]: I1210 11:09:33.766249 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/cinder-operator-controller-manager-6c677c69b-nkdgw" Dec 10 11:09:33 crc kubenswrapper[4780]: I1210 11:09:33.768617 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"cinder-operator-controller-manager-dockercfg-pbhlg" Dec 10 11:09:33 crc kubenswrapper[4780]: I1210 11:09:33.802308 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/barbican-operator-controller-manager-7d9dfd778-rnfbd"] Dec 10 11:09:33 crc kubenswrapper[4780]: I1210 11:09:33.837030 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/cinder-operator-controller-manager-6c677c69b-nkdgw"] Dec 10 11:09:33 crc kubenswrapper[4780]: I1210 11:09:33.854866 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/designate-operator-controller-manager-697fb699cf-h6ggx"] Dec 10 11:09:33 crc kubenswrapper[4780]: I1210 11:09:33.857352 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/designate-operator-controller-manager-697fb699cf-h6ggx" Dec 10 11:09:33 crc kubenswrapper[4780]: I1210 11:09:33.887769 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zbkvz\" (UniqueName: \"kubernetes.io/projected/6f18f8cf-e493-41bd-92e6-a7714992854d-kube-api-access-zbkvz\") pod \"barbican-operator-controller-manager-7d9dfd778-rnfbd\" (UID: \"6f18f8cf-e493-41bd-92e6-a7714992854d\") " pod="openstack-operators/barbican-operator-controller-manager-7d9dfd778-rnfbd" Dec 10 11:09:33 crc kubenswrapper[4780]: I1210 11:09:33.887978 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-29ff7\" (UniqueName: \"kubernetes.io/projected/5a684cfd-18e4-4f16-a0dd-73f2238cce27-kube-api-access-29ff7\") pod \"cinder-operator-controller-manager-6c677c69b-nkdgw\" (UID: \"5a684cfd-18e4-4f16-a0dd-73f2238cce27\") " pod="openstack-operators/cinder-operator-controller-manager-6c677c69b-nkdgw" Dec 10 11:09:33 crc kubenswrapper[4780]: I1210 11:09:33.897287 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"designate-operator-controller-manager-dockercfg-ckxvv" Dec 10 11:09:33 crc kubenswrapper[4780]: I1210 11:09:33.912626 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/designate-operator-controller-manager-697fb699cf-h6ggx"] Dec 10 11:09:34 crc kubenswrapper[4780]: I1210 11:09:34.000877 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-29ff7\" (UniqueName: \"kubernetes.io/projected/5a684cfd-18e4-4f16-a0dd-73f2238cce27-kube-api-access-29ff7\") pod 
\"cinder-operator-controller-manager-6c677c69b-nkdgw\" (UID: \"5a684cfd-18e4-4f16-a0dd-73f2238cce27\") " pod="openstack-operators/cinder-operator-controller-manager-6c677c69b-nkdgw" Dec 10 11:09:34 crc kubenswrapper[4780]: I1210 11:09:34.001202 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zbkvz\" (UniqueName: \"kubernetes.io/projected/6f18f8cf-e493-41bd-92e6-a7714992854d-kube-api-access-zbkvz\") pod \"barbican-operator-controller-manager-7d9dfd778-rnfbd\" (UID: \"6f18f8cf-e493-41bd-92e6-a7714992854d\") " pod="openstack-operators/barbican-operator-controller-manager-7d9dfd778-rnfbd" Dec 10 11:09:34 crc kubenswrapper[4780]: I1210 11:09:34.001301 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hgrvs\" (UniqueName: \"kubernetes.io/projected/77993ff6-b277-4ef9-a00d-08a47d02d483-kube-api-access-hgrvs\") pod \"designate-operator-controller-manager-697fb699cf-h6ggx\" (UID: \"77993ff6-b277-4ef9-a00d-08a47d02d483\") " pod="openstack-operators/designate-operator-controller-manager-697fb699cf-h6ggx" Dec 10 11:09:34 crc kubenswrapper[4780]: I1210 11:09:34.007083 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/glance-operator-controller-manager-5697bb5779-lx86j"] Dec 10 11:09:34 crc kubenswrapper[4780]: I1210 11:09:34.008533 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/heat-operator-controller-manager-5f64f6f8bb-csmkt"] Dec 10 11:09:34 crc kubenswrapper[4780]: I1210 11:09:34.012241 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/glance-operator-controller-manager-5697bb5779-lx86j" Dec 10 11:09:34 crc kubenswrapper[4780]: I1210 11:09:34.017715 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"glance-operator-controller-manager-dockercfg-bbkzc" Dec 10 11:09:34 crc kubenswrapper[4780]: I1210 11:09:34.024691 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/heat-operator-controller-manager-5f64f6f8bb-csmkt" Dec 10 11:09:34 crc kubenswrapper[4780]: I1210 11:09:34.034935 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"heat-operator-controller-manager-dockercfg-pkpp9" Dec 10 11:09:34 crc kubenswrapper[4780]: I1210 11:09:34.056788 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zbkvz\" (UniqueName: \"kubernetes.io/projected/6f18f8cf-e493-41bd-92e6-a7714992854d-kube-api-access-zbkvz\") pod \"barbican-operator-controller-manager-7d9dfd778-rnfbd\" (UID: \"6f18f8cf-e493-41bd-92e6-a7714992854d\") " pod="openstack-operators/barbican-operator-controller-manager-7d9dfd778-rnfbd" Dec 10 11:09:34 crc kubenswrapper[4780]: I1210 11:09:34.060396 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-29ff7\" (UniqueName: \"kubernetes.io/projected/5a684cfd-18e4-4f16-a0dd-73f2238cce27-kube-api-access-29ff7\") pod \"cinder-operator-controller-manager-6c677c69b-nkdgw\" (UID: \"5a684cfd-18e4-4f16-a0dd-73f2238cce27\") " pod="openstack-operators/cinder-operator-controller-manager-6c677c69b-nkdgw" Dec 10 11:09:34 crc kubenswrapper[4780]: I1210 11:09:34.074950 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/glance-operator-controller-manager-5697bb5779-lx86j"] Dec 10 11:09:34 crc kubenswrapper[4780]: I1210 11:09:34.099578 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/barbican-operator-controller-manager-7d9dfd778-rnfbd" Dec 10 11:09:34 crc kubenswrapper[4780]: I1210 11:09:34.101803 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/heat-operator-controller-manager-5f64f6f8bb-csmkt"] Dec 10 11:09:34 crc kubenswrapper[4780]: I1210 11:09:34.104261 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rmtkf\" (UniqueName: \"kubernetes.io/projected/d235302b-56b1-4515-9f26-4f0ea884aa87-kube-api-access-rmtkf\") pod \"heat-operator-controller-manager-5f64f6f8bb-csmkt\" (UID: \"d235302b-56b1-4515-9f26-4f0ea884aa87\") " pod="openstack-operators/heat-operator-controller-manager-5f64f6f8bb-csmkt" Dec 10 11:09:34 crc kubenswrapper[4780]: I1210 11:09:34.104515 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hgrvs\" (UniqueName: \"kubernetes.io/projected/77993ff6-b277-4ef9-a00d-08a47d02d483-kube-api-access-hgrvs\") pod \"designate-operator-controller-manager-697fb699cf-h6ggx\" (UID: \"77993ff6-b277-4ef9-a00d-08a47d02d483\") " pod="openstack-operators/designate-operator-controller-manager-697fb699cf-h6ggx" Dec 10 11:09:34 crc kubenswrapper[4780]: I1210 11:09:34.104709 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6cmb9\" (UniqueName: \"kubernetes.io/projected/16c6406a-69c5-4365-81d9-8bf51365cd08-kube-api-access-6cmb9\") pod \"glance-operator-controller-manager-5697bb5779-lx86j\" (UID: \"16c6406a-69c5-4365-81d9-8bf51365cd08\") " pod="openstack-operators/glance-operator-controller-manager-5697bb5779-lx86j" Dec 10 11:09:34 crc kubenswrapper[4780]: I1210 11:09:34.115234 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/horizon-operator-controller-manager-68c6d99b8f-896x8"] Dec 10 11:09:34 crc kubenswrapper[4780]: I1210 11:09:34.117209 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/horizon-operator-controller-manager-68c6d99b8f-896x8" Dec 10 11:09:34 crc kubenswrapper[4780]: I1210 11:09:34.118656 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/cinder-operator-controller-manager-6c677c69b-nkdgw" Dec 10 11:09:34 crc kubenswrapper[4780]: I1210 11:09:34.126374 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/horizon-operator-controller-manager-68c6d99b8f-896x8"] Dec 10 11:09:34 crc kubenswrapper[4780]: I1210 11:09:34.131658 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"horizon-operator-controller-manager-dockercfg-588tn" Dec 10 11:09:34 crc kubenswrapper[4780]: I1210 11:09:34.215373 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hgrvs\" (UniqueName: \"kubernetes.io/projected/77993ff6-b277-4ef9-a00d-08a47d02d483-kube-api-access-hgrvs\") pod \"designate-operator-controller-manager-697fb699cf-h6ggx\" (UID: \"77993ff6-b277-4ef9-a00d-08a47d02d483\") " pod="openstack-operators/designate-operator-controller-manager-697fb699cf-h6ggx" Dec 10 11:09:34 crc kubenswrapper[4780]: I1210 11:09:34.216854 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/infra-operator-controller-manager-78d48bff9d-d2txd"] Dec 10 11:09:34 crc kubenswrapper[4780]: I1210 11:09:34.222523 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6cmb9\" (UniqueName: \"kubernetes.io/projected/16c6406a-69c5-4365-81d9-8bf51365cd08-kube-api-access-6cmb9\") pod \"glance-operator-controller-manager-5697bb5779-lx86j\" (UID: \"16c6406a-69c5-4365-81d9-8bf51365cd08\") " pod="openstack-operators/glance-operator-controller-manager-5697bb5779-lx86j" Dec 10 11:09:34 crc kubenswrapper[4780]: I1210 11:09:34.222705 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rmtkf\" (UniqueName: \"kubernetes.io/projected/d235302b-56b1-4515-9f26-4f0ea884aa87-kube-api-access-rmtkf\") pod \"heat-operator-controller-manager-5f64f6f8bb-csmkt\" (UID: \"d235302b-56b1-4515-9f26-4f0ea884aa87\") " pod="openstack-operators/heat-operator-controller-manager-5f64f6f8bb-csmkt" Dec 10 11:09:34 crc kubenswrapper[4780]: I1210 11:09:34.222758 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nd49x\" (UniqueName: \"kubernetes.io/projected/0dbb81a9-f820-447a-a475-911ae4a53034-kube-api-access-nd49x\") pod \"horizon-operator-controller-manager-68c6d99b8f-896x8\" (UID: \"0dbb81a9-f820-447a-a475-911ae4a53034\") " pod="openstack-operators/horizon-operator-controller-manager-68c6d99b8f-896x8" Dec 10 11:09:34 crc kubenswrapper[4780]: I1210 11:09:34.262217 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/designate-operator-controller-manager-697fb699cf-h6ggx" Dec 10 11:09:34 crc kubenswrapper[4780]: I1210 11:09:34.276529 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/infra-operator-controller-manager-78d48bff9d-d2txd" Dec 10 11:09:34 crc kubenswrapper[4780]: I1210 11:09:34.300872 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"infra-operator-webhook-server-cert" Dec 10 11:09:34 crc kubenswrapper[4780]: I1210 11:09:34.317877 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rmtkf\" (UniqueName: \"kubernetes.io/projected/d235302b-56b1-4515-9f26-4f0ea884aa87-kube-api-access-rmtkf\") pod \"heat-operator-controller-manager-5f64f6f8bb-csmkt\" (UID: \"d235302b-56b1-4515-9f26-4f0ea884aa87\") " pod="openstack-operators/heat-operator-controller-manager-5f64f6f8bb-csmkt" Dec 10 11:09:34 crc kubenswrapper[4780]: I1210 11:09:34.334737 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"infra-operator-controller-manager-dockercfg-65njt" Dec 10 11:09:34 crc kubenswrapper[4780]: I1210 11:09:34.384905 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dkq5m\" (UniqueName: \"kubernetes.io/projected/e4a7d6c0-ca7b-4e3d-bff9-51d6c66810c0-kube-api-access-dkq5m\") pod \"infra-operator-controller-manager-78d48bff9d-d2txd\" (UID: \"e4a7d6c0-ca7b-4e3d-bff9-51d6c66810c0\") " pod="openstack-operators/infra-operator-controller-manager-78d48bff9d-d2txd" Dec 10 11:09:34 crc kubenswrapper[4780]: I1210 11:09:34.427373 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6cmb9\" (UniqueName: \"kubernetes.io/projected/16c6406a-69c5-4365-81d9-8bf51365cd08-kube-api-access-6cmb9\") pod \"glance-operator-controller-manager-5697bb5779-lx86j\" (UID: \"16c6406a-69c5-4365-81d9-8bf51365cd08\") " pod="openstack-operators/glance-operator-controller-manager-5697bb5779-lx86j" Dec 10 11:09:34 crc kubenswrapper[4780]: I1210 11:09:34.432629 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nd49x\" (UniqueName: \"kubernetes.io/projected/0dbb81a9-f820-447a-a475-911ae4a53034-kube-api-access-nd49x\") pod \"horizon-operator-controller-manager-68c6d99b8f-896x8\" (UID: \"0dbb81a9-f820-447a-a475-911ae4a53034\") " pod="openstack-operators/horizon-operator-controller-manager-68c6d99b8f-896x8" Dec 10 11:09:34 crc kubenswrapper[4780]: I1210 11:09:34.433225 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/ironic-operator-controller-manager-967d97867-84x72"] Dec 10 11:09:34 crc kubenswrapper[4780]: I1210 11:09:34.434329 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/e4a7d6c0-ca7b-4e3d-bff9-51d6c66810c0-cert\") pod \"infra-operator-controller-manager-78d48bff9d-d2txd\" (UID: \"e4a7d6c0-ca7b-4e3d-bff9-51d6c66810c0\") " pod="openstack-operators/infra-operator-controller-manager-78d48bff9d-d2txd" Dec 10 11:09:34 crc kubenswrapper[4780]: I1210 11:09:34.435318 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/heat-operator-controller-manager-5f64f6f8bb-csmkt" Dec 10 11:09:34 crc kubenswrapper[4780]: I1210 11:09:34.449071 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/ironic-operator-controller-manager-967d97867-84x72" Dec 10 11:09:34 crc kubenswrapper[4780]: I1210 11:09:34.457171 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"ironic-operator-controller-manager-dockercfg-bxxnx" Dec 10 11:09:34 crc kubenswrapper[4780]: I1210 11:09:34.477341 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nd49x\" (UniqueName: \"kubernetes.io/projected/0dbb81a9-f820-447a-a475-911ae4a53034-kube-api-access-nd49x\") pod \"horizon-operator-controller-manager-68c6d99b8f-896x8\" (UID: \"0dbb81a9-f820-447a-a475-911ae4a53034\") " pod="openstack-operators/horizon-operator-controller-manager-68c6d99b8f-896x8" Dec 10 11:09:34 crc kubenswrapper[4780]: I1210 11:09:34.479540 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/keystone-operator-controller-manager-7765d96ddf-v6fxt"] Dec 10 11:09:34 crc kubenswrapper[4780]: I1210 11:09:34.482148 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/keystone-operator-controller-manager-7765d96ddf-v6fxt" Dec 10 11:09:34 crc kubenswrapper[4780]: I1210 11:09:34.493033 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"keystone-operator-controller-manager-dockercfg-rksvb" Dec 10 11:09:34 crc kubenswrapper[4780]: I1210 11:09:34.514535 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ironic-operator-controller-manager-967d97867-84x72"] Dec 10 11:09:34 crc kubenswrapper[4780]: I1210 11:09:34.539399 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5wcxj\" (UniqueName: \"kubernetes.io/projected/9d665ff9-da6c-4ae6-82f1-0fa8d9a3fb10-kube-api-access-5wcxj\") pod \"keystone-operator-controller-manager-7765d96ddf-v6fxt\" (UID: \"9d665ff9-da6c-4ae6-82f1-0fa8d9a3fb10\") " pod="openstack-operators/keystone-operator-controller-manager-7765d96ddf-v6fxt" Dec 10 11:09:34 crc kubenswrapper[4780]: I1210 11:09:34.540385 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dkq5m\" (UniqueName: \"kubernetes.io/projected/e4a7d6c0-ca7b-4e3d-bff9-51d6c66810c0-kube-api-access-dkq5m\") pod \"infra-operator-controller-manager-78d48bff9d-d2txd\" (UID: \"e4a7d6c0-ca7b-4e3d-bff9-51d6c66810c0\") " pod="openstack-operators/infra-operator-controller-manager-78d48bff9d-d2txd" Dec 10 11:09:34 crc kubenswrapper[4780]: I1210 11:09:34.540649 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-h5sr9\" (UniqueName: \"kubernetes.io/projected/5e979946-7a11-46af-ab82-77bae1669169-kube-api-access-h5sr9\") pod \"ironic-operator-controller-manager-967d97867-84x72\" (UID: \"5e979946-7a11-46af-ab82-77bae1669169\") " pod="openstack-operators/ironic-operator-controller-manager-967d97867-84x72" Dec 10 11:09:34 crc kubenswrapper[4780]: I1210 11:09:34.540853 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/e4a7d6c0-ca7b-4e3d-bff9-51d6c66810c0-cert\") pod \"infra-operator-controller-manager-78d48bff9d-d2txd\" (UID: \"e4a7d6c0-ca7b-4e3d-bff9-51d6c66810c0\") " pod="openstack-operators/infra-operator-controller-manager-78d48bff9d-d2txd" Dec 10 11:09:34 crc kubenswrapper[4780]: E1210 11:09:34.541304 4780 secret.go:188] Couldn't get secret 
openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found Dec 10 11:09:34 crc kubenswrapper[4780]: E1210 11:09:34.541518 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/e4a7d6c0-ca7b-4e3d-bff9-51d6c66810c0-cert podName:e4a7d6c0-ca7b-4e3d-bff9-51d6c66810c0 nodeName:}" failed. No retries permitted until 2025-12-10 11:09:35.041442228 +0000 UTC m=+1479.894835671 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/e4a7d6c0-ca7b-4e3d-bff9-51d6c66810c0-cert") pod "infra-operator-controller-manager-78d48bff9d-d2txd" (UID: "e4a7d6c0-ca7b-4e3d-bff9-51d6c66810c0") : secret "infra-operator-webhook-server-cert" not found Dec 10 11:09:34 crc kubenswrapper[4780]: I1210 11:09:34.556056 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/infra-operator-controller-manager-78d48bff9d-d2txd"] Dec 10 11:09:34 crc kubenswrapper[4780]: I1210 11:09:34.573753 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dkq5m\" (UniqueName: \"kubernetes.io/projected/e4a7d6c0-ca7b-4e3d-bff9-51d6c66810c0-kube-api-access-dkq5m\") pod \"infra-operator-controller-manager-78d48bff9d-d2txd\" (UID: \"e4a7d6c0-ca7b-4e3d-bff9-51d6c66810c0\") " pod="openstack-operators/infra-operator-controller-manager-78d48bff9d-d2txd" Dec 10 11:09:34 crc kubenswrapper[4780]: I1210 11:09:34.573847 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/manila-operator-controller-manager-5b5fd79c9c-jb52h"] Dec 10 11:09:34 crc kubenswrapper[4780]: I1210 11:09:34.579183 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/manila-operator-controller-manager-5b5fd79c9c-jb52h" Dec 10 11:09:34 crc kubenswrapper[4780]: I1210 11:09:34.586083 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/keystone-operator-controller-manager-7765d96ddf-v6fxt"] Dec 10 11:09:34 crc kubenswrapper[4780]: I1210 11:09:34.589253 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"manila-operator-controller-manager-dockercfg-gwcbc" Dec 10 11:09:34 crc kubenswrapper[4780]: I1210 11:09:34.631039 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/manila-operator-controller-manager-5b5fd79c9c-jb52h"] Dec 10 11:09:34 crc kubenswrapper[4780]: I1210 11:09:34.649490 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/mariadb-operator-controller-manager-79c8c4686c-jxsxr"] Dec 10 11:09:34 crc kubenswrapper[4780]: I1210 11:09:34.651287 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/mariadb-operator-controller-manager-79c8c4686c-jxsxr" Dec 10 11:09:34 crc kubenswrapper[4780]: I1210 11:09:34.655163 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/horizon-operator-controller-manager-68c6d99b8f-896x8" Dec 10 11:09:34 crc kubenswrapper[4780]: I1210 11:09:34.655412 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5wcxj\" (UniqueName: \"kubernetes.io/projected/9d665ff9-da6c-4ae6-82f1-0fa8d9a3fb10-kube-api-access-5wcxj\") pod \"keystone-operator-controller-manager-7765d96ddf-v6fxt\" (UID: \"9d665ff9-da6c-4ae6-82f1-0fa8d9a3fb10\") " pod="openstack-operators/keystone-operator-controller-manager-7765d96ddf-v6fxt" Dec 10 11:09:34 crc kubenswrapper[4780]: I1210 11:09:34.668312 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-h5sr9\" (UniqueName: \"kubernetes.io/projected/5e979946-7a11-46af-ab82-77bae1669169-kube-api-access-h5sr9\") pod \"ironic-operator-controller-manager-967d97867-84x72\" (UID: \"5e979946-7a11-46af-ab82-77bae1669169\") " pod="openstack-operators/ironic-operator-controller-manager-967d97867-84x72" Dec 10 11:09:34 crc kubenswrapper[4780]: I1210 11:09:34.664593 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"mariadb-operator-controller-manager-dockercfg-zhl95" Dec 10 11:09:34 crc kubenswrapper[4780]: I1210 11:09:34.658965 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/mariadb-operator-controller-manager-79c8c4686c-jxsxr"] Dec 10 11:09:34 crc kubenswrapper[4780]: I1210 11:09:34.719596 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/glance-operator-controller-manager-5697bb5779-lx86j" Dec 10 11:09:34 crc kubenswrapper[4780]: I1210 11:09:34.729156 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/neutron-operator-controller-manager-5fdfd5b6b5-nlzr9"] Dec 10 11:09:34 crc kubenswrapper[4780]: I1210 11:09:34.730676 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/neutron-operator-controller-manager-5fdfd5b6b5-nlzr9" Dec 10 11:09:34 crc kubenswrapper[4780]: I1210 11:09:34.734991 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"neutron-operator-controller-manager-dockercfg-tgng6" Dec 10 11:09:34 crc kubenswrapper[4780]: I1210 11:09:34.746494 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5wcxj\" (UniqueName: \"kubernetes.io/projected/9d665ff9-da6c-4ae6-82f1-0fa8d9a3fb10-kube-api-access-5wcxj\") pod \"keystone-operator-controller-manager-7765d96ddf-v6fxt\" (UID: \"9d665ff9-da6c-4ae6-82f1-0fa8d9a3fb10\") " pod="openstack-operators/keystone-operator-controller-manager-7765d96ddf-v6fxt" Dec 10 11:09:34 crc kubenswrapper[4780]: I1210 11:09:34.747194 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/nova-operator-controller-manager-697bc559fc-lgmzm"] Dec 10 11:09:34 crc kubenswrapper[4780]: I1210 11:09:34.761700 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/nova-operator-controller-manager-697bc559fc-lgmzm" Dec 10 11:09:34 crc kubenswrapper[4780]: I1210 11:09:34.768669 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"nova-operator-controller-manager-dockercfg-kjwgl" Dec 10 11:09:34 crc kubenswrapper[4780]: I1210 11:09:34.780789 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nm57l\" (UniqueName: \"kubernetes.io/projected/b1ef4b52-99f2-4257-97d4-bdb6f871f73f-kube-api-access-nm57l\") pod \"mariadb-operator-controller-manager-79c8c4686c-jxsxr\" (UID: \"b1ef4b52-99f2-4257-97d4-bdb6f871f73f\") " pod="openstack-operators/mariadb-operator-controller-manager-79c8c4686c-jxsxr" Dec 10 11:09:34 crc kubenswrapper[4780]: I1210 11:09:34.781107 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5g2cb\" (UniqueName: \"kubernetes.io/projected/0f7ed694-a606-482f-90ca-bbe99437b5f7-kube-api-access-5g2cb\") pod \"manila-operator-controller-manager-5b5fd79c9c-jb52h\" (UID: \"0f7ed694-a606-482f-90ca-bbe99437b5f7\") " pod="openstack-operators/manila-operator-controller-manager-5b5fd79c9c-jb52h" Dec 10 11:09:34 crc kubenswrapper[4780]: I1210 11:09:34.781197 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jdzrz\" (UniqueName: \"kubernetes.io/projected/bbfacb89-13e0-45ef-853a-1faf76e014d7-kube-api-access-jdzrz\") pod \"nova-operator-controller-manager-697bc559fc-lgmzm\" (UID: \"bbfacb89-13e0-45ef-853a-1faf76e014d7\") " pod="openstack-operators/nova-operator-controller-manager-697bc559fc-lgmzm" Dec 10 11:09:34 crc kubenswrapper[4780]: I1210 11:09:34.781234 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jcb8v\" (UniqueName: \"kubernetes.io/projected/3a1cc2ce-3a32-447a-8824-7ec8c84b1613-kube-api-access-jcb8v\") pod \"neutron-operator-controller-manager-5fdfd5b6b5-nlzr9\" (UID: \"3a1cc2ce-3a32-447a-8824-7ec8c84b1613\") " pod="openstack-operators/neutron-operator-controller-manager-5fdfd5b6b5-nlzr9" Dec 10 11:09:34 crc kubenswrapper[4780]: I1210 11:09:34.784892 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/octavia-operator-controller-manager-998648c74-zs8hq"] Dec 10 11:09:34 crc kubenswrapper[4780]: I1210 11:09:34.788411 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-h5sr9\" (UniqueName: \"kubernetes.io/projected/5e979946-7a11-46af-ab82-77bae1669169-kube-api-access-h5sr9\") pod \"ironic-operator-controller-manager-967d97867-84x72\" (UID: \"5e979946-7a11-46af-ab82-77bae1669169\") " pod="openstack-operators/ironic-operator-controller-manager-967d97867-84x72" Dec 10 11:09:35 crc kubenswrapper[4780]: I1210 11:09:35.379270 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/keystone-operator-controller-manager-7765d96ddf-v6fxt" Dec 10 11:09:35 crc kubenswrapper[4780]: I1210 11:09:35.386334 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/ironic-operator-controller-manager-967d97867-84x72" Dec 10 11:09:35 crc kubenswrapper[4780]: I1210 11:09:35.393087 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/e4a7d6c0-ca7b-4e3d-bff9-51d6c66810c0-cert\") pod \"infra-operator-controller-manager-78d48bff9d-d2txd\" (UID: \"e4a7d6c0-ca7b-4e3d-bff9-51d6c66810c0\") " pod="openstack-operators/infra-operator-controller-manager-78d48bff9d-d2txd" Dec 10 11:09:35 crc kubenswrapper[4780]: I1210 11:09:35.410729 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nm57l\" (UniqueName: \"kubernetes.io/projected/b1ef4b52-99f2-4257-97d4-bdb6f871f73f-kube-api-access-nm57l\") pod \"mariadb-operator-controller-manager-79c8c4686c-jxsxr\" (UID: \"b1ef4b52-99f2-4257-97d4-bdb6f871f73f\") " pod="openstack-operators/mariadb-operator-controller-manager-79c8c4686c-jxsxr" Dec 10 11:09:35 crc kubenswrapper[4780]: I1210 11:09:35.411129 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5g2cb\" (UniqueName: \"kubernetes.io/projected/0f7ed694-a606-482f-90ca-bbe99437b5f7-kube-api-access-5g2cb\") pod \"manila-operator-controller-manager-5b5fd79c9c-jb52h\" (UID: \"0f7ed694-a606-482f-90ca-bbe99437b5f7\") " pod="openstack-operators/manila-operator-controller-manager-5b5fd79c9c-jb52h" Dec 10 11:09:35 crc kubenswrapper[4780]: I1210 11:09:35.411273 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jdzrz\" (UniqueName: \"kubernetes.io/projected/bbfacb89-13e0-45ef-853a-1faf76e014d7-kube-api-access-jdzrz\") pod \"nova-operator-controller-manager-697bc559fc-lgmzm\" (UID: \"bbfacb89-13e0-45ef-853a-1faf76e014d7\") " pod="openstack-operators/nova-operator-controller-manager-697bc559fc-lgmzm" Dec 10 11:09:35 crc kubenswrapper[4780]: I1210 11:09:35.411319 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jcb8v\" (UniqueName: \"kubernetes.io/projected/3a1cc2ce-3a32-447a-8824-7ec8c84b1613-kube-api-access-jcb8v\") pod \"neutron-operator-controller-manager-5fdfd5b6b5-nlzr9\" (UID: \"3a1cc2ce-3a32-447a-8824-7ec8c84b1613\") " pod="openstack-operators/neutron-operator-controller-manager-5fdfd5b6b5-nlzr9" Dec 10 11:09:35 crc kubenswrapper[4780]: E1210 11:09:35.393216 4780 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found Dec 10 11:09:35 crc kubenswrapper[4780]: E1210 11:09:35.412006 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/e4a7d6c0-ca7b-4e3d-bff9-51d6c66810c0-cert podName:e4a7d6c0-ca7b-4e3d-bff9-51d6c66810c0 nodeName:}" failed. No retries permitted until 2025-12-10 11:09:36.411982638 +0000 UTC m=+1481.265376081 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/e4a7d6c0-ca7b-4e3d-bff9-51d6c66810c0-cert") pod "infra-operator-controller-manager-78d48bff9d-d2txd" (UID: "e4a7d6c0-ca7b-4e3d-bff9-51d6c66810c0") : secret "infra-operator-webhook-server-cert" not found Dec 10 11:09:35 crc kubenswrapper[4780]: I1210 11:09:35.394285 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/nova-operator-controller-manager-697bc559fc-lgmzm"] Dec 10 11:09:35 crc kubenswrapper[4780]: I1210 11:09:35.394436 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/octavia-operator-controller-manager-998648c74-zs8hq" Dec 10 11:09:35 crc kubenswrapper[4780]: I1210 11:09:35.434297 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/neutron-operator-controller-manager-5fdfd5b6b5-nlzr9"] Dec 10 11:09:35 crc kubenswrapper[4780]: I1210 11:09:35.434963 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"octavia-operator-controller-manager-dockercfg-8x475" Dec 10 11:09:35 crc kubenswrapper[4780]: I1210 11:09:35.464587 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/octavia-operator-controller-manager-998648c74-zs8hq"] Dec 10 11:09:35 crc kubenswrapper[4780]: I1210 11:09:35.466024 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jdzrz\" (UniqueName: \"kubernetes.io/projected/bbfacb89-13e0-45ef-853a-1faf76e014d7-kube-api-access-jdzrz\") pod \"nova-operator-controller-manager-697bc559fc-lgmzm\" (UID: \"bbfacb89-13e0-45ef-853a-1faf76e014d7\") " pod="openstack-operators/nova-operator-controller-manager-697bc559fc-lgmzm" Dec 10 11:09:35 crc kubenswrapper[4780]: I1210 11:09:35.474595 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5g2cb\" (UniqueName: \"kubernetes.io/projected/0f7ed694-a606-482f-90ca-bbe99437b5f7-kube-api-access-5g2cb\") pod \"manila-operator-controller-manager-5b5fd79c9c-jb52h\" (UID: \"0f7ed694-a606-482f-90ca-bbe99437b5f7\") " pod="openstack-operators/manila-operator-controller-manager-5b5fd79c9c-jb52h" Dec 10 11:09:35 crc kubenswrapper[4780]: I1210 11:09:35.482502 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jcb8v\" (UniqueName: \"kubernetes.io/projected/3a1cc2ce-3a32-447a-8824-7ec8c84b1613-kube-api-access-jcb8v\") pod \"neutron-operator-controller-manager-5fdfd5b6b5-nlzr9\" (UID: \"3a1cc2ce-3a32-447a-8824-7ec8c84b1613\") " pod="openstack-operators/neutron-operator-controller-manager-5fdfd5b6b5-nlzr9" Dec 10 11:09:35 crc kubenswrapper[4780]: I1210 11:09:35.501511 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nm57l\" (UniqueName: \"kubernetes.io/projected/b1ef4b52-99f2-4257-97d4-bdb6f871f73f-kube-api-access-nm57l\") pod \"mariadb-operator-controller-manager-79c8c4686c-jxsxr\" (UID: \"b1ef4b52-99f2-4257-97d4-bdb6f871f73f\") " pod="openstack-operators/mariadb-operator-controller-manager-79c8c4686c-jxsxr" Dec 10 11:09:35 crc kubenswrapper[4780]: I1210 11:09:35.521844 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hddqp\" (UniqueName: \"kubernetes.io/projected/23cc63a0-e8c1-49bc-9762-daa6d315409e-kube-api-access-hddqp\") pod \"octavia-operator-controller-manager-998648c74-zs8hq\" (UID: \"23cc63a0-e8c1-49bc-9762-daa6d315409e\") " pod="openstack-operators/octavia-operator-controller-manager-998648c74-zs8hq" Dec 10 11:09:35 crc kubenswrapper[4780]: I1210 11:09:35.528333 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/manila-operator-controller-manager-5b5fd79c9c-jb52h" Dec 10 11:09:35 crc kubenswrapper[4780]: I1210 11:09:35.596270 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/mariadb-operator-controller-manager-79c8c4686c-jxsxr" Dec 10 11:09:35 crc kubenswrapper[4780]: I1210 11:09:35.620586 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-baremetal-operator-controller-manager-84b575879fkqjsw"] Dec 10 11:09:35 crc kubenswrapper[4780]: I1210 11:09:35.623398 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-baremetal-operator-controller-manager-84b575879fkqjsw" Dec 10 11:09:35 crc kubenswrapper[4780]: I1210 11:09:35.624049 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hddqp\" (UniqueName: \"kubernetes.io/projected/23cc63a0-e8c1-49bc-9762-daa6d315409e-kube-api-access-hddqp\") pod \"octavia-operator-controller-manager-998648c74-zs8hq\" (UID: \"23cc63a0-e8c1-49bc-9762-daa6d315409e\") " pod="openstack-operators/octavia-operator-controller-manager-998648c74-zs8hq" Dec 10 11:09:35 crc kubenswrapper[4780]: I1210 11:09:35.640564 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-baremetal-operator-webhook-server-cert" Dec 10 11:09:35 crc kubenswrapper[4780]: I1210 11:09:35.640564 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-baremetal-operator-controller-manager-dockercfg-dklxr" Dec 10 11:09:35 crc kubenswrapper[4780]: I1210 11:09:35.664560 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/ovn-operator-controller-manager-b6456fdb6-jwjxx"] Dec 10 11:09:35 crc kubenswrapper[4780]: I1210 11:09:35.667102 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/ovn-operator-controller-manager-b6456fdb6-jwjxx" Dec 10 11:09:35 crc kubenswrapper[4780]: I1210 11:09:35.672236 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"ovn-operator-controller-manager-dockercfg-p7bms" Dec 10 11:09:35 crc kubenswrapper[4780]: I1210 11:09:35.673632 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hddqp\" (UniqueName: \"kubernetes.io/projected/23cc63a0-e8c1-49bc-9762-daa6d315409e-kube-api-access-hddqp\") pod \"octavia-operator-controller-manager-998648c74-zs8hq\" (UID: \"23cc63a0-e8c1-49bc-9762-daa6d315409e\") " pod="openstack-operators/octavia-operator-controller-manager-998648c74-zs8hq" Dec 10 11:09:35 crc kubenswrapper[4780]: I1210 11:09:35.675486 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-baremetal-operator-controller-manager-84b575879fkqjsw"] Dec 10 11:09:35 crc kubenswrapper[4780]: I1210 11:09:35.700883 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/neutron-operator-controller-manager-5fdfd5b6b5-nlzr9" Dec 10 11:09:35 crc kubenswrapper[4780]: I1210 11:09:35.713222 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/placement-operator-controller-manager-78f8948974-tgbxl"] Dec 10 11:09:35 crc kubenswrapper[4780]: I1210 11:09:35.716970 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/placement-operator-controller-manager-78f8948974-tgbxl" Dec 10 11:09:35 crc kubenswrapper[4780]: I1210 11:09:35.724817 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"placement-operator-controller-manager-dockercfg-44jmm" Dec 10 11:09:35 crc kubenswrapper[4780]: I1210 11:09:35.726271 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sgq5x\" (UniqueName: \"kubernetes.io/projected/7d98f853-1b52-438c-a5d1-6fe334794a35-kube-api-access-sgq5x\") pod \"openstack-baremetal-operator-controller-manager-84b575879fkqjsw\" (UID: \"7d98f853-1b52-438c-a5d1-6fe334794a35\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-84b575879fkqjsw" Dec 10 11:09:35 crc kubenswrapper[4780]: I1210 11:09:35.726499 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4nvcv\" (UniqueName: \"kubernetes.io/projected/b88956c1-b60b-4a6f-948a-de685134880f-kube-api-access-4nvcv\") pod \"ovn-operator-controller-manager-b6456fdb6-jwjxx\" (UID: \"b88956c1-b60b-4a6f-948a-de685134880f\") " pod="openstack-operators/ovn-operator-controller-manager-b6456fdb6-jwjxx" Dec 10 11:09:35 crc kubenswrapper[4780]: I1210 11:09:35.726584 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/7d98f853-1b52-438c-a5d1-6fe334794a35-cert\") pod \"openstack-baremetal-operator-controller-manager-84b575879fkqjsw\" (UID: \"7d98f853-1b52-438c-a5d1-6fe334794a35\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-84b575879fkqjsw" Dec 10 11:09:35 crc kubenswrapper[4780]: I1210 11:09:35.728197 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/placement-operator-controller-manager-78f8948974-tgbxl"] Dec 10 11:09:35 crc kubenswrapper[4780]: I1210 11:09:35.738372 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ovn-operator-controller-manager-b6456fdb6-jwjxx"] Dec 10 11:09:35 crc kubenswrapper[4780]: I1210 11:09:35.745504 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/nova-operator-controller-manager-697bc559fc-lgmzm" Dec 10 11:09:35 crc kubenswrapper[4780]: I1210 11:09:35.788512 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/swift-operator-controller-manager-9d58d64bc-t764b"] Dec 10 11:09:36 crc kubenswrapper[4780]: I1210 11:09:36.017752 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4nvcv\" (UniqueName: \"kubernetes.io/projected/b88956c1-b60b-4a6f-948a-de685134880f-kube-api-access-4nvcv\") pod \"ovn-operator-controller-manager-b6456fdb6-jwjxx\" (UID: \"b88956c1-b60b-4a6f-948a-de685134880f\") " pod="openstack-operators/ovn-operator-controller-manager-b6456fdb6-jwjxx" Dec 10 11:09:36 crc kubenswrapper[4780]: I1210 11:09:36.018038 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/7d98f853-1b52-438c-a5d1-6fe334794a35-cert\") pod \"openstack-baremetal-operator-controller-manager-84b575879fkqjsw\" (UID: \"7d98f853-1b52-438c-a5d1-6fe334794a35\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-84b575879fkqjsw" Dec 10 11:09:36 crc kubenswrapper[4780]: I1210 11:09:36.018177 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-njml6\" (UniqueName: \"kubernetes.io/projected/42edfebc-9a83-460f-9bb9-50172b9763d3-kube-api-access-njml6\") pod \"placement-operator-controller-manager-78f8948974-tgbxl\" (UID: \"42edfebc-9a83-460f-9bb9-50172b9763d3\") " pod="openstack-operators/placement-operator-controller-manager-78f8948974-tgbxl" Dec 10 11:09:36 crc kubenswrapper[4780]: I1210 11:09:36.018299 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sgq5x\" (UniqueName: \"kubernetes.io/projected/7d98f853-1b52-438c-a5d1-6fe334794a35-kube-api-access-sgq5x\") pod \"openstack-baremetal-operator-controller-manager-84b575879fkqjsw\" (UID: \"7d98f853-1b52-438c-a5d1-6fe334794a35\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-84b575879fkqjsw" Dec 10 11:09:36 crc kubenswrapper[4780]: E1210 11:09:36.019829 4780 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found Dec 10 11:09:36 crc kubenswrapper[4780]: E1210 11:09:36.019909 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/7d98f853-1b52-438c-a5d1-6fe334794a35-cert podName:7d98f853-1b52-438c-a5d1-6fe334794a35 nodeName:}" failed. No retries permitted until 2025-12-10 11:09:36.519882327 +0000 UTC m=+1481.373275770 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/7d98f853-1b52-438c-a5d1-6fe334794a35-cert") pod "openstack-baremetal-operator-controller-manager-84b575879fkqjsw" (UID: "7d98f853-1b52-438c-a5d1-6fe334794a35") : secret "openstack-baremetal-operator-webhook-server-cert" not found Dec 10 11:09:36 crc kubenswrapper[4780]: I1210 11:09:36.020412 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/swift-operator-controller-manager-9d58d64bc-t764b" Dec 10 11:09:36 crc kubenswrapper[4780]: I1210 11:09:36.036071 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"swift-operator-controller-manager-dockercfg-bnnl4" Dec 10 11:09:36 crc kubenswrapper[4780]: I1210 11:09:36.105702 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4nvcv\" (UniqueName: \"kubernetes.io/projected/b88956c1-b60b-4a6f-948a-de685134880f-kube-api-access-4nvcv\") pod \"ovn-operator-controller-manager-b6456fdb6-jwjxx\" (UID: \"b88956c1-b60b-4a6f-948a-de685134880f\") " pod="openstack-operators/ovn-operator-controller-manager-b6456fdb6-jwjxx" Dec 10 11:09:36 crc kubenswrapper[4780]: I1210 11:09:36.500817 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sgq5x\" (UniqueName: \"kubernetes.io/projected/7d98f853-1b52-438c-a5d1-6fe334794a35-kube-api-access-sgq5x\") pod \"openstack-baremetal-operator-controller-manager-84b575879fkqjsw\" (UID: \"7d98f853-1b52-438c-a5d1-6fe334794a35\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-84b575879fkqjsw" Dec 10 11:09:36 crc kubenswrapper[4780]: I1210 11:09:36.532631 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/e4a7d6c0-ca7b-4e3d-bff9-51d6c66810c0-cert\") pod \"infra-operator-controller-manager-78d48bff9d-d2txd\" (UID: \"e4a7d6c0-ca7b-4e3d-bff9-51d6c66810c0\") " pod="openstack-operators/infra-operator-controller-manager-78d48bff9d-d2txd" Dec 10 11:09:36 crc kubenswrapper[4780]: I1210 11:09:36.532728 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/7d98f853-1b52-438c-a5d1-6fe334794a35-cert\") pod \"openstack-baremetal-operator-controller-manager-84b575879fkqjsw\" (UID: \"7d98f853-1b52-438c-a5d1-6fe334794a35\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-84b575879fkqjsw" Dec 10 11:09:36 crc kubenswrapper[4780]: I1210 11:09:36.532782 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-c8wcq\" (UniqueName: \"kubernetes.io/projected/47ef7d7b-4052-4068-adef-b6a94353f980-kube-api-access-c8wcq\") pod \"swift-operator-controller-manager-9d58d64bc-t764b\" (UID: \"47ef7d7b-4052-4068-adef-b6a94353f980\") " pod="openstack-operators/swift-operator-controller-manager-9d58d64bc-t764b" Dec 10 11:09:36 crc kubenswrapper[4780]: I1210 11:09:36.532830 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-njml6\" (UniqueName: \"kubernetes.io/projected/42edfebc-9a83-460f-9bb9-50172b9763d3-kube-api-access-njml6\") pod \"placement-operator-controller-manager-78f8948974-tgbxl\" (UID: \"42edfebc-9a83-460f-9bb9-50172b9763d3\") " pod="openstack-operators/placement-operator-controller-manager-78f8948974-tgbxl" Dec 10 11:09:36 crc kubenswrapper[4780]: E1210 11:09:36.533136 4780 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found Dec 10 11:09:36 crc kubenswrapper[4780]: E1210 11:09:36.533190 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/e4a7d6c0-ca7b-4e3d-bff9-51d6c66810c0-cert podName:e4a7d6c0-ca7b-4e3d-bff9-51d6c66810c0 nodeName:}" failed. 
No retries permitted until 2025-12-10 11:09:38.533172615 +0000 UTC m=+1483.386566058 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/e4a7d6c0-ca7b-4e3d-bff9-51d6c66810c0-cert") pod "infra-operator-controller-manager-78d48bff9d-d2txd" (UID: "e4a7d6c0-ca7b-4e3d-bff9-51d6c66810c0") : secret "infra-operator-webhook-server-cert" not found Dec 10 11:09:36 crc kubenswrapper[4780]: E1210 11:09:36.533680 4780 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found Dec 10 11:09:36 crc kubenswrapper[4780]: E1210 11:09:36.533721 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/7d98f853-1b52-438c-a5d1-6fe334794a35-cert podName:7d98f853-1b52-438c-a5d1-6fe334794a35 nodeName:}" failed. No retries permitted until 2025-12-10 11:09:37.533709759 +0000 UTC m=+1482.387103202 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/7d98f853-1b52-438c-a5d1-6fe334794a35-cert") pod "openstack-baremetal-operator-controller-manager-84b575879fkqjsw" (UID: "7d98f853-1b52-438c-a5d1-6fe334794a35") : secret "openstack-baremetal-operator-webhook-server-cert" not found Dec 10 11:09:36 crc kubenswrapper[4780]: I1210 11:09:36.553197 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/octavia-operator-controller-manager-998648c74-zs8hq" Dec 10 11:09:36 crc kubenswrapper[4780]: I1210 11:09:36.626777 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/swift-operator-controller-manager-9d58d64bc-t764b"] Dec 10 11:09:36 crc kubenswrapper[4780]: I1210 11:09:36.626837 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/telemetry-operator-controller-manager-54d54d59bc-c7gtb"] Dec 10 11:09:36 crc kubenswrapper[4780]: I1210 11:09:36.628800 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/test-operator-controller-manager-5854674fcc-4vbhw"] Dec 10 11:09:36 crc kubenswrapper[4780]: I1210 11:09:36.629547 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/telemetry-operator-controller-manager-54d54d59bc-c7gtb" Dec 10 11:09:36 crc kubenswrapper[4780]: I1210 11:09:36.631698 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/watcher-operator-controller-manager-667bd8d554-9q8g7"] Dec 10 11:09:36 crc kubenswrapper[4780]: I1210 11:09:36.632909 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"telemetry-operator-controller-manager-dockercfg-dgdhk" Dec 10 11:09:36 crc kubenswrapper[4780]: I1210 11:09:36.633262 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/test-operator-controller-manager-5854674fcc-4vbhw" Dec 10 11:09:36 crc kubenswrapper[4780]: I1210 11:09:36.635658 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-c8wcq\" (UniqueName: \"kubernetes.io/projected/47ef7d7b-4052-4068-adef-b6a94353f980-kube-api-access-c8wcq\") pod \"swift-operator-controller-manager-9d58d64bc-t764b\" (UID: \"47ef7d7b-4052-4068-adef-b6a94353f980\") " pod="openstack-operators/swift-operator-controller-manager-9d58d64bc-t764b" Dec 10 11:09:36 crc kubenswrapper[4780]: I1210 11:09:36.637571 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"test-operator-controller-manager-dockercfg-s2pl9" Dec 10 11:09:36 crc kubenswrapper[4780]: I1210 11:09:36.638166 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/telemetry-operator-controller-manager-54d54d59bc-c7gtb"] Dec 10 11:09:36 crc kubenswrapper[4780]: I1210 11:09:36.638185 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/watcher-operator-controller-manager-667bd8d554-9q8g7"] Dec 10 11:09:36 crc kubenswrapper[4780]: I1210 11:09:36.638256 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/watcher-operator-controller-manager-667bd8d554-9q8g7" Dec 10 11:09:36 crc kubenswrapper[4780]: I1210 11:09:36.649812 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"watcher-operator-controller-manager-dockercfg-hgxph" Dec 10 11:09:36 crc kubenswrapper[4780]: I1210 11:09:36.657421 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/test-operator-controller-manager-5854674fcc-4vbhw"] Dec 10 11:09:36 crc kubenswrapper[4780]: I1210 11:09:36.661837 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-njml6\" (UniqueName: \"kubernetes.io/projected/42edfebc-9a83-460f-9bb9-50172b9763d3-kube-api-access-njml6\") pod \"placement-operator-controller-manager-78f8948974-tgbxl\" (UID: \"42edfebc-9a83-460f-9bb9-50172b9763d3\") " pod="openstack-operators/placement-operator-controller-manager-78f8948974-tgbxl" Dec 10 11:09:36 crc kubenswrapper[4780]: I1210 11:09:36.672765 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-c8wcq\" (UniqueName: \"kubernetes.io/projected/47ef7d7b-4052-4068-adef-b6a94353f980-kube-api-access-c8wcq\") pod \"swift-operator-controller-manager-9d58d64bc-t764b\" (UID: \"47ef7d7b-4052-4068-adef-b6a94353f980\") " pod="openstack-operators/swift-operator-controller-manager-9d58d64bc-t764b" Dec 10 11:09:36 crc kubenswrapper[4780]: I1210 11:09:36.696619 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-controller-manager-678c445b7b-6kmsn"] Dec 10 11:09:36 crc kubenswrapper[4780]: I1210 11:09:36.699513 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-controller-manager-678c445b7b-6kmsn" Dec 10 11:09:36 crc kubenswrapper[4780]: I1210 11:09:36.702834 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"metrics-server-cert" Dec 10 11:09:36 crc kubenswrapper[4780]: I1210 11:09:36.703538 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"webhook-server-cert" Dec 10 11:09:36 crc kubenswrapper[4780]: I1210 11:09:36.708146 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-controller-manager-dockercfg-cqn8b" Dec 10 11:09:36 crc kubenswrapper[4780]: I1210 11:09:36.731877 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-manager-678c445b7b-6kmsn"] Dec 10 11:09:36 crc kubenswrapper[4780]: I1210 11:09:36.741604 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ldvtn\" (UniqueName: \"kubernetes.io/projected/de0be1fa-33a0-44ad-9aed-c791a447510a-kube-api-access-ldvtn\") pod \"test-operator-controller-manager-5854674fcc-4vbhw\" (UID: \"de0be1fa-33a0-44ad-9aed-c791a447510a\") " pod="openstack-operators/test-operator-controller-manager-5854674fcc-4vbhw" Dec 10 11:09:36 crc kubenswrapper[4780]: I1210 11:09:36.742263 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zwsp2\" (UniqueName: \"kubernetes.io/projected/a170f760-08d9-4fd6-b90d-46ef21e4691e-kube-api-access-zwsp2\") pod \"watcher-operator-controller-manager-667bd8d554-9q8g7\" (UID: \"a170f760-08d9-4fd6-b90d-46ef21e4691e\") " pod="openstack-operators/watcher-operator-controller-manager-667bd8d554-9q8g7" Dec 10 11:09:36 crc kubenswrapper[4780]: I1210 11:09:36.742556 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rzcgm\" (UniqueName: \"kubernetes.io/projected/194d6da3-e6b6-4330-9afa-d973a8bb03c2-kube-api-access-rzcgm\") pod \"telemetry-operator-controller-manager-54d54d59bc-c7gtb\" (UID: \"194d6da3-e6b6-4330-9afa-d973a8bb03c2\") " pod="openstack-operators/telemetry-operator-controller-manager-54d54d59bc-c7gtb" Dec 10 11:09:36 crc kubenswrapper[4780]: I1210 11:09:36.861474 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zwsp2\" (UniqueName: \"kubernetes.io/projected/a170f760-08d9-4fd6-b90d-46ef21e4691e-kube-api-access-zwsp2\") pod \"watcher-operator-controller-manager-667bd8d554-9q8g7\" (UID: \"a170f760-08d9-4fd6-b90d-46ef21e4691e\") " pod="openstack-operators/watcher-operator-controller-manager-667bd8d554-9q8g7" Dec 10 11:09:36 crc kubenswrapper[4780]: I1210 11:09:36.861572 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rzcgm\" (UniqueName: \"kubernetes.io/projected/194d6da3-e6b6-4330-9afa-d973a8bb03c2-kube-api-access-rzcgm\") pod \"telemetry-operator-controller-manager-54d54d59bc-c7gtb\" (UID: \"194d6da3-e6b6-4330-9afa-d973a8bb03c2\") " pod="openstack-operators/telemetry-operator-controller-manager-54d54d59bc-c7gtb" Dec 10 11:09:37 crc kubenswrapper[4780]: I1210 11:09:36.861755 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ldvtn\" (UniqueName: \"kubernetes.io/projected/de0be1fa-33a0-44ad-9aed-c791a447510a-kube-api-access-ldvtn\") pod \"test-operator-controller-manager-5854674fcc-4vbhw\" 
(UID: \"de0be1fa-33a0-44ad-9aed-c791a447510a\") " pod="openstack-operators/test-operator-controller-manager-5854674fcc-4vbhw" Dec 10 11:09:37 crc kubenswrapper[4780]: I1210 11:09:36.861819 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/79ed5cd6-1f3a-4e73-ae77-ce5565e5f7a3-metrics-certs\") pod \"openstack-operator-controller-manager-678c445b7b-6kmsn\" (UID: \"79ed5cd6-1f3a-4e73-ae77-ce5565e5f7a3\") " pod="openstack-operators/openstack-operator-controller-manager-678c445b7b-6kmsn" Dec 10 11:09:37 crc kubenswrapper[4780]: I1210 11:09:36.861867 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/79ed5cd6-1f3a-4e73-ae77-ce5565e5f7a3-webhook-certs\") pod \"openstack-operator-controller-manager-678c445b7b-6kmsn\" (UID: \"79ed5cd6-1f3a-4e73-ae77-ce5565e5f7a3\") " pod="openstack-operators/openstack-operator-controller-manager-678c445b7b-6kmsn" Dec 10 11:09:37 crc kubenswrapper[4780]: I1210 11:09:36.862008 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8pdj7\" (UniqueName: \"kubernetes.io/projected/79ed5cd6-1f3a-4e73-ae77-ce5565e5f7a3-kube-api-access-8pdj7\") pod \"openstack-operator-controller-manager-678c445b7b-6kmsn\" (UID: \"79ed5cd6-1f3a-4e73-ae77-ce5565e5f7a3\") " pod="openstack-operators/openstack-operator-controller-manager-678c445b7b-6kmsn" Dec 10 11:09:37 crc kubenswrapper[4780]: I1210 11:09:36.895837 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zwsp2\" (UniqueName: \"kubernetes.io/projected/a170f760-08d9-4fd6-b90d-46ef21e4691e-kube-api-access-zwsp2\") pod \"watcher-operator-controller-manager-667bd8d554-9q8g7\" (UID: \"a170f760-08d9-4fd6-b90d-46ef21e4691e\") " pod="openstack-operators/watcher-operator-controller-manager-667bd8d554-9q8g7" Dec 10 11:09:37 crc kubenswrapper[4780]: I1210 11:09:36.905847 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rzcgm\" (UniqueName: \"kubernetes.io/projected/194d6da3-e6b6-4330-9afa-d973a8bb03c2-kube-api-access-rzcgm\") pod \"telemetry-operator-controller-manager-54d54d59bc-c7gtb\" (UID: \"194d6da3-e6b6-4330-9afa-d973a8bb03c2\") " pod="openstack-operators/telemetry-operator-controller-manager-54d54d59bc-c7gtb" Dec 10 11:09:37 crc kubenswrapper[4780]: I1210 11:09:36.907419 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ldvtn\" (UniqueName: \"kubernetes.io/projected/de0be1fa-33a0-44ad-9aed-c791a447510a-kube-api-access-ldvtn\") pod \"test-operator-controller-manager-5854674fcc-4vbhw\" (UID: \"de0be1fa-33a0-44ad-9aed-c791a447510a\") " pod="openstack-operators/test-operator-controller-manager-5854674fcc-4vbhw" Dec 10 11:09:37 crc kubenswrapper[4780]: I1210 11:09:36.941038 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-52xpp"] Dec 10 11:09:37 crc kubenswrapper[4780]: I1210 11:09:36.943130 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-52xpp" Dec 10 11:09:37 crc kubenswrapper[4780]: I1210 11:09:36.948111 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"rabbitmq-cluster-operator-controller-manager-dockercfg-fqkqw" Dec 10 11:09:37 crc kubenswrapper[4780]: I1210 11:09:37.398744 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-52xpp"] Dec 10 11:09:37 crc kubenswrapper[4780]: I1210 11:09:37.407850 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/79ed5cd6-1f3a-4e73-ae77-ce5565e5f7a3-metrics-certs\") pod \"openstack-operator-controller-manager-678c445b7b-6kmsn\" (UID: \"79ed5cd6-1f3a-4e73-ae77-ce5565e5f7a3\") " pod="openstack-operators/openstack-operator-controller-manager-678c445b7b-6kmsn" Dec 10 11:09:37 crc kubenswrapper[4780]: I1210 11:09:37.407910 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/79ed5cd6-1f3a-4e73-ae77-ce5565e5f7a3-webhook-certs\") pod \"openstack-operator-controller-manager-678c445b7b-6kmsn\" (UID: \"79ed5cd6-1f3a-4e73-ae77-ce5565e5f7a3\") " pod="openstack-operators/openstack-operator-controller-manager-678c445b7b-6kmsn" Dec 10 11:09:37 crc kubenswrapper[4780]: I1210 11:09:37.407995 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8pdj7\" (UniqueName: \"kubernetes.io/projected/79ed5cd6-1f3a-4e73-ae77-ce5565e5f7a3-kube-api-access-8pdj7\") pod \"openstack-operator-controller-manager-678c445b7b-6kmsn\" (UID: \"79ed5cd6-1f3a-4e73-ae77-ce5565e5f7a3\") " pod="openstack-operators/openstack-operator-controller-manager-678c445b7b-6kmsn" Dec 10 11:09:37 crc kubenswrapper[4780]: E1210 11:09:37.412549 4780 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found Dec 10 11:09:37 crc kubenswrapper[4780]: E1210 11:09:37.412636 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/79ed5cd6-1f3a-4e73-ae77-ce5565e5f7a3-metrics-certs podName:79ed5cd6-1f3a-4e73-ae77-ce5565e5f7a3 nodeName:}" failed. No retries permitted until 2025-12-10 11:09:37.912599879 +0000 UTC m=+1482.765993322 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/79ed5cd6-1f3a-4e73-ae77-ce5565e5f7a3-metrics-certs") pod "openstack-operator-controller-manager-678c445b7b-6kmsn" (UID: "79ed5cd6-1f3a-4e73-ae77-ce5565e5f7a3") : secret "metrics-server-cert" not found Dec 10 11:09:37 crc kubenswrapper[4780]: E1210 11:09:37.412700 4780 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Dec 10 11:09:37 crc kubenswrapper[4780]: E1210 11:09:37.412740 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/79ed5cd6-1f3a-4e73-ae77-ce5565e5f7a3-webhook-certs podName:79ed5cd6-1f3a-4e73-ae77-ce5565e5f7a3 nodeName:}" failed. No retries permitted until 2025-12-10 11:09:37.912731222 +0000 UTC m=+1482.766124665 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/79ed5cd6-1f3a-4e73-ae77-ce5565e5f7a3-webhook-certs") pod "openstack-operator-controller-manager-678c445b7b-6kmsn" (UID: "79ed5cd6-1f3a-4e73-ae77-ce5565e5f7a3") : secret "webhook-server-cert" not found Dec 10 11:09:37 crc kubenswrapper[4780]: I1210 11:09:37.482494 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8pdj7\" (UniqueName: \"kubernetes.io/projected/79ed5cd6-1f3a-4e73-ae77-ce5565e5f7a3-kube-api-access-8pdj7\") pod \"openstack-operator-controller-manager-678c445b7b-6kmsn\" (UID: \"79ed5cd6-1f3a-4e73-ae77-ce5565e5f7a3\") " pod="openstack-operators/openstack-operator-controller-manager-678c445b7b-6kmsn" Dec 10 11:09:37 crc kubenswrapper[4780]: I1210 11:09:37.510259 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5llp8\" (UniqueName: \"kubernetes.io/projected/3953ad68-9125-44a8-819f-0c48aafcfbf3-kube-api-access-5llp8\") pod \"rabbitmq-cluster-operator-manager-668c99d594-52xpp\" (UID: \"3953ad68-9125-44a8-819f-0c48aafcfbf3\") " pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-52xpp" Dec 10 11:09:37 crc kubenswrapper[4780]: I1210 11:09:37.614184 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/7d98f853-1b52-438c-a5d1-6fe334794a35-cert\") pod \"openstack-baremetal-operator-controller-manager-84b575879fkqjsw\" (UID: \"7d98f853-1b52-438c-a5d1-6fe334794a35\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-84b575879fkqjsw" Dec 10 11:09:37 crc kubenswrapper[4780]: I1210 11:09:37.614483 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5llp8\" (UniqueName: \"kubernetes.io/projected/3953ad68-9125-44a8-819f-0c48aafcfbf3-kube-api-access-5llp8\") pod \"rabbitmq-cluster-operator-manager-668c99d594-52xpp\" (UID: \"3953ad68-9125-44a8-819f-0c48aafcfbf3\") " pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-52xpp" Dec 10 11:09:37 crc kubenswrapper[4780]: E1210 11:09:37.616198 4780 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found Dec 10 11:09:37 crc kubenswrapper[4780]: E1210 11:09:37.616277 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/7d98f853-1b52-438c-a5d1-6fe334794a35-cert podName:7d98f853-1b52-438c-a5d1-6fe334794a35 nodeName:}" failed. No retries permitted until 2025-12-10 11:09:39.616247124 +0000 UTC m=+1484.469640557 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/7d98f853-1b52-438c-a5d1-6fe334794a35-cert") pod "openstack-baremetal-operator-controller-manager-84b575879fkqjsw" (UID: "7d98f853-1b52-438c-a5d1-6fe334794a35") : secret "openstack-baremetal-operator-webhook-server-cert" not found Dec 10 11:09:37 crc kubenswrapper[4780]: I1210 11:09:37.651633 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5llp8\" (UniqueName: \"kubernetes.io/projected/3953ad68-9125-44a8-819f-0c48aafcfbf3-kube-api-access-5llp8\") pod \"rabbitmq-cluster-operator-manager-668c99d594-52xpp\" (UID: \"3953ad68-9125-44a8-819f-0c48aafcfbf3\") " pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-52xpp" Dec 10 11:09:37 crc kubenswrapper[4780]: I1210 11:09:37.728482 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/barbican-operator-controller-manager-7d9dfd778-rnfbd"] Dec 10 11:09:37 crc kubenswrapper[4780]: I1210 11:09:37.808900 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/cinder-operator-controller-manager-6c677c69b-nkdgw"] Dec 10 11:09:38 crc kubenswrapper[4780]: I1210 11:09:37.906148 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/ovn-operator-controller-manager-b6456fdb6-jwjxx" Dec 10 11:09:38 crc kubenswrapper[4780]: I1210 11:09:38.113812 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/79ed5cd6-1f3a-4e73-ae77-ce5565e5f7a3-metrics-certs\") pod \"openstack-operator-controller-manager-678c445b7b-6kmsn\" (UID: \"79ed5cd6-1f3a-4e73-ae77-ce5565e5f7a3\") " pod="openstack-operators/openstack-operator-controller-manager-678c445b7b-6kmsn" Dec 10 11:09:38 crc kubenswrapper[4780]: I1210 11:09:38.113931 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/79ed5cd6-1f3a-4e73-ae77-ce5565e5f7a3-webhook-certs\") pod \"openstack-operator-controller-manager-678c445b7b-6kmsn\" (UID: \"79ed5cd6-1f3a-4e73-ae77-ce5565e5f7a3\") " pod="openstack-operators/openstack-operator-controller-manager-678c445b7b-6kmsn" Dec 10 11:09:38 crc kubenswrapper[4780]: E1210 11:09:38.121079 4780 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found Dec 10 11:09:38 crc kubenswrapper[4780]: E1210 11:09:38.121191 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/79ed5cd6-1f3a-4e73-ae77-ce5565e5f7a3-metrics-certs podName:79ed5cd6-1f3a-4e73-ae77-ce5565e5f7a3 nodeName:}" failed. No retries permitted until 2025-12-10 11:09:39.121166474 +0000 UTC m=+1483.974559907 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/79ed5cd6-1f3a-4e73-ae77-ce5565e5f7a3-metrics-certs") pod "openstack-operator-controller-manager-678c445b7b-6kmsn" (UID: "79ed5cd6-1f3a-4e73-ae77-ce5565e5f7a3") : secret "metrics-server-cert" not found Dec 10 11:09:38 crc kubenswrapper[4780]: E1210 11:09:38.123591 4780 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Dec 10 11:09:38 crc kubenswrapper[4780]: E1210 11:09:38.123734 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/79ed5cd6-1f3a-4e73-ae77-ce5565e5f7a3-webhook-certs podName:79ed5cd6-1f3a-4e73-ae77-ce5565e5f7a3 nodeName:}" failed. No retries permitted until 2025-12-10 11:09:39.123700658 +0000 UTC m=+1483.977094271 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/79ed5cd6-1f3a-4e73-ae77-ce5565e5f7a3-webhook-certs") pod "openstack-operator-controller-manager-678c445b7b-6kmsn" (UID: "79ed5cd6-1f3a-4e73-ae77-ce5565e5f7a3") : secret "webhook-server-cert" not found Dec 10 11:09:38 crc kubenswrapper[4780]: I1210 11:09:38.209275 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/placement-operator-controller-manager-78f8948974-tgbxl" Dec 10 11:09:38 crc kubenswrapper[4780]: I1210 11:09:38.225178 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/swift-operator-controller-manager-9d58d64bc-t764b" Dec 10 11:09:38 crc kubenswrapper[4780]: I1210 11:09:38.327554 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/test-operator-controller-manager-5854674fcc-4vbhw" Dec 10 11:09:38 crc kubenswrapper[4780]: I1210 11:09:38.356082 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/telemetry-operator-controller-manager-54d54d59bc-c7gtb" Dec 10 11:09:38 crc kubenswrapper[4780]: I1210 11:09:38.425974 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/watcher-operator-controller-manager-667bd8d554-9q8g7" Dec 10 11:09:38 crc kubenswrapper[4780]: I1210 11:09:38.471289 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-52xpp" Dec 10 11:09:38 crc kubenswrapper[4780]: I1210 11:09:38.624373 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/e4a7d6c0-ca7b-4e3d-bff9-51d6c66810c0-cert\") pod \"infra-operator-controller-manager-78d48bff9d-d2txd\" (UID: \"e4a7d6c0-ca7b-4e3d-bff9-51d6c66810c0\") " pod="openstack-operators/infra-operator-controller-manager-78d48bff9d-d2txd" Dec 10 11:09:38 crc kubenswrapper[4780]: E1210 11:09:38.624678 4780 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found Dec 10 11:09:38 crc kubenswrapper[4780]: E1210 11:09:38.624808 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/e4a7d6c0-ca7b-4e3d-bff9-51d6c66810c0-cert podName:e4a7d6c0-ca7b-4e3d-bff9-51d6c66810c0 nodeName:}" failed. No retries permitted until 2025-12-10 11:09:42.624779091 +0000 UTC m=+1487.478172584 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/e4a7d6c0-ca7b-4e3d-bff9-51d6c66810c0-cert") pod "infra-operator-controller-manager-78d48bff9d-d2txd" (UID: "e4a7d6c0-ca7b-4e3d-bff9-51d6c66810c0") : secret "infra-operator-webhook-server-cert" not found Dec 10 11:09:38 crc kubenswrapper[4780]: I1210 11:09:38.683235 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/barbican-operator-controller-manager-7d9dfd778-rnfbd" event={"ID":"6f18f8cf-e493-41bd-92e6-a7714992854d","Type":"ContainerStarted","Data":"03190a5be0584bb784e6dd408b98efe2228818e94fe7ba76e8acdfffffa7ec45"} Dec 10 11:09:38 crc kubenswrapper[4780]: I1210 11:09:38.686909 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/heat-operator-controller-manager-5f64f6f8bb-csmkt"] Dec 10 11:09:38 crc kubenswrapper[4780]: I1210 11:09:38.708451 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/cinder-operator-controller-manager-6c677c69b-nkdgw" event={"ID":"5a684cfd-18e4-4f16-a0dd-73f2238cce27","Type":"ContainerStarted","Data":"1c95f198e795c8d92285648bf8217ef2194c062d0beba64f1cd8b2600a5832b9"} Dec 10 11:09:38 crc kubenswrapper[4780]: W1210 11:09:38.709707 4780 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd235302b_56b1_4515_9f26_4f0ea884aa87.slice/crio-24f95471e15c92c186ddd97e42d7f598a1e14294de56a78096d16a4c01e2f1e8 WatchSource:0}: Error finding container 24f95471e15c92c186ddd97e42d7f598a1e14294de56a78096d16a4c01e2f1e8: Status 404 returned error can't find the container with id 24f95471e15c92c186ddd97e42d7f598a1e14294de56a78096d16a4c01e2f1e8 Dec 10 11:09:38 crc kubenswrapper[4780]: I1210 11:09:38.752258 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/mariadb-operator-controller-manager-79c8c4686c-jxsxr"] Dec 10 11:09:38 crc kubenswrapper[4780]: I1210 11:09:38.827215 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/designate-operator-controller-manager-697fb699cf-h6ggx"] Dec 10 11:09:38 crc kubenswrapper[4780]: I1210 11:09:38.868640 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/horizon-operator-controller-manager-68c6d99b8f-896x8"] Dec 10 11:09:39 crc kubenswrapper[4780]: I1210 11:09:39.522994 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/79ed5cd6-1f3a-4e73-ae77-ce5565e5f7a3-webhook-certs\") pod \"openstack-operator-controller-manager-678c445b7b-6kmsn\" (UID: \"79ed5cd6-1f3a-4e73-ae77-ce5565e5f7a3\") " pod="openstack-operators/openstack-operator-controller-manager-678c445b7b-6kmsn" Dec 10 11:09:39 crc kubenswrapper[4780]: I1210 11:09:39.523249 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/79ed5cd6-1f3a-4e73-ae77-ce5565e5f7a3-metrics-certs\") pod \"openstack-operator-controller-manager-678c445b7b-6kmsn\" (UID: \"79ed5cd6-1f3a-4e73-ae77-ce5565e5f7a3\") " pod="openstack-operators/openstack-operator-controller-manager-678c445b7b-6kmsn" Dec 10 11:09:39 crc kubenswrapper[4780]: E1210 11:09:39.523486 4780 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found Dec 10 11:09:39 crc kubenswrapper[4780]: E1210 11:09:39.523571 4780 nestedpendingoperations.go:348] Operation for 
"{volumeName:kubernetes.io/secret/79ed5cd6-1f3a-4e73-ae77-ce5565e5f7a3-metrics-certs podName:79ed5cd6-1f3a-4e73-ae77-ce5565e5f7a3 nodeName:}" failed. No retries permitted until 2025-12-10 11:09:41.523541567 +0000 UTC m=+1486.376935010 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/79ed5cd6-1f3a-4e73-ae77-ce5565e5f7a3-metrics-certs") pod "openstack-operator-controller-manager-678c445b7b-6kmsn" (UID: "79ed5cd6-1f3a-4e73-ae77-ce5565e5f7a3") : secret "metrics-server-cert" not found Dec 10 11:09:39 crc kubenswrapper[4780]: E1210 11:09:39.524796 4780 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Dec 10 11:09:39 crc kubenswrapper[4780]: E1210 11:09:39.524835 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/79ed5cd6-1f3a-4e73-ae77-ce5565e5f7a3-webhook-certs podName:79ed5cd6-1f3a-4e73-ae77-ce5565e5f7a3 nodeName:}" failed. No retries permitted until 2025-12-10 11:09:41.52482381 +0000 UTC m=+1486.378217253 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/79ed5cd6-1f3a-4e73-ae77-ce5565e5f7a3-webhook-certs") pod "openstack-operator-controller-manager-678c445b7b-6kmsn" (UID: "79ed5cd6-1f3a-4e73-ae77-ce5565e5f7a3") : secret "webhook-server-cert" not found Dec 10 11:09:39 crc kubenswrapper[4780]: I1210 11:09:39.658671 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/7d98f853-1b52-438c-a5d1-6fe334794a35-cert\") pod \"openstack-baremetal-operator-controller-manager-84b575879fkqjsw\" (UID: \"7d98f853-1b52-438c-a5d1-6fe334794a35\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-84b575879fkqjsw" Dec 10 11:09:39 crc kubenswrapper[4780]: E1210 11:09:39.659356 4780 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found Dec 10 11:09:39 crc kubenswrapper[4780]: E1210 11:09:39.659440 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/7d98f853-1b52-438c-a5d1-6fe334794a35-cert podName:7d98f853-1b52-438c-a5d1-6fe334794a35 nodeName:}" failed. No retries permitted until 2025-12-10 11:09:43.659413366 +0000 UTC m=+1488.512806809 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/7d98f853-1b52-438c-a5d1-6fe334794a35-cert") pod "openstack-baremetal-operator-controller-manager-84b575879fkqjsw" (UID: "7d98f853-1b52-438c-a5d1-6fe334794a35") : secret "openstack-baremetal-operator-webhook-server-cert" not found Dec 10 11:09:39 crc kubenswrapper[4780]: I1210 11:09:39.748501 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/horizon-operator-controller-manager-68c6d99b8f-896x8" event={"ID":"0dbb81a9-f820-447a-a475-911ae4a53034","Type":"ContainerStarted","Data":"1be2f8960de44afb9d19f252a28a8c40d25db719c9bbe7758314381302599970"} Dec 10 11:09:39 crc kubenswrapper[4780]: I1210 11:09:39.758639 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/heat-operator-controller-manager-5f64f6f8bb-csmkt" event={"ID":"d235302b-56b1-4515-9f26-4f0ea884aa87","Type":"ContainerStarted","Data":"24f95471e15c92c186ddd97e42d7f598a1e14294de56a78096d16a4c01e2f1e8"} Dec 10 11:09:39 crc kubenswrapper[4780]: I1210 11:09:39.777169 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/designate-operator-controller-manager-697fb699cf-h6ggx" event={"ID":"77993ff6-b277-4ef9-a00d-08a47d02d483","Type":"ContainerStarted","Data":"f30bea06467a431479070497e52356543ca0ccdb1a454bb9aabfc9eff65ed840"} Dec 10 11:09:39 crc kubenswrapper[4780]: I1210 11:09:39.813682 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-controller-manager-79c8c4686c-jxsxr" event={"ID":"b1ef4b52-99f2-4257-97d4-bdb6f871f73f","Type":"ContainerStarted","Data":"25441bdedfe494c391f4f591cc92c5cdc03032cfdae1b3247c366abc9f2dc920"} Dec 10 11:09:40 crc kubenswrapper[4780]: I1210 11:09:40.557398 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/manila-operator-controller-manager-5b5fd79c9c-jb52h"] Dec 10 11:09:40 crc kubenswrapper[4780]: I1210 11:09:40.597187 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/nova-operator-controller-manager-697bc559fc-lgmzm"] Dec 10 11:09:40 crc kubenswrapper[4780]: I1210 11:09:40.710959 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/glance-operator-controller-manager-5697bb5779-lx86j"] Dec 10 11:09:40 crc kubenswrapper[4780]: I1210 11:09:40.724633 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ovn-operator-controller-manager-b6456fdb6-jwjxx"] Dec 10 11:09:40 crc kubenswrapper[4780]: I1210 11:09:40.743066 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ironic-operator-controller-manager-967d97867-84x72"] Dec 10 11:09:40 crc kubenswrapper[4780]: W1210 11:09:40.744624 4780 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3a1cc2ce_3a32_447a_8824_7ec8c84b1613.slice/crio-dbc6620d859c84f971a6e77d882134d5e375b3144b58a7df83826b7fcec5c9f0 WatchSource:0}: Error finding container dbc6620d859c84f971a6e77d882134d5e375b3144b58a7df83826b7fcec5c9f0: Status 404 returned error can't find the container with id dbc6620d859c84f971a6e77d882134d5e375b3144b58a7df83826b7fcec5c9f0 Dec 10 11:09:40 crc kubenswrapper[4780]: I1210 11:09:40.761091 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/keystone-operator-controller-manager-7765d96ddf-v6fxt"] Dec 10 11:09:40 crc kubenswrapper[4780]: I1210 11:09:40.781856 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" 
pods=["openstack-operators/octavia-operator-controller-manager-998648c74-zs8hq"] Dec 10 11:09:40 crc kubenswrapper[4780]: I1210 11:09:40.788184 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/neutron-operator-controller-manager-5fdfd5b6b5-nlzr9"] Dec 10 11:09:40 crc kubenswrapper[4780]: I1210 11:09:40.804184 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/watcher-operator-controller-manager-667bd8d554-9q8g7"] Dec 10 11:09:40 crc kubenswrapper[4780]: I1210 11:09:40.833577 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/swift-operator-controller-manager-9d58d64bc-t764b"] Dec 10 11:09:40 crc kubenswrapper[4780]: I1210 11:09:40.848455 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/test-operator-controller-manager-5854674fcc-4vbhw"] Dec 10 11:09:40 crc kubenswrapper[4780]: I1210 11:09:40.862133 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ironic-operator-controller-manager-967d97867-84x72" event={"ID":"5e979946-7a11-46af-ab82-77bae1669169","Type":"ContainerStarted","Data":"fba3c80f5d5636d30b7cc199a4d6b47397bc09cc3d6bb8ce5ff073273df2403e"} Dec 10 11:09:40 crc kubenswrapper[4780]: I1210 11:09:40.868066 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/telemetry-operator-controller-manager-54d54d59bc-c7gtb"] Dec 10 11:09:40 crc kubenswrapper[4780]: W1210 11:09:40.868151 4780 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod194d6da3_e6b6_4330_9afa_d973a8bb03c2.slice/crio-6432d6c0f7cc79e179a961ef9386faabbaad20c230e81a283fe257be750a9742 WatchSource:0}: Error finding container 6432d6c0f7cc79e179a961ef9386faabbaad20c230e81a283fe257be750a9742: Status 404 returned error can't find the container with id 6432d6c0f7cc79e179a961ef9386faabbaad20c230e81a283fe257be750a9742 Dec 10 11:09:40 crc kubenswrapper[4780]: I1210 11:09:40.868271 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-controller-manager-7765d96ddf-v6fxt" event={"ID":"9d665ff9-da6c-4ae6-82f1-0fa8d9a3fb10","Type":"ContainerStarted","Data":"3d916601202876c4754cf04bfdd7d0a47f64059e7cba8059bce23a72637fe813"} Dec 10 11:09:40 crc kubenswrapper[4780]: W1210 11:09:40.874965 4780 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod47ef7d7b_4052_4068_adef_b6a94353f980.slice/crio-168c651f625d252ba41f8891235a28eb96fa899abb84b7e48ec1f9905006dcbe WatchSource:0}: Error finding container 168c651f625d252ba41f8891235a28eb96fa899abb84b7e48ec1f9905006dcbe: Status 404 returned error can't find the container with id 168c651f625d252ba41f8891235a28eb96fa899abb84b7e48ec1f9905006dcbe Dec 10 11:09:40 crc kubenswrapper[4780]: I1210 11:09:40.879257 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ovn-operator-controller-manager-b6456fdb6-jwjxx" event={"ID":"b88956c1-b60b-4a6f-948a-de685134880f","Type":"ContainerStarted","Data":"c27bfd2d4f3e1f3393a8ad19a9b2d5070a3127a44a35fd679a7774236553418c"} Dec 10 11:09:40 crc kubenswrapper[4780]: I1210 11:09:40.881232 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-667bd8d554-9q8g7" event={"ID":"a170f760-08d9-4fd6-b90d-46ef21e4691e","Type":"ContainerStarted","Data":"bf56d1cf4954a9139c0bc46a4bb519a0f158ae64553e2fbc8b82dc412bbb43d3"} Dec 10 11:09:40 crc 
kubenswrapper[4780]: I1210 11:09:40.889013 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/glance-operator-controller-manager-5697bb5779-lx86j" event={"ID":"16c6406a-69c5-4365-81d9-8bf51365cd08","Type":"ContainerStarted","Data":"3bd3dfff864688e0d1003718b39d02302d3dcb3af523fdd0dd0bb01c033e5f68"} Dec 10 11:09:40 crc kubenswrapper[4780]: I1210 11:09:40.892280 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/neutron-operator-controller-manager-5fdfd5b6b5-nlzr9" event={"ID":"3a1cc2ce-3a32-447a-8824-7ec8c84b1613","Type":"ContainerStarted","Data":"dbc6620d859c84f971a6e77d882134d5e375b3144b58a7df83826b7fcec5c9f0"} Dec 10 11:09:40 crc kubenswrapper[4780]: I1210 11:09:40.903379 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/nova-operator-controller-manager-697bc559fc-lgmzm" event={"ID":"bbfacb89-13e0-45ef-853a-1faf76e014d7","Type":"ContainerStarted","Data":"f85fb98340f00281e83e94564cdeac456bced61b5e28f9198d4ecd3f56638b2a"} Dec 10 11:09:40 crc kubenswrapper[4780]: I1210 11:09:40.905486 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/octavia-operator-controller-manager-998648c74-zs8hq" event={"ID":"23cc63a0-e8c1-49bc-9762-daa6d315409e","Type":"ContainerStarted","Data":"ac09cb059c69cb26c019fec7116dd74f12b52aea8d2859af4abe3930e822e0c9"} Dec 10 11:09:40 crc kubenswrapper[4780]: I1210 11:09:40.911784 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/manila-operator-controller-manager-5b5fd79c9c-jb52h" event={"ID":"0f7ed694-a606-482f-90ca-bbe99437b5f7","Type":"ContainerStarted","Data":"a054ca280b39a5f45b1fb1f96822cd59a01eeacadd184d609b5f9bb133341146"} Dec 10 11:09:40 crc kubenswrapper[4780]: I1210 11:09:40.965043 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/placement-operator-controller-manager-78f8948974-tgbxl"] Dec 10 11:09:40 crc kubenswrapper[4780]: E1210 11:09:40.976762 4780 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:operator,Image:quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:893e66303c1b0bc1d00a299a3f0380bad55c8dc813c8a1c6a4aab379f5aa12a2,Command:[/manager],Args:[],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:metrics,HostPort:0,ContainerPort:9782,Protocol:TCP,HostIP:,},},Env:[]EnvVar{EnvVar{Name:OPERATOR_NAMESPACE,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{200 -3} {} 200m DecimalSI},memory: {{524288000 0} {} 500Mi BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} 
BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-5llp8,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000660000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod rabbitmq-cluster-operator-manager-668c99d594-52xpp_openstack-operators(3953ad68-9125-44a8-819f-0c48aafcfbf3): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Dec 10 11:09:40 crc kubenswrapper[4780]: E1210 11:09:40.978339 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"operator\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-52xpp" podUID="3953ad68-9125-44a8-819f-0c48aafcfbf3" Dec 10 11:09:40 crc kubenswrapper[4780]: I1210 11:09:40.979563 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-52xpp"] Dec 10 11:09:40 crc kubenswrapper[4780]: W1210 11:09:40.984580 4780 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod42edfebc_9a83_460f_9bb9_50172b9763d3.slice/crio-245e2508227d592cb2a9737765111bc9d7661b6185c9ef9b0b7c36d01ca632ad WatchSource:0}: Error finding container 245e2508227d592cb2a9737765111bc9d7661b6185c9ef9b0b7c36d01ca632ad: Status 404 returned error can't find the container with id 245e2508227d592cb2a9737765111bc9d7661b6185c9ef9b0b7c36d01ca632ad Dec 10 11:09:40 crc kubenswrapper[4780]: E1210 11:09:40.990102 4780 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/placement-operator@sha256:d29650b006da97eb9178fcc58f2eb9fead8c2b414fac18f86a3c3a1507488c4f,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-njml6,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod placement-operator-controller-manager-78f8948974-tgbxl_openstack-operators(42edfebc-9a83-460f-9bb9-50172b9763d3): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Dec 10 11:09:40 crc kubenswrapper[4780]: E1210 11:09:40.995282 4780 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:kube-rbac-proxy,Image:quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0,Command:[],Args:[--secure-listen-address=0.0.0.0:8443 --upstream=http://127.0.0.1:8080/ --logtostderr=true --v=0],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:https,HostPort:0,ContainerPort:8443,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{134217728 0} {} BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-njml6,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod placement-operator-controller-manager-78f8948974-tgbxl_openstack-operators(42edfebc-9a83-460f-9bb9-50172b9763d3): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Dec 10 11:09:40 crc kubenswrapper[4780]: E1210 11:09:40.997193 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ErrImagePull: \"pull QPS exceeded\"]" pod="openstack-operators/placement-operator-controller-manager-78f8948974-tgbxl" podUID="42edfebc-9a83-460f-9bb9-50172b9763d3" Dec 10 11:09:41 crc kubenswrapper[4780]: I1210 11:09:41.539911 4780 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/79ed5cd6-1f3a-4e73-ae77-ce5565e5f7a3-metrics-certs\") pod \"openstack-operator-controller-manager-678c445b7b-6kmsn\" (UID: \"79ed5cd6-1f3a-4e73-ae77-ce5565e5f7a3\") " pod="openstack-operators/openstack-operator-controller-manager-678c445b7b-6kmsn" Dec 10 11:09:41 crc kubenswrapper[4780]: I1210 11:09:41.540014 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/79ed5cd6-1f3a-4e73-ae77-ce5565e5f7a3-webhook-certs\") pod \"openstack-operator-controller-manager-678c445b7b-6kmsn\" (UID: \"79ed5cd6-1f3a-4e73-ae77-ce5565e5f7a3\") " pod="openstack-operators/openstack-operator-controller-manager-678c445b7b-6kmsn" Dec 10 11:09:41 crc kubenswrapper[4780]: E1210 11:09:41.540501 4780 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Dec 10 11:09:41 crc kubenswrapper[4780]: E1210 11:09:41.540581 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/79ed5cd6-1f3a-4e73-ae77-ce5565e5f7a3-webhook-certs podName:79ed5cd6-1f3a-4e73-ae77-ce5565e5f7a3 nodeName:}" failed. No retries permitted until 2025-12-10 11:09:45.540560151 +0000 UTC m=+1490.393953604 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/79ed5cd6-1f3a-4e73-ae77-ce5565e5f7a3-webhook-certs") pod "openstack-operator-controller-manager-678c445b7b-6kmsn" (UID: "79ed5cd6-1f3a-4e73-ae77-ce5565e5f7a3") : secret "webhook-server-cert" not found Dec 10 11:09:41 crc kubenswrapper[4780]: E1210 11:09:41.541334 4780 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found Dec 10 11:09:41 crc kubenswrapper[4780]: E1210 11:09:41.541386 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/79ed5cd6-1f3a-4e73-ae77-ce5565e5f7a3-metrics-certs podName:79ed5cd6-1f3a-4e73-ae77-ce5565e5f7a3 nodeName:}" failed. No retries permitted until 2025-12-10 11:09:45.541373731 +0000 UTC m=+1490.394767174 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/79ed5cd6-1f3a-4e73-ae77-ce5565e5f7a3-metrics-certs") pod "openstack-operator-controller-manager-678c445b7b-6kmsn" (UID: "79ed5cd6-1f3a-4e73-ae77-ce5565e5f7a3") : secret "metrics-server-cert" not found Dec 10 11:09:41 crc kubenswrapper[4780]: I1210 11:09:41.946379 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/telemetry-operator-controller-manager-54d54d59bc-c7gtb" event={"ID":"194d6da3-e6b6-4330-9afa-d973a8bb03c2","Type":"ContainerStarted","Data":"6432d6c0f7cc79e179a961ef9386faabbaad20c230e81a283fe257be750a9742"} Dec 10 11:09:41 crc kubenswrapper[4780]: I1210 11:09:41.953827 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-52xpp" event={"ID":"3953ad68-9125-44a8-819f-0c48aafcfbf3","Type":"ContainerStarted","Data":"fbda3f19d396a8eb887a72303b0df7297f77fb38078d7ffa130eb6af0b3bb58f"} Dec 10 11:09:41 crc kubenswrapper[4780]: I1210 11:09:41.957785 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/swift-operator-controller-manager-9d58d64bc-t764b" event={"ID":"47ef7d7b-4052-4068-adef-b6a94353f980","Type":"ContainerStarted","Data":"168c651f625d252ba41f8891235a28eb96fa899abb84b7e48ec1f9905006dcbe"} Dec 10 11:09:41 crc kubenswrapper[4780]: E1210 11:09:41.962903 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"operator\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:893e66303c1b0bc1d00a299a3f0380bad55c8dc813c8a1c6a4aab379f5aa12a2\\\"\"" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-52xpp" podUID="3953ad68-9125-44a8-819f-0c48aafcfbf3" Dec 10 11:09:42 crc kubenswrapper[4780]: I1210 11:09:41.998204 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/test-operator-controller-manager-5854674fcc-4vbhw" event={"ID":"de0be1fa-33a0-44ad-9aed-c791a447510a","Type":"ContainerStarted","Data":"edd47f77719e14643fddea0521ee8732d42ce73ada397e06f41b474f7402b113"} Dec 10 11:09:42 crc kubenswrapper[4780]: I1210 11:09:41.998272 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/placement-operator-controller-manager-78f8948974-tgbxl" event={"ID":"42edfebc-9a83-460f-9bb9-50172b9763d3","Type":"ContainerStarted","Data":"245e2508227d592cb2a9737765111bc9d7661b6185c9ef9b0b7c36d01ca632ad"} Dec 10 11:09:42 crc kubenswrapper[4780]: E1210 11:09:42.000885 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/placement-operator@sha256:d29650b006da97eb9178fcc58f2eb9fead8c2b414fac18f86a3c3a1507488c4f\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/placement-operator-controller-manager-78f8948974-tgbxl" podUID="42edfebc-9a83-460f-9bb9-50172b9763d3" Dec 10 11:09:42 crc kubenswrapper[4780]: I1210 11:09:42.641278 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/e4a7d6c0-ca7b-4e3d-bff9-51d6c66810c0-cert\") pod \"infra-operator-controller-manager-78d48bff9d-d2txd\" (UID: \"e4a7d6c0-ca7b-4e3d-bff9-51d6c66810c0\") " 
pod="openstack-operators/infra-operator-controller-manager-78d48bff9d-d2txd" Dec 10 11:09:42 crc kubenswrapper[4780]: E1210 11:09:42.641516 4780 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found Dec 10 11:09:42 crc kubenswrapper[4780]: E1210 11:09:42.641576 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/e4a7d6c0-ca7b-4e3d-bff9-51d6c66810c0-cert podName:e4a7d6c0-ca7b-4e3d-bff9-51d6c66810c0 nodeName:}" failed. No retries permitted until 2025-12-10 11:09:50.641559281 +0000 UTC m=+1495.494952724 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/e4a7d6c0-ca7b-4e3d-bff9-51d6c66810c0-cert") pod "infra-operator-controller-manager-78d48bff9d-d2txd" (UID: "e4a7d6c0-ca7b-4e3d-bff9-51d6c66810c0") : secret "infra-operator-webhook-server-cert" not found Dec 10 11:09:43 crc kubenswrapper[4780]: E1210 11:09:43.010235 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"operator\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:893e66303c1b0bc1d00a299a3f0380bad55c8dc813c8a1c6a4aab379f5aa12a2\\\"\"" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-52xpp" podUID="3953ad68-9125-44a8-819f-0c48aafcfbf3" Dec 10 11:09:43 crc kubenswrapper[4780]: E1210 11:09:43.016385 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/placement-operator@sha256:d29650b006da97eb9178fcc58f2eb9fead8c2b414fac18f86a3c3a1507488c4f\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/placement-operator-controller-manager-78f8948974-tgbxl" podUID="42edfebc-9a83-460f-9bb9-50172b9763d3" Dec 10 11:09:43 crc kubenswrapper[4780]: I1210 11:09:43.891539 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/7d98f853-1b52-438c-a5d1-6fe334794a35-cert\") pod \"openstack-baremetal-operator-controller-manager-84b575879fkqjsw\" (UID: \"7d98f853-1b52-438c-a5d1-6fe334794a35\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-84b575879fkqjsw" Dec 10 11:09:43 crc kubenswrapper[4780]: E1210 11:09:43.892039 4780 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found Dec 10 11:09:43 crc kubenswrapper[4780]: E1210 11:09:43.892120 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/7d98f853-1b52-438c-a5d1-6fe334794a35-cert podName:7d98f853-1b52-438c-a5d1-6fe334794a35 nodeName:}" failed. No retries permitted until 2025-12-10 11:09:51.892096904 +0000 UTC m=+1496.745490347 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/7d98f853-1b52-438c-a5d1-6fe334794a35-cert") pod "openstack-baremetal-operator-controller-manager-84b575879fkqjsw" (UID: "7d98f853-1b52-438c-a5d1-6fe334794a35") : secret "openstack-baremetal-operator-webhook-server-cert" not found Dec 10 11:09:45 crc kubenswrapper[4780]: I1210 11:09:45.582237 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/79ed5cd6-1f3a-4e73-ae77-ce5565e5f7a3-metrics-certs\") pod \"openstack-operator-controller-manager-678c445b7b-6kmsn\" (UID: \"79ed5cd6-1f3a-4e73-ae77-ce5565e5f7a3\") " pod="openstack-operators/openstack-operator-controller-manager-678c445b7b-6kmsn" Dec 10 11:09:45 crc kubenswrapper[4780]: E1210 11:09:45.582518 4780 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found Dec 10 11:09:45 crc kubenswrapper[4780]: E1210 11:09:45.582817 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/79ed5cd6-1f3a-4e73-ae77-ce5565e5f7a3-metrics-certs podName:79ed5cd6-1f3a-4e73-ae77-ce5565e5f7a3 nodeName:}" failed. No retries permitted until 2025-12-10 11:09:53.582793685 +0000 UTC m=+1498.436187118 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/79ed5cd6-1f3a-4e73-ae77-ce5565e5f7a3-metrics-certs") pod "openstack-operator-controller-manager-678c445b7b-6kmsn" (UID: "79ed5cd6-1f3a-4e73-ae77-ce5565e5f7a3") : secret "metrics-server-cert" not found Dec 10 11:09:45 crc kubenswrapper[4780]: I1210 11:09:45.582732 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/79ed5cd6-1f3a-4e73-ae77-ce5565e5f7a3-webhook-certs\") pod \"openstack-operator-controller-manager-678c445b7b-6kmsn\" (UID: \"79ed5cd6-1f3a-4e73-ae77-ce5565e5f7a3\") " pod="openstack-operators/openstack-operator-controller-manager-678c445b7b-6kmsn" Dec 10 11:09:45 crc kubenswrapper[4780]: E1210 11:09:45.582871 4780 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Dec 10 11:09:45 crc kubenswrapper[4780]: E1210 11:09:45.582979 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/79ed5cd6-1f3a-4e73-ae77-ce5565e5f7a3-webhook-certs podName:79ed5cd6-1f3a-4e73-ae77-ce5565e5f7a3 nodeName:}" failed. No retries permitted until 2025-12-10 11:09:53.582957249 +0000 UTC m=+1498.436350692 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/79ed5cd6-1f3a-4e73-ae77-ce5565e5f7a3-webhook-certs") pod "openstack-operator-controller-manager-678c445b7b-6kmsn" (UID: "79ed5cd6-1f3a-4e73-ae77-ce5565e5f7a3") : secret "webhook-server-cert" not found Dec 10 11:09:50 crc kubenswrapper[4780]: I1210 11:09:50.681510 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/e4a7d6c0-ca7b-4e3d-bff9-51d6c66810c0-cert\") pod \"infra-operator-controller-manager-78d48bff9d-d2txd\" (UID: \"e4a7d6c0-ca7b-4e3d-bff9-51d6c66810c0\") " pod="openstack-operators/infra-operator-controller-manager-78d48bff9d-d2txd" Dec 10 11:09:50 crc kubenswrapper[4780]: I1210 11:09:50.708090 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/e4a7d6c0-ca7b-4e3d-bff9-51d6c66810c0-cert\") pod \"infra-operator-controller-manager-78d48bff9d-d2txd\" (UID: \"e4a7d6c0-ca7b-4e3d-bff9-51d6c66810c0\") " pod="openstack-operators/infra-operator-controller-manager-78d48bff9d-d2txd" Dec 10 11:09:50 crc kubenswrapper[4780]: I1210 11:09:50.942023 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/infra-operator-controller-manager-78d48bff9d-d2txd" Dec 10 11:09:51 crc kubenswrapper[4780]: I1210 11:09:51.983105 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/7d98f853-1b52-438c-a5d1-6fe334794a35-cert\") pod \"openstack-baremetal-operator-controller-manager-84b575879fkqjsw\" (UID: \"7d98f853-1b52-438c-a5d1-6fe334794a35\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-84b575879fkqjsw" Dec 10 11:09:51 crc kubenswrapper[4780]: I1210 11:09:51.993620 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/7d98f853-1b52-438c-a5d1-6fe334794a35-cert\") pod \"openstack-baremetal-operator-controller-manager-84b575879fkqjsw\" (UID: \"7d98f853-1b52-438c-a5d1-6fe334794a35\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-84b575879fkqjsw" Dec 10 11:09:52 crc kubenswrapper[4780]: I1210 11:09:52.289237 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-baremetal-operator-controller-manager-84b575879fkqjsw" Dec 10 11:09:53 crc kubenswrapper[4780]: I1210 11:09:53.604238 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/79ed5cd6-1f3a-4e73-ae77-ce5565e5f7a3-metrics-certs\") pod \"openstack-operator-controller-manager-678c445b7b-6kmsn\" (UID: \"79ed5cd6-1f3a-4e73-ae77-ce5565e5f7a3\") " pod="openstack-operators/openstack-operator-controller-manager-678c445b7b-6kmsn" Dec 10 11:09:53 crc kubenswrapper[4780]: I1210 11:09:53.604332 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/79ed5cd6-1f3a-4e73-ae77-ce5565e5f7a3-webhook-certs\") pod \"openstack-operator-controller-manager-678c445b7b-6kmsn\" (UID: \"79ed5cd6-1f3a-4e73-ae77-ce5565e5f7a3\") " pod="openstack-operators/openstack-operator-controller-manager-678c445b7b-6kmsn" Dec 10 11:09:53 crc kubenswrapper[4780]: I1210 11:09:53.610698 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/79ed5cd6-1f3a-4e73-ae77-ce5565e5f7a3-metrics-certs\") pod \"openstack-operator-controller-manager-678c445b7b-6kmsn\" (UID: \"79ed5cd6-1f3a-4e73-ae77-ce5565e5f7a3\") " pod="openstack-operators/openstack-operator-controller-manager-678c445b7b-6kmsn" Dec 10 11:09:53 crc kubenswrapper[4780]: I1210 11:09:53.612716 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/79ed5cd6-1f3a-4e73-ae77-ce5565e5f7a3-webhook-certs\") pod \"openstack-operator-controller-manager-678c445b7b-6kmsn\" (UID: \"79ed5cd6-1f3a-4e73-ae77-ce5565e5f7a3\") " pod="openstack-operators/openstack-operator-controller-manager-678c445b7b-6kmsn" Dec 10 11:09:53 crc kubenswrapper[4780]: I1210 11:09:53.758555 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-controller-manager-678c445b7b-6kmsn" Dec 10 11:09:57 crc kubenswrapper[4780]: I1210 11:09:57.476422 4780 patch_prober.go:28] interesting pod/machine-config-daemon-xhdr5 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 10 11:09:57 crc kubenswrapper[4780]: I1210 11:09:57.477019 4780 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" podUID="6bf1dca1-b191-4796-b326-baac53e84045" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 10 11:09:57 crc kubenswrapper[4780]: I1210 11:09:57.477105 4780 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" Dec 10 11:09:57 crc kubenswrapper[4780]: I1210 11:09:57.478317 4780 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"c3c4f9f16910550f67c3bdc81fc9c721bc946d7793a35038605a3c1b6eb79b3b"} pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Dec 10 11:09:57 crc kubenswrapper[4780]: I1210 11:09:57.478426 4780 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" podUID="6bf1dca1-b191-4796-b326-baac53e84045" containerName="machine-config-daemon" containerID="cri-o://c3c4f9f16910550f67c3bdc81fc9c721bc946d7793a35038605a3c1b6eb79b3b" gracePeriod=600 Dec 10 11:10:06 crc kubenswrapper[4780]: E1210 11:10:06.108965 4780 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/cinder-operator@sha256:981b6a8f95934a86c5f10ef6e198b07265aeba7f11cf84b9ccd13dfaf06f3ca3" Dec 10 11:10:06 crc kubenswrapper[4780]: E1210 11:10:06.109980 4780 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/cinder-operator@sha256:981b6a8f95934a86c5f10ef6e198b07265aeba7f11cf84b9ccd13dfaf06f3ca3,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-29ff7,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod cinder-operator-controller-manager-6c677c69b-nkdgw_openstack-operators(5a684cfd-18e4-4f16-a0dd-73f2238cce27): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Dec 10 11:10:06 crc kubenswrapper[4780]: I1210 11:10:06.831598 4780 generic.go:334] "Generic (PLEG): container finished" podID="6bf1dca1-b191-4796-b326-baac53e84045" containerID="c3c4f9f16910550f67c3bdc81fc9c721bc946d7793a35038605a3c1b6eb79b3b" exitCode=0 Dec 10 11:10:06 crc kubenswrapper[4780]: I1210 11:10:06.831685 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" event={"ID":"6bf1dca1-b191-4796-b326-baac53e84045","Type":"ContainerDied","Data":"c3c4f9f16910550f67c3bdc81fc9c721bc946d7793a35038605a3c1b6eb79b3b"} Dec 10 11:10:06 crc kubenswrapper[4780]: I1210 11:10:06.831739 4780 scope.go:117] "RemoveContainer" containerID="2a20f929d413d862ac186ee1144a2d1d554405829efe199ddb0dfa3f0f9ae340" Dec 10 11:10:12 crc kubenswrapper[4780]: E1210 11:10:12.376239 4780 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/mariadb-operator@sha256:424da951f13f1fbe9083215dc9f5088f90676dd813f01fdf3c1a8639b61cbaad" Dec 10 11:10:12 crc kubenswrapper[4780]: E1210 11:10:12.378335 4780 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/mariadb-operator@sha256:424da951f13f1fbe9083215dc9f5088f90676dd813f01fdf3c1a8639b61cbaad,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} 
BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-nm57l,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod mariadb-operator-controller-manager-79c8c4686c-jxsxr_openstack-operators(b1ef4b52-99f2-4257-97d4-bdb6f871f73f): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Dec 10 11:10:16 crc kubenswrapper[4780]: E1210 11:10:16.752471 4780 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/swift-operator@sha256:3aa109bb973253ae9dcf339b9b65abbd1176cdb4be672c93e538a5f113816991" Dec 10 11:10:16 crc kubenswrapper[4780]: E1210 11:10:16.753379 4780 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/swift-operator@sha256:3aa109bb973253ae9dcf339b9b65abbd1176cdb4be672c93e538a5f113816991,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-c8wcq,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000660000,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod swift-operator-controller-manager-9d58d64bc-t764b_openstack-operators(47ef7d7b-4052-4068-adef-b6a94353f980): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Dec 10 11:10:20 crc kubenswrapper[4780]: E1210 11:10:20.614465 4780 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/test-operator@sha256:101b3e007d8c9f2e183262d7712f986ad51256448099069bc14f1ea5f997ab94" Dec 10 11:10:20 crc kubenswrapper[4780]: E1210 11:10:20.616618 4780 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/test-operator@sha256:101b3e007d8c9f2e183262d7712f986ad51256448099069bc14f1ea5f997ab94,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-ldvtn,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod test-operator-controller-manager-5854674fcc-4vbhw_openstack-operators(de0be1fa-33a0-44ad-9aed-c791a447510a): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Dec 10 11:10:21 crc kubenswrapper[4780]: E1210 11:10:21.275362 4780 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/glance-operator@sha256:5370dc4a8e776923eec00bb50cbdb2e390e9dde50be26bdc04a216bd2d6b5027" Dec 10 11:10:21 crc kubenswrapper[4780]: E1210 11:10:21.275586 4780 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/glance-operator@sha256:5370dc4a8e776923eec00bb50cbdb2e390e9dde50be26bdc04a216bd2d6b5027,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-6cmb9,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod glance-operator-controller-manager-5697bb5779-lx86j_openstack-operators(16c6406a-69c5-4365-81d9-8bf51365cd08): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Dec 10 11:10:22 crc kubenswrapper[4780]: E1210 11:10:22.107388 4780 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/watcher-operator@sha256:6b3e0302608a2e70f9b5ae9167f6fbf59264f226d9db99d48f70466ab2f216b8" Dec 10 11:10:22 crc kubenswrapper[4780]: E1210 11:10:22.108203 4780 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/watcher-operator@sha256:6b3e0302608a2e70f9b5ae9167f6fbf59264f226d9db99d48f70466ab2f216b8,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-zwsp2,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod watcher-operator-controller-manager-667bd8d554-9q8g7_openstack-operators(a170f760-08d9-4fd6-b90d-46ef21e4691e): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Dec 10 11:10:22 crc kubenswrapper[4780]: E1210 11:10:22.718499 4780 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/ovn-operator@sha256:635a4aef9d6f0b799e8ec91333dbb312160c001d05b3c63f614c124e0b67cb59" Dec 10 11:10:22 crc kubenswrapper[4780]: E1210 11:10:22.719451 4780 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/ovn-operator@sha256:635a4aef9d6f0b799e8ec91333dbb312160c001d05b3c63f614c124e0b67cb59,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-4nvcv,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ovn-operator-controller-manager-b6456fdb6-jwjxx_openstack-operators(b88956c1-b60b-4a6f-948a-de685134880f): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Dec 10 11:10:25 crc kubenswrapper[4780]: E1210 11:10:25.324020 4780 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/octavia-operator@sha256:d9a3694865a7d54ee96397add18c3898886e98d079aa20876a0f4de1fa7a7168" Dec 10 11:10:25 crc kubenswrapper[4780]: E1210 11:10:25.324873 4780 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/octavia-operator@sha256:d9a3694865a7d54ee96397add18c3898886e98d079aa20876a0f4de1fa7a7168,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-hddqp,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod octavia-operator-controller-manager-998648c74-zs8hq_openstack-operators(23cc63a0-e8c1-49bc-9762-daa6d315409e): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Dec 10 11:10:26 crc kubenswrapper[4780]: E1210 11:10:26.264632 4780 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/horizon-operator@sha256:9e847f4dbdea19ab997f32a02b3680a9bd966f9c705911645c3866a19fda9ea5" Dec 10 11:10:26 crc kubenswrapper[4780]: E1210 11:10:26.264962 4780 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/horizon-operator@sha256:9e847f4dbdea19ab997f32a02b3680a9bd966f9c705911645c3866a19fda9ea5,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-nd49x,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod horizon-operator-controller-manager-68c6d99b8f-896x8_openstack-operators(0dbb81a9-f820-447a-a475-911ae4a53034): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Dec 10 11:10:27 crc kubenswrapper[4780]: E1210 11:10:27.622358 4780 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/neutron-operator@sha256:0b3fb69f35c151895d3dffd514974a9f9fe1c77c3bca69b78b81efb183cf4557" Dec 10 11:10:27 crc kubenswrapper[4780]: E1210 11:10:27.623429 4780 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/neutron-operator@sha256:0b3fb69f35c151895d3dffd514974a9f9fe1c77c3bca69b78b81efb183cf4557,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-jcb8v,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod neutron-operator-controller-manager-5fdfd5b6b5-nlzr9_openstack-operators(3a1cc2ce-3a32-447a-8824-7ec8c84b1613): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Dec 10 11:10:28 crc kubenswrapper[4780]: E1210 11:10:28.351560 4780 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/manila-operator@sha256:44126f9c6b1d2bf752ddf989e20a4fc4cc1c07723d4fcb78465ccb2f55da6b3a" Dec 10 11:10:28 crc kubenswrapper[4780]: E1210 11:10:28.351839 4780 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/manila-operator@sha256:44126f9c6b1d2bf752ddf989e20a4fc4cc1c07723d4fcb78465ccb2f55da6b3a,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-5g2cb,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod manila-operator-controller-manager-5b5fd79c9c-jb52h_openstack-operators(0f7ed694-a606-482f-90ca-bbe99437b5f7): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Dec 10 11:10:29 crc kubenswrapper[4780]: E1210 11:10:29.911196 4780 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/ironic-operator@sha256:5bdb3685be3ddc1efd62e16aaf2fa96ead64315e26d52b1b2a7d8ac01baa1e87" Dec 10 11:10:29 crc kubenswrapper[4780]: E1210 11:10:29.916661 4780 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/ironic-operator@sha256:5bdb3685be3ddc1efd62e16aaf2fa96ead64315e26d52b1b2a7d8ac01baa1e87,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-h5sr9,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ironic-operator-controller-manager-967d97867-84x72_openstack-operators(5e979946-7a11-46af-ab82-77bae1669169): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Dec 10 11:10:30 crc kubenswrapper[4780]: E1210 11:10:30.366878 4780 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="38.102.83.143:5001/openstack-k8s-operators/telemetry-operator:d352973ce9a498b9e2a14f554e860795ca5bcdcf" Dec 10 11:10:30 crc kubenswrapper[4780]: E1210 11:10:30.367008 4780 kuberuntime_image.go:55] "Failed to pull image" err="rpc error: code = Canceled desc = copying config: context canceled" image="38.102.83.143:5001/openstack-k8s-operators/telemetry-operator:d352973ce9a498b9e2a14f554e860795ca5bcdcf" Dec 10 11:10:30 crc kubenswrapper[4780]: E1210 11:10:30.367206 4780 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:38.102.83.143:5001/openstack-k8s-operators/telemetry-operator:d352973ce9a498b9e2a14f554e860795ca5bcdcf,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-rzcgm,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod telemetry-operator-controller-manager-54d54d59bc-c7gtb_openstack-operators(194d6da3-e6b6-4330-9afa-d973a8bb03c2): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Dec 10 11:10:30 crc kubenswrapper[4780]: E1210 11:10:30.965582 4780 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/nova-operator@sha256:779f0cee6024d0fb8f259b036fe790e62aa5a3b0431ea9bf15a6e7d02e2e5670" Dec 10 11:10:30 crc kubenswrapper[4780]: E1210 11:10:30.966229 4780 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/nova-operator@sha256:779f0cee6024d0fb8f259b036fe790e62aa5a3b0431ea9bf15a6e7d02e2e5670,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-jdzrz,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod nova-operator-controller-manager-697bc559fc-lgmzm_openstack-operators(bbfacb89-13e0-45ef-853a-1faf76e014d7): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Dec 10 11:10:33 crc kubenswrapper[4780]: E1210 11:10:33.172717 4780 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/placement-operator@sha256:d29650b006da97eb9178fcc58f2eb9fead8c2b414fac18f86a3c3a1507488c4f" Dec 10 11:10:33 crc kubenswrapper[4780]: E1210 11:10:33.173632 4780 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/placement-operator@sha256:d29650b006da97eb9178fcc58f2eb9fead8c2b414fac18f86a3c3a1507488c4f,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-njml6,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod placement-operator-controller-manager-78f8948974-tgbxl_openstack-operators(42edfebc-9a83-460f-9bb9-50172b9763d3): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Dec 10 11:10:41 crc kubenswrapper[4780]: E1210 11:10:41.314299 4780 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/keystone-operator@sha256:72ad6517987f674af0d0ae092cbb874aeae909c8b8b60188099c311762ebc8f7" Dec 10 11:10:41 crc kubenswrapper[4780]: E1210 11:10:41.315735 4780 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/keystone-operator@sha256:72ad6517987f674af0d0ae092cbb874aeae909c8b8b60188099c311762ebc8f7,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-5wcxj,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod keystone-operator-controller-manager-7765d96ddf-v6fxt_openstack-operators(9d665ff9-da6c-4ae6-82f1-0fa8d9a3fb10): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Dec 10 11:10:41 crc kubenswrapper[4780]: I1210 11:10:41.739506 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/infra-operator-controller-manager-78d48bff9d-d2txd"] Dec 10 11:10:42 crc kubenswrapper[4780]: E1210 11:10:42.590182 4780 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:893e66303c1b0bc1d00a299a3f0380bad55c8dc813c8a1c6a4aab379f5aa12a2" Dec 10 11:10:42 crc kubenswrapper[4780]: E1210 11:10:42.591197 4780 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:operator,Image:quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:893e66303c1b0bc1d00a299a3f0380bad55c8dc813c8a1c6a4aab379f5aa12a2,Command:[/manager],Args:[],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:metrics,HostPort:0,ContainerPort:9782,Protocol:TCP,HostIP:,},},Env:[]EnvVar{EnvVar{Name:OPERATOR_NAMESPACE,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{200 -3} {} 200m DecimalSI},memory: {{524288000 0} {} 500Mi BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-5llp8,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000660000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in 
pod rabbitmq-cluster-operator-manager-668c99d594-52xpp_openstack-operators(3953ad68-9125-44a8-819f-0c48aafcfbf3): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Dec 10 11:10:42 crc kubenswrapper[4780]: E1210 11:10:42.594214 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"operator\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-52xpp" podUID="3953ad68-9125-44a8-819f-0c48aafcfbf3" Dec 10 11:10:43 crc kubenswrapper[4780]: I1210 11:10:43.528309 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-baremetal-operator-controller-manager-84b575879fkqjsw"] Dec 10 11:10:43 crc kubenswrapper[4780]: I1210 11:10:43.590842 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-controller-manager-78d48bff9d-d2txd" event={"ID":"e4a7d6c0-ca7b-4e3d-bff9-51d6c66810c0","Type":"ContainerStarted","Data":"dbebbb3d511f216145361a9097bf64b1a65d43af13b471d0dbb31139361e17a3"} Dec 10 11:10:43 crc kubenswrapper[4780]: I1210 11:10:43.611087 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/barbican-operator-controller-manager-7d9dfd778-rnfbd" event={"ID":"6f18f8cf-e493-41bd-92e6-a7714992854d","Type":"ContainerStarted","Data":"9526a1d0c5bd50f8eb4ae4b1856743b2305471008ec0924f65ee3f42ead2a770"} Dec 10 11:10:43 crc kubenswrapper[4780]: I1210 11:10:43.633841 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" event={"ID":"6bf1dca1-b191-4796-b326-baac53e84045","Type":"ContainerStarted","Data":"90f38aa24e74e78a263a82b742bebf91991ff25f2212114904e33abbbd82de16"} Dec 10 11:10:43 crc kubenswrapper[4780]: I1210 11:10:43.667800 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-manager-678c445b7b-6kmsn"] Dec 10 11:10:44 crc kubenswrapper[4780]: I1210 11:10:44.648162 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-manager-678c445b7b-6kmsn" event={"ID":"79ed5cd6-1f3a-4e73-ae77-ce5565e5f7a3","Type":"ContainerStarted","Data":"20bbbd5a2da2c7ce3e5b19b74083954c73850a7dc1c696aa365f86bae7bdbdd0"} Dec 10 11:10:44 crc kubenswrapper[4780]: I1210 11:10:44.650218 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-baremetal-operator-controller-manager-84b575879fkqjsw" event={"ID":"7d98f853-1b52-438c-a5d1-6fe334794a35","Type":"ContainerStarted","Data":"0b1e9f77b4e61846d36a8321e4b212ae8bad9520fba369b77577f28ada78d8f8"} Dec 10 11:10:45 crc kubenswrapper[4780]: I1210 11:10:45.668981 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/heat-operator-controller-manager-5f64f6f8bb-csmkt" event={"ID":"d235302b-56b1-4515-9f26-4f0ea884aa87","Type":"ContainerStarted","Data":"ae3066f21df7609f4e001289c0bc1efdf2556cf79bc8dfaad59e45044836cbb4"} Dec 10 11:10:45 crc kubenswrapper[4780]: I1210 11:10:45.674117 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/designate-operator-controller-manager-697fb699cf-h6ggx" event={"ID":"77993ff6-b277-4ef9-a00d-08a47d02d483","Type":"ContainerStarted","Data":"da8a39ddb2194b1a29c07574c1be9155cd3dd059d81c16434ca6a336266cff0c"} Dec 10 11:10:49 crc kubenswrapper[4780]: E1210 11:10:49.395378 4780 log.go:32] "PullImage from 
image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0" Dec 10 11:10:49 crc kubenswrapper[4780]: E1210 11:10:49.397635 4780 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:kube-rbac-proxy,Image:quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0,Command:[],Args:[--secure-listen-address=0.0.0.0:8443 --upstream=http://127.0.0.1:8080/ --logtostderr=true --v=0],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:https,HostPort:0,ContainerPort:8443,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{134217728 0} {} BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-nm57l,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod mariadb-operator-controller-manager-79c8c4686c-jxsxr_openstack-operators(b1ef4b52-99f2-4257-97d4-bdb6f871f73f): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Dec 10 11:10:49 crc kubenswrapper[4780]: E1210 11:10:49.398764 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"]" pod="openstack-operators/mariadb-operator-controller-manager-79c8c4686c-jxsxr" podUID="b1ef4b52-99f2-4257-97d4-bdb6f871f73f" Dec 10 11:10:49 crc kubenswrapper[4780]: E1210 11:10:49.893023 4780 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0" Dec 10 11:10:49 crc kubenswrapper[4780]: E1210 11:10:49.893819 4780 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:kube-rbac-proxy,Image:quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0,Command:[],Args:[--secure-listen-address=0.0.0.0:8443 --upstream=http://127.0.0.1:8080/ --logtostderr=true --v=0],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:https,HostPort:0,ContainerPort:8443,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{134217728 0} {} BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} 
BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-c8wcq,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000660000,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod swift-operator-controller-manager-9d58d64bc-t764b_openstack-operators(47ef7d7b-4052-4068-adef-b6a94353f980): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Dec 10 11:10:49 crc kubenswrapper[4780]: E1210 11:10:49.895112 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"]" pod="openstack-operators/swift-operator-controller-manager-9d58d64bc-t764b" podUID="47ef7d7b-4052-4068-adef-b6a94353f980" Dec 10 11:10:49 crc kubenswrapper[4780]: E1210 11:10:49.984605 4780 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0" Dec 10 11:10:49 crc kubenswrapper[4780]: E1210 11:10:49.984820 4780 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:kube-rbac-proxy,Image:quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0,Command:[],Args:[--secure-listen-address=0.0.0.0:8443 --upstream=http://127.0.0.1:8080/ --logtostderr=true --v=0],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:https,HostPort:0,ContainerPort:8443,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{134217728 0} {} BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-29ff7,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod cinder-operator-controller-manager-6c677c69b-nkdgw_openstack-operators(5a684cfd-18e4-4f16-a0dd-73f2238cce27): 
ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Dec 10 11:10:49 crc kubenswrapper[4780]: E1210 11:10:49.986049 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"]" pod="openstack-operators/cinder-operator-controller-manager-6c677c69b-nkdgw" podUID="5a684cfd-18e4-4f16-a0dd-73f2238cce27" Dec 10 11:10:49 crc kubenswrapper[4780]: E1210 11:10:49.991474 4780 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0" Dec 10 11:10:49 crc kubenswrapper[4780]: E1210 11:10:49.991771 4780 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:kube-rbac-proxy,Image:quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0,Command:[],Args:[--secure-listen-address=0.0.0.0:8443 --upstream=http://127.0.0.1:8080/ --logtostderr=true --v=0],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:https,HostPort:0,ContainerPort:8443,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{134217728 0} {} BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-ldvtn,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod test-operator-controller-manager-5854674fcc-4vbhw_openstack-operators(de0be1fa-33a0-44ad-9aed-c791a447510a): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Dec 10 11:10:49 crc kubenswrapper[4780]: E1210 11:10:49.993007 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"]" pod="openstack-operators/test-operator-controller-manager-5854674fcc-4vbhw" podUID="de0be1fa-33a0-44ad-9aed-c791a447510a" Dec 10 11:10:50 crc kubenswrapper[4780]: I1210 11:10:50.163365 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-manager-678c445b7b-6kmsn" event={"ID":"79ed5cd6-1f3a-4e73-ae77-ce5565e5f7a3","Type":"ContainerStarted","Data":"6db56d4a5d09aed2ff769a2e0c9e7c43fac1427f5974f69de3ad88b4e345e493"} Dec 10 11:10:50 crc 
kubenswrapper[4780]: I1210 11:10:50.270255 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-controller-manager-678c445b7b-6kmsn" podStartSLOduration=76.270140519 podStartE2EDuration="1m16.270140519s" podCreationTimestamp="2025-12-10 11:09:34 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 11:10:50.266566379 +0000 UTC m=+1555.119959852" watchObservedRunningTime="2025-12-10 11:10:50.270140519 +0000 UTC m=+1555.123533962" Dec 10 11:10:51 crc kubenswrapper[4780]: I1210 11:10:51.175467 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-controller-manager-678c445b7b-6kmsn" Dec 10 11:10:52 crc kubenswrapper[4780]: E1210 11:10:52.317664 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/ironic-operator-controller-manager-967d97867-84x72" podUID="5e979946-7a11-46af-ab82-77bae1669169" Dec 10 11:10:52 crc kubenswrapper[4780]: E1210 11:10:52.325967 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/glance-operator-controller-manager-5697bb5779-lx86j" podUID="16c6406a-69c5-4365-81d9-8bf51365cd08" Dec 10 11:10:52 crc kubenswrapper[4780]: E1210 11:10:52.342722 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/watcher-operator-controller-manager-667bd8d554-9q8g7" podUID="a170f760-08d9-4fd6-b90d-46ef21e4691e" Dec 10 11:10:52 crc kubenswrapper[4780]: E1210 11:10:52.431226 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/manila-operator-controller-manager-5b5fd79c9c-jb52h" podUID="0f7ed694-a606-482f-90ca-bbe99437b5f7" Dec 10 11:10:52 crc kubenswrapper[4780]: E1210 11:10:52.825811 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/placement-operator-controller-manager-78f8948974-tgbxl" podUID="42edfebc-9a83-460f-9bb9-50172b9763d3" Dec 10 11:10:52 crc kubenswrapper[4780]: E1210 11:10:52.833932 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/ovn-operator-controller-manager-b6456fdb6-jwjxx" podUID="b88956c1-b60b-4a6f-948a-de685134880f" Dec 10 11:10:52 crc kubenswrapper[4780]: E1210 11:10:52.982120 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/keystone-operator-controller-manager-7765d96ddf-v6fxt" podUID="9d665ff9-da6c-4ae6-82f1-0fa8d9a3fb10" Dec 10 11:10:53 crc kubenswrapper[4780]: I1210 11:10:53.213636 
4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/cinder-operator-controller-manager-6c677c69b-nkdgw" event={"ID":"5a684cfd-18e4-4f16-a0dd-73f2238cce27","Type":"ContainerStarted","Data":"c6199983156a66fc66160d881e1b85641872a413683d6bdeccade717b83822e8"} Dec 10 11:10:53 crc kubenswrapper[4780]: I1210 11:10:53.228368 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/heat-operator-controller-manager-5f64f6f8bb-csmkt" event={"ID":"d235302b-56b1-4515-9f26-4f0ea884aa87","Type":"ContainerStarted","Data":"525c7c15a9722bd8d3d1ab95f47046fbf951eb1ee1fdf09ca0b1df47139459aa"} Dec 10 11:10:53 crc kubenswrapper[4780]: I1210 11:10:53.230519 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/heat-operator-controller-manager-5f64f6f8bb-csmkt" Dec 10 11:10:53 crc kubenswrapper[4780]: I1210 11:10:53.235950 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/heat-operator-controller-manager-5f64f6f8bb-csmkt" Dec 10 11:10:53 crc kubenswrapper[4780]: I1210 11:10:53.243617 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/manila-operator-controller-manager-5b5fd79c9c-jb52h" event={"ID":"0f7ed694-a606-482f-90ca-bbe99437b5f7","Type":"ContainerStarted","Data":"cd23527fe0a53248e40166629967db427f0aee89a940f5070ff34dfe204d5c53"} Dec 10 11:10:53 crc kubenswrapper[4780]: I1210 11:10:53.279634 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/heat-operator-controller-manager-5f64f6f8bb-csmkt" podStartSLOduration=7.526820556 podStartE2EDuration="1m20.279591789s" podCreationTimestamp="2025-12-10 11:09:33 +0000 UTC" firstStartedPulling="2025-12-10 11:09:38.714654282 +0000 UTC m=+1483.568047725" lastFinishedPulling="2025-12-10 11:10:51.467425515 +0000 UTC m=+1556.320818958" observedRunningTime="2025-12-10 11:10:53.272670385 +0000 UTC m=+1558.126063818" watchObservedRunningTime="2025-12-10 11:10:53.279591789 +0000 UTC m=+1558.132985232" Dec 10 11:10:53 crc kubenswrapper[4780]: I1210 11:10:53.297520 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ovn-operator-controller-manager-b6456fdb6-jwjxx" event={"ID":"b88956c1-b60b-4a6f-948a-de685134880f","Type":"ContainerStarted","Data":"2cc5957add68f9afe505a8b8d8a30883b20ebeba23dea920abee3d99be823d89"} Dec 10 11:10:53 crc kubenswrapper[4780]: I1210 11:10:53.385338 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-controller-manager-78d48bff9d-d2txd" event={"ID":"e4a7d6c0-ca7b-4e3d-bff9-51d6c66810c0","Type":"ContainerStarted","Data":"a04e46fbeaf7b1d3b7fb4f99ae117237955fdb1b969ab8471000e431f49e053c"} Dec 10 11:10:53 crc kubenswrapper[4780]: E1210 11:10:53.462145 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/octavia-operator-controller-manager-998648c74-zs8hq" podUID="23cc63a0-e8c1-49bc-9762-daa6d315409e" Dec 10 11:10:53 crc kubenswrapper[4780]: E1210 11:10:53.464065 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/nova-operator-controller-manager-697bc559fc-lgmzm" podUID="bbfacb89-13e0-45ef-853a-1faf76e014d7" Dec 10 11:10:53 crc 
kubenswrapper[4780]: E1210 11:10:53.464593 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/telemetry-operator-controller-manager-54d54d59bc-c7gtb" podUID="194d6da3-e6b6-4330-9afa-d973a8bb03c2" Dec 10 11:10:53 crc kubenswrapper[4780]: E1210 11:10:53.464837 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/horizon-operator-controller-manager-68c6d99b8f-896x8" podUID="0dbb81a9-f820-447a-a475-911ae4a53034" Dec 10 11:10:53 crc kubenswrapper[4780]: I1210 11:10:53.501821 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-667bd8d554-9q8g7" event={"ID":"a170f760-08d9-4fd6-b90d-46ef21e4691e","Type":"ContainerStarted","Data":"5ad284a76b48f7c9da668f69f072bd40eb48421b3a1c7b7e41d2c8c40a2036e1"} Dec 10 11:10:53 crc kubenswrapper[4780]: I1210 11:10:53.584445 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ironic-operator-controller-manager-967d97867-84x72" event={"ID":"5e979946-7a11-46af-ab82-77bae1669169","Type":"ContainerStarted","Data":"d478a7c23247c15eaa4bef8a16c469025f9a69d32e3cfebe014abd77c5810f2d"} Dec 10 11:10:53 crc kubenswrapper[4780]: I1210 11:10:53.629633 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/glance-operator-controller-manager-5697bb5779-lx86j" event={"ID":"16c6406a-69c5-4365-81d9-8bf51365cd08","Type":"ContainerStarted","Data":"d25303d56d4e80504beb408942af15eac10acd2346bd8d73335eb0608afd1424"} Dec 10 11:10:53 crc kubenswrapper[4780]: I1210 11:10:53.656222 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-baremetal-operator-controller-manager-84b575879fkqjsw" event={"ID":"7d98f853-1b52-438c-a5d1-6fe334794a35","Type":"ContainerStarted","Data":"a9632d8b5a6bba4ed62ed6adba77f09424267eb8ed2adf4f6f9187281f7252ff"} Dec 10 11:10:53 crc kubenswrapper[4780]: I1210 11:10:53.754351 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-controller-manager-79c8c4686c-jxsxr" event={"ID":"b1ef4b52-99f2-4257-97d4-bdb6f871f73f","Type":"ContainerStarted","Data":"ebc809839ad928575326be9e024b5a8824bae62ea5c8f79ffbcfd515c4f519d3"} Dec 10 11:10:53 crc kubenswrapper[4780]: I1210 11:10:53.789977 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/test-operator-controller-manager-5854674fcc-4vbhw" event={"ID":"de0be1fa-33a0-44ad-9aed-c791a447510a","Type":"ContainerStarted","Data":"46d0647372fe371d739c97e5adb041a1c34702b39235d51f642a65bcb170d3ed"} Dec 10 11:10:53 crc kubenswrapper[4780]: I1210 11:10:53.832360 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/placement-operator-controller-manager-78f8948974-tgbxl" event={"ID":"42edfebc-9a83-460f-9bb9-50172b9763d3","Type":"ContainerStarted","Data":"555a11842166e317fd7286a982936f43ac73fcd5d4403753cde5642de5790024"} Dec 10 11:10:53 crc kubenswrapper[4780]: I1210 11:10:53.875355 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-controller-manager-7765d96ddf-v6fxt" event={"ID":"9d665ff9-da6c-4ae6-82f1-0fa8d9a3fb10","Type":"ContainerStarted","Data":"aa778911012956583752e11f81ce0e9760f7036d42353d259fc6948077fd3372"} 
Dec 10 11:10:53 crc kubenswrapper[4780]: E1210 11:10:53.975066 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"operator\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:893e66303c1b0bc1d00a299a3f0380bad55c8dc813c8a1c6a4aab379f5aa12a2\\\"\"" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-52xpp" podUID="3953ad68-9125-44a8-819f-0c48aafcfbf3" Dec 10 11:10:54 crc kubenswrapper[4780]: E1210 11:10:54.778791 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/neutron-operator-controller-manager-5fdfd5b6b5-nlzr9" podUID="3a1cc2ce-3a32-447a-8824-7ec8c84b1613" Dec 10 11:10:54 crc kubenswrapper[4780]: I1210 11:10:54.891741 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/test-operator-controller-manager-5854674fcc-4vbhw" event={"ID":"de0be1fa-33a0-44ad-9aed-c791a447510a","Type":"ContainerStarted","Data":"4952d690b8f2e7acea73621cf599694a1492c403046c6dbfdce1006e2419c33e"} Dec 10 11:10:54 crc kubenswrapper[4780]: I1210 11:10:54.891824 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/test-operator-controller-manager-5854674fcc-4vbhw" Dec 10 11:10:54 crc kubenswrapper[4780]: I1210 11:10:54.898573 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/telemetry-operator-controller-manager-54d54d59bc-c7gtb" event={"ID":"194d6da3-e6b6-4330-9afa-d973a8bb03c2","Type":"ContainerStarted","Data":"e2e96debdbbf0f4cc14e98752ee0053b32b9ea5ab79bac3629fa0ce381404b7a"} Dec 10 11:10:54 crc kubenswrapper[4780]: I1210 11:10:54.902214 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-baremetal-operator-controller-manager-84b575879fkqjsw" event={"ID":"7d98f853-1b52-438c-a5d1-6fe334794a35","Type":"ContainerStarted","Data":"30476b7a459eab3e2bc729d4f426de0df51da2e44f3da308097c0f40b4b39a0c"} Dec 10 11:10:54 crc kubenswrapper[4780]: I1210 11:10:54.902315 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-baremetal-operator-controller-manager-84b575879fkqjsw" Dec 10 11:10:54 crc kubenswrapper[4780]: I1210 11:10:54.906423 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-controller-manager-79c8c4686c-jxsxr" event={"ID":"b1ef4b52-99f2-4257-97d4-bdb6f871f73f","Type":"ContainerStarted","Data":"9233cebbfeec9d287a04e78e60c6c76648e58eb3a8d0b57f1e2d786ed73f83db"} Dec 10 11:10:54 crc kubenswrapper[4780]: I1210 11:10:54.906507 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/mariadb-operator-controller-manager-79c8c4686c-jxsxr" Dec 10 11:10:54 crc kubenswrapper[4780]: I1210 11:10:54.910978 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-controller-manager-78d48bff9d-d2txd" event={"ID":"e4a7d6c0-ca7b-4e3d-bff9-51d6c66810c0","Type":"ContainerStarted","Data":"1bd049acba2876d0a65fd8d87c506773e47a9ef5d0c901306715ebf2e7fd85a8"} Dec 10 11:10:54 crc kubenswrapper[4780]: I1210 11:10:54.911170 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/infra-operator-controller-manager-78d48bff9d-d2txd" Dec 10 11:10:54 crc kubenswrapper[4780]: I1210 11:10:54.922247 4780 kubelet.go:2453] 
"SyncLoop (PLEG): event for pod" pod="openstack-operators/octavia-operator-controller-manager-998648c74-zs8hq" event={"ID":"23cc63a0-e8c1-49bc-9762-daa6d315409e","Type":"ContainerStarted","Data":"00f54004ce346b4338feae59f3cfa00df9d095abcdb46b8949343ab2e9236467"} Dec 10 11:10:54 crc kubenswrapper[4780]: I1210 11:10:54.925774 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/test-operator-controller-manager-5854674fcc-4vbhw" podStartSLOduration=10.171265551 podStartE2EDuration="1m20.925741929s" podCreationTimestamp="2025-12-10 11:09:34 +0000 UTC" firstStartedPulling="2025-12-10 11:09:40.852148155 +0000 UTC m=+1485.705541598" lastFinishedPulling="2025-12-10 11:10:51.606624533 +0000 UTC m=+1556.460017976" observedRunningTime="2025-12-10 11:10:54.918889616 +0000 UTC m=+1559.772283059" watchObservedRunningTime="2025-12-10 11:10:54.925741929 +0000 UTC m=+1559.779135372" Dec 10 11:10:54 crc kubenswrapper[4780]: I1210 11:10:54.934851 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/designate-operator-controller-manager-697fb699cf-h6ggx" event={"ID":"77993ff6-b277-4ef9-a00d-08a47d02d483","Type":"ContainerStarted","Data":"fb897eacfb357b684dfd7d935db356a3b5fd2e8609f41c5fed30af2566017d5a"} Dec 10 11:10:54 crc kubenswrapper[4780]: I1210 11:10:54.936374 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/designate-operator-controller-manager-697fb699cf-h6ggx" Dec 10 11:10:54 crc kubenswrapper[4780]: I1210 11:10:54.939346 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/designate-operator-controller-manager-697fb699cf-h6ggx" Dec 10 11:10:54 crc kubenswrapper[4780]: I1210 11:10:54.939952 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/cinder-operator-controller-manager-6c677c69b-nkdgw" event={"ID":"5a684cfd-18e4-4f16-a0dd-73f2238cce27","Type":"ContainerStarted","Data":"65cb4c4056e66525244e7c1e90ad1b70035eed21f878f50ea3e0f820a9533770"} Dec 10 11:10:54 crc kubenswrapper[4780]: I1210 11:10:54.941211 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/cinder-operator-controller-manager-6c677c69b-nkdgw" Dec 10 11:10:54 crc kubenswrapper[4780]: I1210 11:10:54.942971 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/neutron-operator-controller-manager-5fdfd5b6b5-nlzr9" event={"ID":"3a1cc2ce-3a32-447a-8824-7ec8c84b1613","Type":"ContainerStarted","Data":"75c2ee768e8999e3115cd6c8fd7db409d0ab2e62974bf6b5c6fe1d89764dd9c7"} Dec 10 11:10:54 crc kubenswrapper[4780]: I1210 11:10:54.956789 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/infra-operator-controller-manager-78d48bff9d-d2txd" podStartSLOduration=74.181539263 podStartE2EDuration="1m21.956766061s" podCreationTimestamp="2025-12-10 11:09:33 +0000 UTC" firstStartedPulling="2025-12-10 11:10:42.796357738 +0000 UTC m=+1547.649751181" lastFinishedPulling="2025-12-10 11:10:50.571584536 +0000 UTC m=+1555.424977979" observedRunningTime="2025-12-10 11:10:54.953376565 +0000 UTC m=+1559.806770008" watchObservedRunningTime="2025-12-10 11:10:54.956766061 +0000 UTC m=+1559.810159504" Dec 10 11:10:54 crc kubenswrapper[4780]: I1210 11:10:54.960157 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/nova-operator-controller-manager-697bc559fc-lgmzm" 
event={"ID":"bbfacb89-13e0-45ef-853a-1faf76e014d7","Type":"ContainerStarted","Data":"636c49bb5a921ba054b482917b8f3254ebc6a59e9fb9060816a12a0b05ef86a6"} Dec 10 11:10:54 crc kubenswrapper[4780]: I1210 11:10:54.970961 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/horizon-operator-controller-manager-68c6d99b8f-896x8" event={"ID":"0dbb81a9-f820-447a-a475-911ae4a53034","Type":"ContainerStarted","Data":"fa77100f301122c587c4019738e3ce598e5dc7850ee7be77d4a44b0feab6a882"} Dec 10 11:10:54 crc kubenswrapper[4780]: I1210 11:10:54.978101 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/swift-operator-controller-manager-9d58d64bc-t764b" event={"ID":"47ef7d7b-4052-4068-adef-b6a94353f980","Type":"ContainerStarted","Data":"3c00054b756dcff81e7eeb0ba2e9b3a7b3b89a7befc14b501c63679b0ba055cf"} Dec 10 11:10:54 crc kubenswrapper[4780]: I1210 11:10:54.996056 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/barbican-operator-controller-manager-7d9dfd778-rnfbd" event={"ID":"6f18f8cf-e493-41bd-92e6-a7714992854d","Type":"ContainerStarted","Data":"6ccca933de6a81d25da5270be22440feeaf47281c23429d5a3b9fb9d884e88b3"} Dec 10 11:10:54 crc kubenswrapper[4780]: I1210 11:10:54.996103 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/barbican-operator-controller-manager-7d9dfd778-rnfbd" Dec 10 11:10:55 crc kubenswrapper[4780]: I1210 11:10:55.003491 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/barbican-operator-controller-manager-7d9dfd778-rnfbd" Dec 10 11:10:55 crc kubenswrapper[4780]: I1210 11:10:55.016521 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/mariadb-operator-controller-manager-79c8c4686c-jxsxr" podStartSLOduration=8.12750074 podStartE2EDuration="1m21.016496356s" podCreationTimestamp="2025-12-10 11:09:34 +0000 UTC" firstStartedPulling="2025-12-10 11:09:38.71537199 +0000 UTC m=+1483.568765443" lastFinishedPulling="2025-12-10 11:10:51.604367616 +0000 UTC m=+1556.457761059" observedRunningTime="2025-12-10 11:10:55.010823493 +0000 UTC m=+1559.864216936" watchObservedRunningTime="2025-12-10 11:10:55.016496356 +0000 UTC m=+1559.869889809" Dec 10 11:10:55 crc kubenswrapper[4780]: I1210 11:10:55.058676 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-baremetal-operator-controller-manager-84b575879fkqjsw" podStartSLOduration=74.156696561 podStartE2EDuration="1m21.058639718s" podCreationTimestamp="2025-12-10 11:09:34 +0000 UTC" firstStartedPulling="2025-12-10 11:10:44.628747772 +0000 UTC m=+1549.482141235" lastFinishedPulling="2025-12-10 11:10:51.530690949 +0000 UTC m=+1556.384084392" observedRunningTime="2025-12-10 11:10:55.054839873 +0000 UTC m=+1559.908233336" watchObservedRunningTime="2025-12-10 11:10:55.058639718 +0000 UTC m=+1559.912033161" Dec 10 11:10:55 crc kubenswrapper[4780]: I1210 11:10:55.136055 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/designate-operator-controller-manager-697fb699cf-h6ggx" podStartSLOduration=9.337197141 podStartE2EDuration="1m22.136026589s" podCreationTimestamp="2025-12-10 11:09:33 +0000 UTC" firstStartedPulling="2025-12-10 11:09:38.80324969 +0000 UTC m=+1483.656643133" lastFinishedPulling="2025-12-10 11:10:51.602079138 +0000 UTC m=+1556.455472581" observedRunningTime="2025-12-10 11:10:55.12338537 +0000 UTC m=+1559.976778813" 
watchObservedRunningTime="2025-12-10 11:10:55.136026589 +0000 UTC m=+1559.989420032" Dec 10 11:10:55 crc kubenswrapper[4780]: I1210 11:10:55.251105 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/barbican-operator-controller-manager-7d9dfd778-rnfbd" podStartSLOduration=8.547420286 podStartE2EDuration="1m22.251080309s" podCreationTimestamp="2025-12-10 11:09:33 +0000 UTC" firstStartedPulling="2025-12-10 11:09:37.901607306 +0000 UTC m=+1482.755000749" lastFinishedPulling="2025-12-10 11:10:51.605267329 +0000 UTC m=+1556.458660772" observedRunningTime="2025-12-10 11:10:55.247454427 +0000 UTC m=+1560.100847880" watchObservedRunningTime="2025-12-10 11:10:55.251080309 +0000 UTC m=+1560.104473752" Dec 10 11:10:55 crc kubenswrapper[4780]: I1210 11:10:55.367206 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/cinder-operator-controller-manager-6c677c69b-nkdgw" podStartSLOduration=8.680988083 podStartE2EDuration="1m22.367177615s" podCreationTimestamp="2025-12-10 11:09:33 +0000 UTC" firstStartedPulling="2025-12-10 11:09:37.902273123 +0000 UTC m=+1482.755666566" lastFinishedPulling="2025-12-10 11:10:51.588462655 +0000 UTC m=+1556.441856098" observedRunningTime="2025-12-10 11:10:55.348277038 +0000 UTC m=+1560.201670471" watchObservedRunningTime="2025-12-10 11:10:55.367177615 +0000 UTC m=+1560.220571048" Dec 10 11:10:56 crc kubenswrapper[4780]: I1210 11:10:56.099199 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/swift-operator-controller-manager-9d58d64bc-t764b" event={"ID":"47ef7d7b-4052-4068-adef-b6a94353f980","Type":"ContainerStarted","Data":"3f289c821632617d193e2480f995aeef93b3af004a57644e5f49528505df81d8"} Dec 10 11:10:56 crc kubenswrapper[4780]: I1210 11:10:56.099747 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-controller-manager-7765d96ddf-v6fxt" event={"ID":"9d665ff9-da6c-4ae6-82f1-0fa8d9a3fb10","Type":"ContainerStarted","Data":"26c924b8d7ecb3c00bb064635f528831bc3380b617945fe2a451c37735e12487"} Dec 10 11:10:56 crc kubenswrapper[4780]: I1210 11:10:56.099776 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/manila-operator-controller-manager-5b5fd79c9c-jb52h" event={"ID":"0f7ed694-a606-482f-90ca-bbe99437b5f7","Type":"ContainerStarted","Data":"e4cf80f8e9029250b0e9f3ea47b9777f9e37e2d18309bd2d824117ccc54e27a7"} Dec 10 11:10:56 crc kubenswrapper[4780]: I1210 11:10:56.099792 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ovn-operator-controller-manager-b6456fdb6-jwjxx" event={"ID":"b88956c1-b60b-4a6f-948a-de685134880f","Type":"ContainerStarted","Data":"cb525e1175a4453626a3e116720b6fd4a69d7f5437502e782d40ffc3c369b8f3"} Dec 10 11:10:56 crc kubenswrapper[4780]: I1210 11:10:56.104428 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/manila-operator-controller-manager-5b5fd79c9c-jb52h" Dec 10 11:10:56 crc kubenswrapper[4780]: I1210 11:10:56.106527 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/glance-operator-controller-manager-5697bb5779-lx86j" event={"ID":"16c6406a-69c5-4365-81d9-8bf51365cd08","Type":"ContainerStarted","Data":"d5dccfadfa834372fb5fab32207a4ed2ee7df0b9a06cdf5d57b037cfbd7d63e8"} Dec 10 11:10:56 crc kubenswrapper[4780]: I1210 11:10:56.108135 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" 
pod="openstack-operators/glance-operator-controller-manager-5697bb5779-lx86j" Dec 10 11:10:56 crc kubenswrapper[4780]: I1210 11:10:56.113289 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/placement-operator-controller-manager-78f8948974-tgbxl" event={"ID":"42edfebc-9a83-460f-9bb9-50172b9763d3","Type":"ContainerStarted","Data":"d7ada6f4bd434053815d9a4692f22b4b817d934b5f191aa1fe0e4c07c7151d13"} Dec 10 11:10:56 crc kubenswrapper[4780]: I1210 11:10:56.114035 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/placement-operator-controller-manager-78f8948974-tgbxl" Dec 10 11:10:56 crc kubenswrapper[4780]: I1210 11:10:56.124778 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/telemetry-operator-controller-manager-54d54d59bc-c7gtb" event={"ID":"194d6da3-e6b6-4330-9afa-d973a8bb03c2","Type":"ContainerStarted","Data":"680249319f6654aa9e173af082f79f13d71096cbc91ee610ef14088fcba2f849"} Dec 10 11:10:56 crc kubenswrapper[4780]: I1210 11:10:56.124830 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/telemetry-operator-controller-manager-54d54d59bc-c7gtb" Dec 10 11:10:56 crc kubenswrapper[4780]: I1210 11:10:56.387034 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/ovn-operator-controller-manager-b6456fdb6-jwjxx" podStartSLOduration=7.2959474140000005 podStartE2EDuration="1m22.387001029s" podCreationTimestamp="2025-12-10 11:09:34 +0000 UTC" firstStartedPulling="2025-12-10 11:09:40.489784995 +0000 UTC m=+1485.343178428" lastFinishedPulling="2025-12-10 11:10:55.5808386 +0000 UTC m=+1560.434232043" observedRunningTime="2025-12-10 11:10:56.151918914 +0000 UTC m=+1561.005312357" watchObservedRunningTime="2025-12-10 11:10:56.387001029 +0000 UTC m=+1561.240394482" Dec 10 11:10:56 crc kubenswrapper[4780]: I1210 11:10:56.388113 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/manila-operator-controller-manager-5b5fd79c9c-jb52h" podStartSLOduration=7.798140432 podStartE2EDuration="1m22.388104407s" podCreationTimestamp="2025-12-10 11:09:34 +0000 UTC" firstStartedPulling="2025-12-10 11:09:40.476192663 +0000 UTC m=+1485.329586106" lastFinishedPulling="2025-12-10 11:10:55.066156648 +0000 UTC m=+1559.919550081" observedRunningTime="2025-12-10 11:10:56.38426365 +0000 UTC m=+1561.237657093" watchObservedRunningTime="2025-12-10 11:10:56.388104407 +0000 UTC m=+1561.241497850" Dec 10 11:10:56 crc kubenswrapper[4780]: I1210 11:10:56.507664 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/telemetry-operator-controller-manager-54d54d59bc-c7gtb" podStartSLOduration=8.319125559 podStartE2EDuration="1m22.507624629s" podCreationTimestamp="2025-12-10 11:09:34 +0000 UTC" firstStartedPulling="2025-12-10 11:09:40.878837177 +0000 UTC m=+1485.732230620" lastFinishedPulling="2025-12-10 11:10:55.067336237 +0000 UTC m=+1559.920729690" observedRunningTime="2025-12-10 11:10:56.496401776 +0000 UTC m=+1561.349795219" watchObservedRunningTime="2025-12-10 11:10:56.507624629 +0000 UTC m=+1561.361018082" Dec 10 11:10:56 crc kubenswrapper[4780]: I1210 11:10:56.643187 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/swift-operator-controller-manager-9d58d64bc-t764b" podStartSLOduration=10.741616595 podStartE2EDuration="1m22.643163065s" podCreationTimestamp="2025-12-10 11:09:34 +0000 UTC" 
firstStartedPulling="2025-12-10 11:09:40.882687174 +0000 UTC m=+1485.736080617" lastFinishedPulling="2025-12-10 11:10:52.784233644 +0000 UTC m=+1557.637627087" observedRunningTime="2025-12-10 11:10:56.642492208 +0000 UTC m=+1561.495885651" watchObservedRunningTime="2025-12-10 11:10:56.643163065 +0000 UTC m=+1561.496556508" Dec 10 11:10:56 crc kubenswrapper[4780]: I1210 11:10:56.651865 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/placement-operator-controller-manager-78f8948974-tgbxl" podStartSLOduration=8.721799244 podStartE2EDuration="1m22.651829194s" podCreationTimestamp="2025-12-10 11:09:34 +0000 UTC" firstStartedPulling="2025-12-10 11:09:40.989895452 +0000 UTC m=+1485.843288895" lastFinishedPulling="2025-12-10 11:10:54.919925402 +0000 UTC m=+1559.773318845" observedRunningTime="2025-12-10 11:10:56.549270549 +0000 UTC m=+1561.402664012" watchObservedRunningTime="2025-12-10 11:10:56.651829194 +0000 UTC m=+1561.505222627" Dec 10 11:10:56 crc kubenswrapper[4780]: I1210 11:10:56.703459 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/keystone-operator-controller-manager-7765d96ddf-v6fxt" podStartSLOduration=9.251566646 podStartE2EDuration="1m23.703426384s" podCreationTimestamp="2025-12-10 11:09:33 +0000 UTC" firstStartedPulling="2025-12-10 11:09:40.571022939 +0000 UTC m=+1485.424416382" lastFinishedPulling="2025-12-10 11:10:55.022882677 +0000 UTC m=+1559.876276120" observedRunningTime="2025-12-10 11:10:56.681729817 +0000 UTC m=+1561.535123260" watchObservedRunningTime="2025-12-10 11:10:56.703426384 +0000 UTC m=+1561.556819837" Dec 10 11:10:56 crc kubenswrapper[4780]: I1210 11:10:56.726308 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/glance-operator-controller-manager-5697bb5779-lx86j" podStartSLOduration=9.281818852 podStartE2EDuration="1m23.72627702s" podCreationTimestamp="2025-12-10 11:09:33 +0000 UTC" firstStartedPulling="2025-12-10 11:09:40.47570395 +0000 UTC m=+1485.329097393" lastFinishedPulling="2025-12-10 11:10:54.920162118 +0000 UTC m=+1559.773555561" observedRunningTime="2025-12-10 11:10:56.721450269 +0000 UTC m=+1561.574843712" watchObservedRunningTime="2025-12-10 11:10:56.72627702 +0000 UTC m=+1561.579670463" Dec 10 11:10:57 crc kubenswrapper[4780]: I1210 11:10:57.136378 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ironic-operator-controller-manager-967d97867-84x72" event={"ID":"5e979946-7a11-46af-ab82-77bae1669169","Type":"ContainerStarted","Data":"1981e477a8474aa38812960855e2ff1e569b8062ae2600de199de979fe683c23"} Dec 10 11:10:57 crc kubenswrapper[4780]: I1210 11:10:57.140450 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/ironic-operator-controller-manager-967d97867-84x72" Dec 10 11:10:57 crc kubenswrapper[4780]: I1210 11:10:57.145189 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/neutron-operator-controller-manager-5fdfd5b6b5-nlzr9" event={"ID":"3a1cc2ce-3a32-447a-8824-7ec8c84b1613","Type":"ContainerStarted","Data":"9c21104a0830087a43a380028a08131caa3406d0403b863899200efde0b263ec"} Dec 10 11:10:57 crc kubenswrapper[4780]: I1210 11:10:57.146512 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/neutron-operator-controller-manager-5fdfd5b6b5-nlzr9" Dec 10 11:10:57 crc kubenswrapper[4780]: I1210 11:10:57.150279 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack-operators/nova-operator-controller-manager-697bc559fc-lgmzm" event={"ID":"bbfacb89-13e0-45ef-853a-1faf76e014d7","Type":"ContainerStarted","Data":"41d9b638b0e1bc66e1e4035fecab68a03dcb3e1bd97c9203e828e45b4f8f3bf9"} Dec 10 11:10:57 crc kubenswrapper[4780]: I1210 11:10:57.151258 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/nova-operator-controller-manager-697bc559fc-lgmzm" Dec 10 11:10:57 crc kubenswrapper[4780]: I1210 11:10:57.154653 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/horizon-operator-controller-manager-68c6d99b8f-896x8" event={"ID":"0dbb81a9-f820-447a-a475-911ae4a53034","Type":"ContainerStarted","Data":"ae1e7643342880aa0989233ebfe4af09a989933799213ade792ed5f783ab5d24"} Dec 10 11:10:57 crc kubenswrapper[4780]: I1210 11:10:57.155665 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/horizon-operator-controller-manager-68c6d99b8f-896x8" Dec 10 11:10:57 crc kubenswrapper[4780]: I1210 11:10:57.160278 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/octavia-operator-controller-manager-998648c74-zs8hq" event={"ID":"23cc63a0-e8c1-49bc-9762-daa6d315409e","Type":"ContainerStarted","Data":"232b8849a4395511a606ea67a675c4631976d219c54f23a43b286df064622618"} Dec 10 11:10:57 crc kubenswrapper[4780]: I1210 11:10:57.161182 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/octavia-operator-controller-manager-998648c74-zs8hq" Dec 10 11:10:57 crc kubenswrapper[4780]: I1210 11:10:57.165883 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-667bd8d554-9q8g7" event={"ID":"a170f760-08d9-4fd6-b90d-46ef21e4691e","Type":"ContainerStarted","Data":"13f06473c7494d776f3b2cae4deab25d8fccae25a0d3a256f1d3308deb79f933"} Dec 10 11:10:57 crc kubenswrapper[4780]: I1210 11:10:57.169463 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/keystone-operator-controller-manager-7765d96ddf-v6fxt" Dec 10 11:10:57 crc kubenswrapper[4780]: I1210 11:10:57.169565 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/ovn-operator-controller-manager-b6456fdb6-jwjxx" Dec 10 11:10:57 crc kubenswrapper[4780]: I1210 11:10:57.170883 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/swift-operator-controller-manager-9d58d64bc-t764b" Dec 10 11:10:57 crc kubenswrapper[4780]: I1210 11:10:57.173458 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/cinder-operator-controller-manager-6c677c69b-nkdgw" Dec 10 11:10:57 crc kubenswrapper[4780]: I1210 11:10:57.191912 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/ironic-operator-controller-manager-967d97867-84x72" podStartSLOduration=8.703344261 podStartE2EDuration="1m24.191879775s" podCreationTimestamp="2025-12-10 11:09:33 +0000 UTC" firstStartedPulling="2025-12-10 11:09:40.490599395 +0000 UTC m=+1485.343992838" lastFinishedPulling="2025-12-10 11:10:55.979134909 +0000 UTC m=+1560.832528352" observedRunningTime="2025-12-10 11:10:57.1853187 +0000 UTC m=+1562.038712153" watchObservedRunningTime="2025-12-10 11:10:57.191879775 +0000 UTC m=+1562.045273218" Dec 10 11:10:57 crc kubenswrapper[4780]: I1210 11:10:57.266974 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" 
pod="openstack-operators/neutron-operator-controller-manager-5fdfd5b6b5-nlzr9" podStartSLOduration=8.104417881 podStartE2EDuration="1m23.266925757s" podCreationTimestamp="2025-12-10 11:09:34 +0000 UTC" firstStartedPulling="2025-12-10 11:09:40.759091083 +0000 UTC m=+1485.612484526" lastFinishedPulling="2025-12-10 11:10:55.921598949 +0000 UTC m=+1560.774992402" observedRunningTime="2025-12-10 11:10:57.266166828 +0000 UTC m=+1562.119560271" watchObservedRunningTime="2025-12-10 11:10:57.266925757 +0000 UTC m=+1562.120319190" Dec 10 11:10:57 crc kubenswrapper[4780]: I1210 11:10:57.324737 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/octavia-operator-controller-manager-998648c74-zs8hq" podStartSLOduration=7.984959645 podStartE2EDuration="1m23.324699163s" podCreationTimestamp="2025-12-10 11:09:34 +0000 UTC" firstStartedPulling="2025-12-10 11:09:40.636478287 +0000 UTC m=+1485.489871730" lastFinishedPulling="2025-12-10 11:10:55.976217805 +0000 UTC m=+1560.829611248" observedRunningTime="2025-12-10 11:10:57.316852075 +0000 UTC m=+1562.170245528" watchObservedRunningTime="2025-12-10 11:10:57.324699163 +0000 UTC m=+1562.178092606" Dec 10 11:10:57 crc kubenswrapper[4780]: I1210 11:10:57.362260 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/horizon-operator-controller-manager-68c6d99b8f-896x8" podStartSLOduration=7.28808317 podStartE2EDuration="1m24.362206658s" podCreationTimestamp="2025-12-10 11:09:33 +0000 UTC" firstStartedPulling="2025-12-10 11:09:38.903601235 +0000 UTC m=+1483.756994678" lastFinishedPulling="2025-12-10 11:10:55.977724723 +0000 UTC m=+1560.831118166" observedRunningTime="2025-12-10 11:10:57.353496509 +0000 UTC m=+1562.206889972" watchObservedRunningTime="2025-12-10 11:10:57.362206658 +0000 UTC m=+1562.215600111" Dec 10 11:10:57 crc kubenswrapper[4780]: I1210 11:10:57.400385 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/watcher-operator-controller-manager-667bd8d554-9q8g7" podStartSLOduration=8.186996361 podStartE2EDuration="1m23.40034697s" podCreationTimestamp="2025-12-10 11:09:34 +0000 UTC" firstStartedPulling="2025-12-10 11:09:40.803352767 +0000 UTC m=+1485.656746220" lastFinishedPulling="2025-12-10 11:10:56.016703386 +0000 UTC m=+1560.870096829" observedRunningTime="2025-12-10 11:10:57.39121696 +0000 UTC m=+1562.244610403" watchObservedRunningTime="2025-12-10 11:10:57.40034697 +0000 UTC m=+1562.253740413" Dec 10 11:10:57 crc kubenswrapper[4780]: I1210 11:10:57.436506 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/nova-operator-controller-manager-697bc559fc-lgmzm" podStartSLOduration=7.26640198 podStartE2EDuration="1m23.43646796s" podCreationTimestamp="2025-12-10 11:09:34 +0000 UTC" firstStartedPulling="2025-12-10 11:09:40.461085193 +0000 UTC m=+1485.314478646" lastFinishedPulling="2025-12-10 11:10:56.631151173 +0000 UTC m=+1561.484544626" observedRunningTime="2025-12-10 11:10:57.426640543 +0000 UTC m=+1562.280033986" watchObservedRunningTime="2025-12-10 11:10:57.43646796 +0000 UTC m=+1562.289861403" Dec 10 11:10:58 crc kubenswrapper[4780]: I1210 11:10:58.180400 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/watcher-operator-controller-manager-667bd8d554-9q8g7" Dec 10 11:10:58 crc kubenswrapper[4780]: I1210 11:10:58.331692 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" 
pod="openstack-operators/test-operator-controller-manager-5854674fcc-4vbhw" Dec 10 11:11:00 crc kubenswrapper[4780]: I1210 11:11:00.950373 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/infra-operator-controller-manager-78d48bff9d-d2txd" Dec 10 11:11:02 crc kubenswrapper[4780]: I1210 11:11:02.298735 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-baremetal-operator-controller-manager-84b575879fkqjsw" Dec 10 11:11:03 crc kubenswrapper[4780]: I1210 11:11:03.767167 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-operator-controller-manager-678c445b7b-6kmsn" Dec 10 11:11:04 crc kubenswrapper[4780]: I1210 11:11:04.664708 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/horizon-operator-controller-manager-68c6d99b8f-896x8" Dec 10 11:11:04 crc kubenswrapper[4780]: I1210 11:11:04.725964 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/glance-operator-controller-manager-5697bb5779-lx86j" Dec 10 11:11:05 crc kubenswrapper[4780]: I1210 11:11:05.438433 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/keystone-operator-controller-manager-7765d96ddf-v6fxt" Dec 10 11:11:05 crc kubenswrapper[4780]: I1210 11:11:05.439236 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/ironic-operator-controller-manager-967d97867-84x72" Dec 10 11:11:05 crc kubenswrapper[4780]: I1210 11:11:05.674112 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/manila-operator-controller-manager-5b5fd79c9c-jb52h" Dec 10 11:11:05 crc kubenswrapper[4780]: I1210 11:11:05.679194 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/mariadb-operator-controller-manager-79c8c4686c-jxsxr" Dec 10 11:11:05 crc kubenswrapper[4780]: I1210 11:11:05.708316 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/neutron-operator-controller-manager-5fdfd5b6b5-nlzr9" Dec 10 11:11:05 crc kubenswrapper[4780]: I1210 11:11:05.760555 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/nova-operator-controller-manager-697bc559fc-lgmzm" Dec 10 11:11:06 crc kubenswrapper[4780]: I1210 11:11:06.557273 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/octavia-operator-controller-manager-998648c74-zs8hq" Dec 10 11:11:07 crc kubenswrapper[4780]: I1210 11:11:07.916068 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/ovn-operator-controller-manager-b6456fdb6-jwjxx" Dec 10 11:11:08 crc kubenswrapper[4780]: I1210 11:11:08.215247 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/placement-operator-controller-manager-78f8948974-tgbxl" Dec 10 11:11:08 crc kubenswrapper[4780]: I1210 11:11:08.231233 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/swift-operator-controller-manager-9d58d64bc-t764b" Dec 10 11:11:08 crc kubenswrapper[4780]: I1210 11:11:08.453419 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/telemetry-operator-controller-manager-54d54d59bc-c7gtb" Dec 10 11:11:08 crc 
kubenswrapper[4780]: I1210 11:11:08.482730 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/watcher-operator-controller-manager-667bd8d554-9q8g7" Dec 10 11:11:11 crc kubenswrapper[4780]: I1210 11:11:11.972725 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-52xpp" event={"ID":"3953ad68-9125-44a8-819f-0c48aafcfbf3","Type":"ContainerStarted","Data":"2eef54e12248b45d1c462f29fc4a87dff959eb8c214a76bd884a877948a5fe64"} Dec 10 11:11:12 crc kubenswrapper[4780]: I1210 11:11:12.003125 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-52xpp" podStartSLOduration=7.675324902 podStartE2EDuration="1m37.00309279s" podCreationTimestamp="2025-12-10 11:09:35 +0000 UTC" firstStartedPulling="2025-12-10 11:09:40.976508995 +0000 UTC m=+1485.829902438" lastFinishedPulling="2025-12-10 11:11:10.304276863 +0000 UTC m=+1575.157670326" observedRunningTime="2025-12-10 11:11:11.992822611 +0000 UTC m=+1576.846216054" watchObservedRunningTime="2025-12-10 11:11:12.00309279 +0000 UTC m=+1576.856486233" Dec 10 11:11:28 crc kubenswrapper[4780]: I1210 11:11:28.431341 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-cgslz"] Dec 10 11:11:28 crc kubenswrapper[4780]: I1210 11:11:28.434899 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-675f4bcbfc-cgslz" Dec 10 11:11:28 crc kubenswrapper[4780]: I1210 11:11:28.436802 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openshift-service-ca.crt" Dec 10 11:11:28 crc kubenswrapper[4780]: I1210 11:11:28.441348 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"dns" Dec 10 11:11:28 crc kubenswrapper[4780]: I1210 11:11:28.441366 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"kube-root-ca.crt" Dec 10 11:11:28 crc kubenswrapper[4780]: I1210 11:11:28.441601 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dnsmasq-dns-dockercfg-z5dts" Dec 10 11:11:28 crc kubenswrapper[4780]: I1210 11:11:28.444747 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-cgslz"] Dec 10 11:11:28 crc kubenswrapper[4780]: I1210 11:11:28.455058 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-sqq22"] Dec 10 11:11:28 crc kubenswrapper[4780]: I1210 11:11:28.457779 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-78dd6ddcc-sqq22" Dec 10 11:11:28 crc kubenswrapper[4780]: I1210 11:11:28.464858 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"dns-svc" Dec 10 11:11:28 crc kubenswrapper[4780]: I1210 11:11:28.479236 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-sqq22"] Dec 10 11:11:28 crc kubenswrapper[4780]: I1210 11:11:28.513138 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-km2zh\" (UniqueName: \"kubernetes.io/projected/b9cf3b6c-c9be-44d8-ab60-bb9dae0ecc75-kube-api-access-km2zh\") pod \"dnsmasq-dns-675f4bcbfc-cgslz\" (UID: \"b9cf3b6c-c9be-44d8-ab60-bb9dae0ecc75\") " pod="openstack/dnsmasq-dns-675f4bcbfc-cgslz" Dec 10 11:11:28 crc kubenswrapper[4780]: I1210 11:11:28.513540 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b9cf3b6c-c9be-44d8-ab60-bb9dae0ecc75-config\") pod \"dnsmasq-dns-675f4bcbfc-cgslz\" (UID: \"b9cf3b6c-c9be-44d8-ab60-bb9dae0ecc75\") " pod="openstack/dnsmasq-dns-675f4bcbfc-cgslz" Dec 10 11:11:28 crc kubenswrapper[4780]: I1210 11:11:28.615638 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b9cf3b6c-c9be-44d8-ab60-bb9dae0ecc75-config\") pod \"dnsmasq-dns-675f4bcbfc-cgslz\" (UID: \"b9cf3b6c-c9be-44d8-ab60-bb9dae0ecc75\") " pod="openstack/dnsmasq-dns-675f4bcbfc-cgslz" Dec 10 11:11:28 crc kubenswrapper[4780]: I1210 11:11:28.615747 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c6a6a404-b951-4032-a505-7e41cc0a6eeb-config\") pod \"dnsmasq-dns-78dd6ddcc-sqq22\" (UID: \"c6a6a404-b951-4032-a505-7e41cc0a6eeb\") " pod="openstack/dnsmasq-dns-78dd6ddcc-sqq22" Dec 10 11:11:28 crc kubenswrapper[4780]: I1210 11:11:28.615773 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/c6a6a404-b951-4032-a505-7e41cc0a6eeb-dns-svc\") pod \"dnsmasq-dns-78dd6ddcc-sqq22\" (UID: \"c6a6a404-b951-4032-a505-7e41cc0a6eeb\") " pod="openstack/dnsmasq-dns-78dd6ddcc-sqq22" Dec 10 11:11:28 crc kubenswrapper[4780]: I1210 11:11:28.615795 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-km2zh\" (UniqueName: \"kubernetes.io/projected/b9cf3b6c-c9be-44d8-ab60-bb9dae0ecc75-kube-api-access-km2zh\") pod \"dnsmasq-dns-675f4bcbfc-cgslz\" (UID: \"b9cf3b6c-c9be-44d8-ab60-bb9dae0ecc75\") " pod="openstack/dnsmasq-dns-675f4bcbfc-cgslz" Dec 10 11:11:28 crc kubenswrapper[4780]: I1210 11:11:28.615847 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6ptlx\" (UniqueName: \"kubernetes.io/projected/c6a6a404-b951-4032-a505-7e41cc0a6eeb-kube-api-access-6ptlx\") pod \"dnsmasq-dns-78dd6ddcc-sqq22\" (UID: \"c6a6a404-b951-4032-a505-7e41cc0a6eeb\") " pod="openstack/dnsmasq-dns-78dd6ddcc-sqq22" Dec 10 11:11:28 crc kubenswrapper[4780]: I1210 11:11:28.617455 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b9cf3b6c-c9be-44d8-ab60-bb9dae0ecc75-config\") pod \"dnsmasq-dns-675f4bcbfc-cgslz\" (UID: \"b9cf3b6c-c9be-44d8-ab60-bb9dae0ecc75\") " pod="openstack/dnsmasq-dns-675f4bcbfc-cgslz" Dec 10 
11:11:28 crc kubenswrapper[4780]: I1210 11:11:28.649073 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-km2zh\" (UniqueName: \"kubernetes.io/projected/b9cf3b6c-c9be-44d8-ab60-bb9dae0ecc75-kube-api-access-km2zh\") pod \"dnsmasq-dns-675f4bcbfc-cgslz\" (UID: \"b9cf3b6c-c9be-44d8-ab60-bb9dae0ecc75\") " pod="openstack/dnsmasq-dns-675f4bcbfc-cgslz" Dec 10 11:11:28 crc kubenswrapper[4780]: I1210 11:11:28.717992 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c6a6a404-b951-4032-a505-7e41cc0a6eeb-config\") pod \"dnsmasq-dns-78dd6ddcc-sqq22\" (UID: \"c6a6a404-b951-4032-a505-7e41cc0a6eeb\") " pod="openstack/dnsmasq-dns-78dd6ddcc-sqq22" Dec 10 11:11:28 crc kubenswrapper[4780]: I1210 11:11:28.718621 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/c6a6a404-b951-4032-a505-7e41cc0a6eeb-dns-svc\") pod \"dnsmasq-dns-78dd6ddcc-sqq22\" (UID: \"c6a6a404-b951-4032-a505-7e41cc0a6eeb\") " pod="openstack/dnsmasq-dns-78dd6ddcc-sqq22" Dec 10 11:11:28 crc kubenswrapper[4780]: I1210 11:11:28.718714 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6ptlx\" (UniqueName: \"kubernetes.io/projected/c6a6a404-b951-4032-a505-7e41cc0a6eeb-kube-api-access-6ptlx\") pod \"dnsmasq-dns-78dd6ddcc-sqq22\" (UID: \"c6a6a404-b951-4032-a505-7e41cc0a6eeb\") " pod="openstack/dnsmasq-dns-78dd6ddcc-sqq22" Dec 10 11:11:28 crc kubenswrapper[4780]: I1210 11:11:28.719296 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c6a6a404-b951-4032-a505-7e41cc0a6eeb-config\") pod \"dnsmasq-dns-78dd6ddcc-sqq22\" (UID: \"c6a6a404-b951-4032-a505-7e41cc0a6eeb\") " pod="openstack/dnsmasq-dns-78dd6ddcc-sqq22" Dec 10 11:11:28 crc kubenswrapper[4780]: I1210 11:11:28.720358 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/c6a6a404-b951-4032-a505-7e41cc0a6eeb-dns-svc\") pod \"dnsmasq-dns-78dd6ddcc-sqq22\" (UID: \"c6a6a404-b951-4032-a505-7e41cc0a6eeb\") " pod="openstack/dnsmasq-dns-78dd6ddcc-sqq22" Dec 10 11:11:28 crc kubenswrapper[4780]: I1210 11:11:28.739742 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6ptlx\" (UniqueName: \"kubernetes.io/projected/c6a6a404-b951-4032-a505-7e41cc0a6eeb-kube-api-access-6ptlx\") pod \"dnsmasq-dns-78dd6ddcc-sqq22\" (UID: \"c6a6a404-b951-4032-a505-7e41cc0a6eeb\") " pod="openstack/dnsmasq-dns-78dd6ddcc-sqq22" Dec 10 11:11:28 crc kubenswrapper[4780]: I1210 11:11:28.772665 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-675f4bcbfc-cgslz" Dec 10 11:11:28 crc kubenswrapper[4780]: I1210 11:11:28.792310 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-78dd6ddcc-sqq22" Dec 10 11:11:29 crc kubenswrapper[4780]: I1210 11:11:29.320759 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-cgslz"] Dec 10 11:11:29 crc kubenswrapper[4780]: W1210 11:11:29.332485 4780 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb9cf3b6c_c9be_44d8_ab60_bb9dae0ecc75.slice/crio-866ce9a412badb11ef29ee317108b07a9be921236db69f455c7746c3c658eb7d WatchSource:0}: Error finding container 866ce9a412badb11ef29ee317108b07a9be921236db69f455c7746c3c658eb7d: Status 404 returned error can't find the container with id 866ce9a412badb11ef29ee317108b07a9be921236db69f455c7746c3c658eb7d Dec 10 11:11:29 crc kubenswrapper[4780]: I1210 11:11:29.360194 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-sqq22"] Dec 10 11:11:29 crc kubenswrapper[4780]: W1210 11:11:29.380379 4780 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc6a6a404_b951_4032_a505_7e41cc0a6eeb.slice/crio-99030aadd463cef8a258fe4c1ec3ad59fe3e666340e5ee0e3d1ad1d6414947e9 WatchSource:0}: Error finding container 99030aadd463cef8a258fe4c1ec3ad59fe3e666340e5ee0e3d1ad1d6414947e9: Status 404 returned error can't find the container with id 99030aadd463cef8a258fe4c1ec3ad59fe3e666340e5ee0e3d1ad1d6414947e9 Dec 10 11:11:30 crc kubenswrapper[4780]: I1210 11:11:30.216465 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-78dd6ddcc-sqq22" event={"ID":"c6a6a404-b951-4032-a505-7e41cc0a6eeb","Type":"ContainerStarted","Data":"99030aadd463cef8a258fe4c1ec3ad59fe3e666340e5ee0e3d1ad1d6414947e9"} Dec 10 11:11:30 crc kubenswrapper[4780]: I1210 11:11:30.218636 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-675f4bcbfc-cgslz" event={"ID":"b9cf3b6c-c9be-44d8-ab60-bb9dae0ecc75","Type":"ContainerStarted","Data":"866ce9a412badb11ef29ee317108b07a9be921236db69f455c7746c3c658eb7d"} Dec 10 11:11:31 crc kubenswrapper[4780]: I1210 11:11:31.857822 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-cgslz"] Dec 10 11:11:31 crc kubenswrapper[4780]: I1210 11:11:31.919461 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-666b6646f7-zc5ks"] Dec 10 11:11:31 crc kubenswrapper[4780]: I1210 11:11:31.922489 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-666b6646f7-zc5ks" Dec 10 11:11:31 crc kubenswrapper[4780]: I1210 11:11:31.949239 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-666b6646f7-zc5ks"] Dec 10 11:11:32 crc kubenswrapper[4780]: I1210 11:11:32.050959 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tgttr\" (UniqueName: \"kubernetes.io/projected/26190c4b-d1ff-4509-b9cb-572e40141033-kube-api-access-tgttr\") pod \"dnsmasq-dns-666b6646f7-zc5ks\" (UID: \"26190c4b-d1ff-4509-b9cb-572e40141033\") " pod="openstack/dnsmasq-dns-666b6646f7-zc5ks" Dec 10 11:11:32 crc kubenswrapper[4780]: I1210 11:11:32.051571 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/26190c4b-d1ff-4509-b9cb-572e40141033-config\") pod \"dnsmasq-dns-666b6646f7-zc5ks\" (UID: \"26190c4b-d1ff-4509-b9cb-572e40141033\") " pod="openstack/dnsmasq-dns-666b6646f7-zc5ks" Dec 10 11:11:32 crc kubenswrapper[4780]: I1210 11:11:32.051674 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/26190c4b-d1ff-4509-b9cb-572e40141033-dns-svc\") pod \"dnsmasq-dns-666b6646f7-zc5ks\" (UID: \"26190c4b-d1ff-4509-b9cb-572e40141033\") " pod="openstack/dnsmasq-dns-666b6646f7-zc5ks" Dec 10 11:11:32 crc kubenswrapper[4780]: I1210 11:11:32.154320 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/26190c4b-d1ff-4509-b9cb-572e40141033-dns-svc\") pod \"dnsmasq-dns-666b6646f7-zc5ks\" (UID: \"26190c4b-d1ff-4509-b9cb-572e40141033\") " pod="openstack/dnsmasq-dns-666b6646f7-zc5ks" Dec 10 11:11:32 crc kubenswrapper[4780]: I1210 11:11:32.154492 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tgttr\" (UniqueName: \"kubernetes.io/projected/26190c4b-d1ff-4509-b9cb-572e40141033-kube-api-access-tgttr\") pod \"dnsmasq-dns-666b6646f7-zc5ks\" (UID: \"26190c4b-d1ff-4509-b9cb-572e40141033\") " pod="openstack/dnsmasq-dns-666b6646f7-zc5ks" Dec 10 11:11:32 crc kubenswrapper[4780]: I1210 11:11:32.154542 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/26190c4b-d1ff-4509-b9cb-572e40141033-config\") pod \"dnsmasq-dns-666b6646f7-zc5ks\" (UID: \"26190c4b-d1ff-4509-b9cb-572e40141033\") " pod="openstack/dnsmasq-dns-666b6646f7-zc5ks" Dec 10 11:11:32 crc kubenswrapper[4780]: I1210 11:11:32.158250 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/26190c4b-d1ff-4509-b9cb-572e40141033-dns-svc\") pod \"dnsmasq-dns-666b6646f7-zc5ks\" (UID: \"26190c4b-d1ff-4509-b9cb-572e40141033\") " pod="openstack/dnsmasq-dns-666b6646f7-zc5ks" Dec 10 11:11:32 crc kubenswrapper[4780]: I1210 11:11:32.164606 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/26190c4b-d1ff-4509-b9cb-572e40141033-config\") pod \"dnsmasq-dns-666b6646f7-zc5ks\" (UID: \"26190c4b-d1ff-4509-b9cb-572e40141033\") " pod="openstack/dnsmasq-dns-666b6646f7-zc5ks" Dec 10 11:11:32 crc kubenswrapper[4780]: I1210 11:11:32.198253 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tgttr\" (UniqueName: 
\"kubernetes.io/projected/26190c4b-d1ff-4509-b9cb-572e40141033-kube-api-access-tgttr\") pod \"dnsmasq-dns-666b6646f7-zc5ks\" (UID: \"26190c4b-d1ff-4509-b9cb-572e40141033\") " pod="openstack/dnsmasq-dns-666b6646f7-zc5ks" Dec 10 11:11:32 crc kubenswrapper[4780]: I1210 11:11:32.294789 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-666b6646f7-zc5ks" Dec 10 11:11:32 crc kubenswrapper[4780]: I1210 11:11:32.376824 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-sqq22"] Dec 10 11:11:32 crc kubenswrapper[4780]: I1210 11:11:32.407073 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-wjfv6"] Dec 10 11:11:32 crc kubenswrapper[4780]: I1210 11:11:32.409908 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-57d769cc4f-wjfv6" Dec 10 11:11:32 crc kubenswrapper[4780]: I1210 11:11:32.439742 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-wjfv6"] Dec 10 11:11:32 crc kubenswrapper[4780]: I1210 11:11:32.565850 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/295c3a80-ef4d-4d10-b21b-14387a75a1af-config\") pod \"dnsmasq-dns-57d769cc4f-wjfv6\" (UID: \"295c3a80-ef4d-4d10-b21b-14387a75a1af\") " pod="openstack/dnsmasq-dns-57d769cc4f-wjfv6" Dec 10 11:11:32 crc kubenswrapper[4780]: I1210 11:11:32.566504 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/295c3a80-ef4d-4d10-b21b-14387a75a1af-dns-svc\") pod \"dnsmasq-dns-57d769cc4f-wjfv6\" (UID: \"295c3a80-ef4d-4d10-b21b-14387a75a1af\") " pod="openstack/dnsmasq-dns-57d769cc4f-wjfv6" Dec 10 11:11:32 crc kubenswrapper[4780]: I1210 11:11:32.566541 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-r8qzn\" (UniqueName: \"kubernetes.io/projected/295c3a80-ef4d-4d10-b21b-14387a75a1af-kube-api-access-r8qzn\") pod \"dnsmasq-dns-57d769cc4f-wjfv6\" (UID: \"295c3a80-ef4d-4d10-b21b-14387a75a1af\") " pod="openstack/dnsmasq-dns-57d769cc4f-wjfv6" Dec 10 11:11:32 crc kubenswrapper[4780]: I1210 11:11:32.779343 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/295c3a80-ef4d-4d10-b21b-14387a75a1af-config\") pod \"dnsmasq-dns-57d769cc4f-wjfv6\" (UID: \"295c3a80-ef4d-4d10-b21b-14387a75a1af\") " pod="openstack/dnsmasq-dns-57d769cc4f-wjfv6" Dec 10 11:11:32 crc kubenswrapper[4780]: I1210 11:11:32.779493 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/295c3a80-ef4d-4d10-b21b-14387a75a1af-dns-svc\") pod \"dnsmasq-dns-57d769cc4f-wjfv6\" (UID: \"295c3a80-ef4d-4d10-b21b-14387a75a1af\") " pod="openstack/dnsmasq-dns-57d769cc4f-wjfv6" Dec 10 11:11:32 crc kubenswrapper[4780]: I1210 11:11:32.779524 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-r8qzn\" (UniqueName: \"kubernetes.io/projected/295c3a80-ef4d-4d10-b21b-14387a75a1af-kube-api-access-r8qzn\") pod \"dnsmasq-dns-57d769cc4f-wjfv6\" (UID: \"295c3a80-ef4d-4d10-b21b-14387a75a1af\") " pod="openstack/dnsmasq-dns-57d769cc4f-wjfv6" Dec 10 11:11:32 crc kubenswrapper[4780]: I1210 11:11:32.782822 4780 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/295c3a80-ef4d-4d10-b21b-14387a75a1af-dns-svc\") pod \"dnsmasq-dns-57d769cc4f-wjfv6\" (UID: \"295c3a80-ef4d-4d10-b21b-14387a75a1af\") " pod="openstack/dnsmasq-dns-57d769cc4f-wjfv6" Dec 10 11:11:32 crc kubenswrapper[4780]: I1210 11:11:32.790242 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/295c3a80-ef4d-4d10-b21b-14387a75a1af-config\") pod \"dnsmasq-dns-57d769cc4f-wjfv6\" (UID: \"295c3a80-ef4d-4d10-b21b-14387a75a1af\") " pod="openstack/dnsmasq-dns-57d769cc4f-wjfv6" Dec 10 11:11:32 crc kubenswrapper[4780]: I1210 11:11:32.828512 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-r8qzn\" (UniqueName: \"kubernetes.io/projected/295c3a80-ef4d-4d10-b21b-14387a75a1af-kube-api-access-r8qzn\") pod \"dnsmasq-dns-57d769cc4f-wjfv6\" (UID: \"295c3a80-ef4d-4d10-b21b-14387a75a1af\") " pod="openstack/dnsmasq-dns-57d769cc4f-wjfv6" Dec 10 11:11:33 crc kubenswrapper[4780]: I1210 11:11:33.081398 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-server-0"] Dec 10 11:11:33 crc kubenswrapper[4780]: I1210 11:11:33.096505 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0" Dec 10 11:11:33 crc kubenswrapper[4780]: I1210 11:11:33.103226 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-plugins-conf" Dec 10 11:11:33 crc kubenswrapper[4780]: I1210 11:11:33.103743 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-svc" Dec 10 11:11:33 crc kubenswrapper[4780]: I1210 11:11:33.104002 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-server-dockercfg-wl44s" Dec 10 11:11:33 crc kubenswrapper[4780]: I1210 11:11:33.104147 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-erlang-cookie" Dec 10 11:11:33 crc kubenswrapper[4780]: I1210 11:11:33.104345 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-config-data" Dec 10 11:11:33 crc kubenswrapper[4780]: I1210 11:11:33.104557 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-default-user" Dec 10 11:11:33 crc kubenswrapper[4780]: I1210 11:11:33.104695 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-server-conf" Dec 10 11:11:33 crc kubenswrapper[4780]: I1210 11:11:33.107369 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-57d769cc4f-wjfv6" Dec 10 11:11:33 crc kubenswrapper[4780]: I1210 11:11:33.136732 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Dec 10 11:11:33 crc kubenswrapper[4780]: I1210 11:11:33.264211 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-666b6646f7-zc5ks"] Dec 10 11:11:33 crc kubenswrapper[4780]: I1210 11:11:33.301297 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/930a45eb-72d1-4060-92de-2e348073eb16-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"930a45eb-72d1-4060-92de-2e348073eb16\") " pod="openstack/rabbitmq-server-0" Dec 10 11:11:33 crc kubenswrapper[4780]: I1210 11:11:33.301391 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/930a45eb-72d1-4060-92de-2e348073eb16-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"930a45eb-72d1-4060-92de-2e348073eb16\") " pod="openstack/rabbitmq-server-0" Dec 10 11:11:33 crc kubenswrapper[4780]: I1210 11:11:33.301421 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/930a45eb-72d1-4060-92de-2e348073eb16-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"930a45eb-72d1-4060-92de-2e348073eb16\") " pod="openstack/rabbitmq-server-0" Dec 10 11:11:33 crc kubenswrapper[4780]: I1210 11:11:33.301452 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/930a45eb-72d1-4060-92de-2e348073eb16-server-conf\") pod \"rabbitmq-server-0\" (UID: \"930a45eb-72d1-4060-92de-2e348073eb16\") " pod="openstack/rabbitmq-server-0" Dec 10 11:11:33 crc kubenswrapper[4780]: I1210 11:11:33.301483 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bcpxv\" (UniqueName: \"kubernetes.io/projected/930a45eb-72d1-4060-92de-2e348073eb16-kube-api-access-bcpxv\") pod \"rabbitmq-server-0\" (UID: \"930a45eb-72d1-4060-92de-2e348073eb16\") " pod="openstack/rabbitmq-server-0" Dec 10 11:11:33 crc kubenswrapper[4780]: I1210 11:11:33.301562 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/930a45eb-72d1-4060-92de-2e348073eb16-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"930a45eb-72d1-4060-92de-2e348073eb16\") " pod="openstack/rabbitmq-server-0" Dec 10 11:11:33 crc kubenswrapper[4780]: I1210 11:11:33.301606 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"rabbitmq-server-0\" (UID: \"930a45eb-72d1-4060-92de-2e348073eb16\") " pod="openstack/rabbitmq-server-0" Dec 10 11:11:33 crc kubenswrapper[4780]: I1210 11:11:33.301651 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/930a45eb-72d1-4060-92de-2e348073eb16-pod-info\") pod \"rabbitmq-server-0\" (UID: \"930a45eb-72d1-4060-92de-2e348073eb16\") " pod="openstack/rabbitmq-server-0" Dec 10 11:11:33 crc kubenswrapper[4780]: I1210 11:11:33.301689 4780 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/930a45eb-72d1-4060-92de-2e348073eb16-config-data\") pod \"rabbitmq-server-0\" (UID: \"930a45eb-72d1-4060-92de-2e348073eb16\") " pod="openstack/rabbitmq-server-0" Dec 10 11:11:33 crc kubenswrapper[4780]: I1210 11:11:33.301716 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/930a45eb-72d1-4060-92de-2e348073eb16-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"930a45eb-72d1-4060-92de-2e348073eb16\") " pod="openstack/rabbitmq-server-0" Dec 10 11:11:33 crc kubenswrapper[4780]: I1210 11:11:33.301746 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/930a45eb-72d1-4060-92de-2e348073eb16-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"930a45eb-72d1-4060-92de-2e348073eb16\") " pod="openstack/rabbitmq-server-0" Dec 10 11:11:33 crc kubenswrapper[4780]: I1210 11:11:33.338796 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-666b6646f7-zc5ks" event={"ID":"26190c4b-d1ff-4509-b9cb-572e40141033","Type":"ContainerStarted","Data":"be12da5061108dd504c7d1e7803ca425afb4dac48ad8b66d6e2a3a5d80cb08d7"} Dec 10 11:11:33 crc kubenswrapper[4780]: I1210 11:11:33.403513 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/930a45eb-72d1-4060-92de-2e348073eb16-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"930a45eb-72d1-4060-92de-2e348073eb16\") " pod="openstack/rabbitmq-server-0" Dec 10 11:11:33 crc kubenswrapper[4780]: I1210 11:11:33.403594 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/930a45eb-72d1-4060-92de-2e348073eb16-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"930a45eb-72d1-4060-92de-2e348073eb16\") " pod="openstack/rabbitmq-server-0" Dec 10 11:11:33 crc kubenswrapper[4780]: I1210 11:11:33.403624 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/930a45eb-72d1-4060-92de-2e348073eb16-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"930a45eb-72d1-4060-92de-2e348073eb16\") " pod="openstack/rabbitmq-server-0" Dec 10 11:11:33 crc kubenswrapper[4780]: I1210 11:11:33.404210 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/930a45eb-72d1-4060-92de-2e348073eb16-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"930a45eb-72d1-4060-92de-2e348073eb16\") " pod="openstack/rabbitmq-server-0" Dec 10 11:11:33 crc kubenswrapper[4780]: I1210 11:11:33.404785 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/930a45eb-72d1-4060-92de-2e348073eb16-server-conf\") pod \"rabbitmq-server-0\" (UID: \"930a45eb-72d1-4060-92de-2e348073eb16\") " pod="openstack/rabbitmq-server-0" Dec 10 11:11:33 crc kubenswrapper[4780]: I1210 11:11:33.404950 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bcpxv\" (UniqueName: \"kubernetes.io/projected/930a45eb-72d1-4060-92de-2e348073eb16-kube-api-access-bcpxv\") pod \"rabbitmq-server-0\" (UID: 
\"930a45eb-72d1-4060-92de-2e348073eb16\") " pod="openstack/rabbitmq-server-0" Dec 10 11:11:33 crc kubenswrapper[4780]: I1210 11:11:33.405137 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/930a45eb-72d1-4060-92de-2e348073eb16-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"930a45eb-72d1-4060-92de-2e348073eb16\") " pod="openstack/rabbitmq-server-0" Dec 10 11:11:33 crc kubenswrapper[4780]: I1210 11:11:33.405210 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"rabbitmq-server-0\" (UID: \"930a45eb-72d1-4060-92de-2e348073eb16\") " pod="openstack/rabbitmq-server-0" Dec 10 11:11:33 crc kubenswrapper[4780]: I1210 11:11:33.405277 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/930a45eb-72d1-4060-92de-2e348073eb16-pod-info\") pod \"rabbitmq-server-0\" (UID: \"930a45eb-72d1-4060-92de-2e348073eb16\") " pod="openstack/rabbitmq-server-0" Dec 10 11:11:33 crc kubenswrapper[4780]: I1210 11:11:33.405325 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/930a45eb-72d1-4060-92de-2e348073eb16-config-data\") pod \"rabbitmq-server-0\" (UID: \"930a45eb-72d1-4060-92de-2e348073eb16\") " pod="openstack/rabbitmq-server-0" Dec 10 11:11:33 crc kubenswrapper[4780]: I1210 11:11:33.405367 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/930a45eb-72d1-4060-92de-2e348073eb16-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"930a45eb-72d1-4060-92de-2e348073eb16\") " pod="openstack/rabbitmq-server-0" Dec 10 11:11:33 crc kubenswrapper[4780]: I1210 11:11:33.405402 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/930a45eb-72d1-4060-92de-2e348073eb16-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"930a45eb-72d1-4060-92de-2e348073eb16\") " pod="openstack/rabbitmq-server-0" Dec 10 11:11:33 crc kubenswrapper[4780]: I1210 11:11:33.406121 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/930a45eb-72d1-4060-92de-2e348073eb16-server-conf\") pod \"rabbitmq-server-0\" (UID: \"930a45eb-72d1-4060-92de-2e348073eb16\") " pod="openstack/rabbitmq-server-0" Dec 10 11:11:33 crc kubenswrapper[4780]: I1210 11:11:33.406269 4780 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"rabbitmq-server-0\" (UID: \"930a45eb-72d1-4060-92de-2e348073eb16\") device mount path \"/mnt/openstack/pv03\"" pod="openstack/rabbitmq-server-0" Dec 10 11:11:33 crc kubenswrapper[4780]: I1210 11:11:33.407342 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/930a45eb-72d1-4060-92de-2e348073eb16-config-data\") pod \"rabbitmq-server-0\" (UID: \"930a45eb-72d1-4060-92de-2e348073eb16\") " pod="openstack/rabbitmq-server-0" Dec 10 11:11:33 crc kubenswrapper[4780]: I1210 11:11:33.407469 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: 
\"kubernetes.io/empty-dir/930a45eb-72d1-4060-92de-2e348073eb16-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"930a45eb-72d1-4060-92de-2e348073eb16\") " pod="openstack/rabbitmq-server-0" Dec 10 11:11:33 crc kubenswrapper[4780]: I1210 11:11:33.407703 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/930a45eb-72d1-4060-92de-2e348073eb16-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"930a45eb-72d1-4060-92de-2e348073eb16\") " pod="openstack/rabbitmq-server-0" Dec 10 11:11:33 crc kubenswrapper[4780]: I1210 11:11:33.420952 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/930a45eb-72d1-4060-92de-2e348073eb16-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"930a45eb-72d1-4060-92de-2e348073eb16\") " pod="openstack/rabbitmq-server-0" Dec 10 11:11:33 crc kubenswrapper[4780]: I1210 11:11:33.424482 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/930a45eb-72d1-4060-92de-2e348073eb16-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"930a45eb-72d1-4060-92de-2e348073eb16\") " pod="openstack/rabbitmq-server-0" Dec 10 11:11:33 crc kubenswrapper[4780]: I1210 11:11:33.434456 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/930a45eb-72d1-4060-92de-2e348073eb16-pod-info\") pod \"rabbitmq-server-0\" (UID: \"930a45eb-72d1-4060-92de-2e348073eb16\") " pod="openstack/rabbitmq-server-0" Dec 10 11:11:33 crc kubenswrapper[4780]: I1210 11:11:33.439117 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/930a45eb-72d1-4060-92de-2e348073eb16-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"930a45eb-72d1-4060-92de-2e348073eb16\") " pod="openstack/rabbitmq-server-0" Dec 10 11:11:33 crc kubenswrapper[4780]: I1210 11:11:33.445872 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bcpxv\" (UniqueName: \"kubernetes.io/projected/930a45eb-72d1-4060-92de-2e348073eb16-kube-api-access-bcpxv\") pod \"rabbitmq-server-0\" (UID: \"930a45eb-72d1-4060-92de-2e348073eb16\") " pod="openstack/rabbitmq-server-0" Dec 10 11:11:33 crc kubenswrapper[4780]: I1210 11:11:33.511638 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"rabbitmq-server-0\" (UID: \"930a45eb-72d1-4060-92de-2e348073eb16\") " pod="openstack/rabbitmq-server-0" Dec 10 11:11:33 crc kubenswrapper[4780]: I1210 11:11:33.567027 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Dec 10 11:11:33 crc kubenswrapper[4780]: I1210 11:11:33.784785 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Dec 10 11:11:33 crc kubenswrapper[4780]: I1210 11:11:33.985899 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-cell1-svc" Dec 10 11:11:33 crc kubenswrapper[4780]: I1210 11:11:33.986149 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-config-data" Dec 10 11:11:33 crc kubenswrapper[4780]: I1210 11:11:33.986578 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-server-0" Dec 10 11:11:34 crc kubenswrapper[4780]: I1210 11:11:34.009631 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-erlang-cookie" Dec 10 11:11:34 crc kubenswrapper[4780]: I1210 11:11:34.009879 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-default-user" Dec 10 11:11:34 crc kubenswrapper[4780]: I1210 11:11:34.010437 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-server-conf" Dec 10 11:11:34 crc kubenswrapper[4780]: I1210 11:11:34.010779 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-plugins-conf" Dec 10 11:11:34 crc kubenswrapper[4780]: I1210 11:11:34.011038 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-server-dockercfg-5qm56" Dec 10 11:11:34 crc kubenswrapper[4780]: I1210 11:11:34.013782 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Dec 10 11:11:34 crc kubenswrapper[4780]: I1210 11:11:34.092785 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"9f6ef7c1-91bd-4109-af1b-9cf3960ec2ae\") " pod="openstack/rabbitmq-cell1-server-0" Dec 10 11:11:34 crc kubenswrapper[4780]: I1210 11:11:34.092960 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/9f6ef7c1-91bd-4109-af1b-9cf3960ec2ae-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"9f6ef7c1-91bd-4109-af1b-9cf3960ec2ae\") " pod="openstack/rabbitmq-cell1-server-0" Dec 10 11:11:34 crc kubenswrapper[4780]: I1210 11:11:34.092993 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ptcx5\" (UniqueName: \"kubernetes.io/projected/9f6ef7c1-91bd-4109-af1b-9cf3960ec2ae-kube-api-access-ptcx5\") pod \"rabbitmq-cell1-server-0\" (UID: \"9f6ef7c1-91bd-4109-af1b-9cf3960ec2ae\") " pod="openstack/rabbitmq-cell1-server-0" Dec 10 11:11:34 crc kubenswrapper[4780]: I1210 11:11:34.093143 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/9f6ef7c1-91bd-4109-af1b-9cf3960ec2ae-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"9f6ef7c1-91bd-4109-af1b-9cf3960ec2ae\") " pod="openstack/rabbitmq-cell1-server-0" Dec 10 11:11:34 crc kubenswrapper[4780]: I1210 11:11:34.093715 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/9f6ef7c1-91bd-4109-af1b-9cf3960ec2ae-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"9f6ef7c1-91bd-4109-af1b-9cf3960ec2ae\") " pod="openstack/rabbitmq-cell1-server-0" Dec 10 11:11:34 crc kubenswrapper[4780]: I1210 11:11:34.094083 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/9f6ef7c1-91bd-4109-af1b-9cf3960ec2ae-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"9f6ef7c1-91bd-4109-af1b-9cf3960ec2ae\") " pod="openstack/rabbitmq-cell1-server-0" Dec 10 11:11:34 crc kubenswrapper[4780]: I1210 11:11:34.094238 4780 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/9f6ef7c1-91bd-4109-af1b-9cf3960ec2ae-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"9f6ef7c1-91bd-4109-af1b-9cf3960ec2ae\") " pod="openstack/rabbitmq-cell1-server-0" Dec 10 11:11:34 crc kubenswrapper[4780]: I1210 11:11:34.094492 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/9f6ef7c1-91bd-4109-af1b-9cf3960ec2ae-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"9f6ef7c1-91bd-4109-af1b-9cf3960ec2ae\") " pod="openstack/rabbitmq-cell1-server-0" Dec 10 11:11:34 crc kubenswrapper[4780]: I1210 11:11:34.094545 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/9f6ef7c1-91bd-4109-af1b-9cf3960ec2ae-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"9f6ef7c1-91bd-4109-af1b-9cf3960ec2ae\") " pod="openstack/rabbitmq-cell1-server-0" Dec 10 11:11:34 crc kubenswrapper[4780]: I1210 11:11:34.094580 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/9f6ef7c1-91bd-4109-af1b-9cf3960ec2ae-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"9f6ef7c1-91bd-4109-af1b-9cf3960ec2ae\") " pod="openstack/rabbitmq-cell1-server-0" Dec 10 11:11:34 crc kubenswrapper[4780]: I1210 11:11:34.094640 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/9f6ef7c1-91bd-4109-af1b-9cf3960ec2ae-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"9f6ef7c1-91bd-4109-af1b-9cf3960ec2ae\") " pod="openstack/rabbitmq-cell1-server-0" Dec 10 11:11:34 crc kubenswrapper[4780]: I1210 11:11:34.197198 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/9f6ef7c1-91bd-4109-af1b-9cf3960ec2ae-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"9f6ef7c1-91bd-4109-af1b-9cf3960ec2ae\") " pod="openstack/rabbitmq-cell1-server-0" Dec 10 11:11:34 crc kubenswrapper[4780]: I1210 11:11:34.197732 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/9f6ef7c1-91bd-4109-af1b-9cf3960ec2ae-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"9f6ef7c1-91bd-4109-af1b-9cf3960ec2ae\") " pod="openstack/rabbitmq-cell1-server-0" Dec 10 11:11:34 crc kubenswrapper[4780]: I1210 11:11:34.197765 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/9f6ef7c1-91bd-4109-af1b-9cf3960ec2ae-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"9f6ef7c1-91bd-4109-af1b-9cf3960ec2ae\") " pod="openstack/rabbitmq-cell1-server-0" Dec 10 11:11:34 crc kubenswrapper[4780]: I1210 11:11:34.197812 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/9f6ef7c1-91bd-4109-af1b-9cf3960ec2ae-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"9f6ef7c1-91bd-4109-af1b-9cf3960ec2ae\") " pod="openstack/rabbitmq-cell1-server-0" Dec 10 11:11:34 crc kubenswrapper[4780]: I1210 11:11:34.197878 4780 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"9f6ef7c1-91bd-4109-af1b-9cf3960ec2ae\") " pod="openstack/rabbitmq-cell1-server-0" Dec 10 11:11:34 crc kubenswrapper[4780]: I1210 11:11:34.197980 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/9f6ef7c1-91bd-4109-af1b-9cf3960ec2ae-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"9f6ef7c1-91bd-4109-af1b-9cf3960ec2ae\") " pod="openstack/rabbitmq-cell1-server-0" Dec 10 11:11:34 crc kubenswrapper[4780]: I1210 11:11:34.198008 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ptcx5\" (UniqueName: \"kubernetes.io/projected/9f6ef7c1-91bd-4109-af1b-9cf3960ec2ae-kube-api-access-ptcx5\") pod \"rabbitmq-cell1-server-0\" (UID: \"9f6ef7c1-91bd-4109-af1b-9cf3960ec2ae\") " pod="openstack/rabbitmq-cell1-server-0" Dec 10 11:11:34 crc kubenswrapper[4780]: I1210 11:11:34.198061 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/9f6ef7c1-91bd-4109-af1b-9cf3960ec2ae-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"9f6ef7c1-91bd-4109-af1b-9cf3960ec2ae\") " pod="openstack/rabbitmq-cell1-server-0" Dec 10 11:11:34 crc kubenswrapper[4780]: I1210 11:11:34.198220 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/9f6ef7c1-91bd-4109-af1b-9cf3960ec2ae-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"9f6ef7c1-91bd-4109-af1b-9cf3960ec2ae\") " pod="openstack/rabbitmq-cell1-server-0" Dec 10 11:11:34 crc kubenswrapper[4780]: I1210 11:11:34.198337 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/9f6ef7c1-91bd-4109-af1b-9cf3960ec2ae-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"9f6ef7c1-91bd-4109-af1b-9cf3960ec2ae\") " pod="openstack/rabbitmq-cell1-server-0" Dec 10 11:11:34 crc kubenswrapper[4780]: I1210 11:11:34.198378 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/9f6ef7c1-91bd-4109-af1b-9cf3960ec2ae-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"9f6ef7c1-91bd-4109-af1b-9cf3960ec2ae\") " pod="openstack/rabbitmq-cell1-server-0" Dec 10 11:11:34 crc kubenswrapper[4780]: I1210 11:11:34.202117 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/9f6ef7c1-91bd-4109-af1b-9cf3960ec2ae-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"9f6ef7c1-91bd-4109-af1b-9cf3960ec2ae\") " pod="openstack/rabbitmq-cell1-server-0" Dec 10 11:11:34 crc kubenswrapper[4780]: I1210 11:11:34.202503 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/9f6ef7c1-91bd-4109-af1b-9cf3960ec2ae-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"9f6ef7c1-91bd-4109-af1b-9cf3960ec2ae\") " pod="openstack/rabbitmq-cell1-server-0" Dec 10 11:11:34 crc kubenswrapper[4780]: I1210 11:11:34.203112 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/9f6ef7c1-91bd-4109-af1b-9cf3960ec2ae-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: 
\"9f6ef7c1-91bd-4109-af1b-9cf3960ec2ae\") " pod="openstack/rabbitmq-cell1-server-0" Dec 10 11:11:34 crc kubenswrapper[4780]: I1210 11:11:34.203276 4780 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"9f6ef7c1-91bd-4109-af1b-9cf3960ec2ae\") device mount path \"/mnt/openstack/pv01\"" pod="openstack/rabbitmq-cell1-server-0" Dec 10 11:11:34 crc kubenswrapper[4780]: I1210 11:11:34.206268 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/9f6ef7c1-91bd-4109-af1b-9cf3960ec2ae-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"9f6ef7c1-91bd-4109-af1b-9cf3960ec2ae\") " pod="openstack/rabbitmq-cell1-server-0" Dec 10 11:11:34 crc kubenswrapper[4780]: I1210 11:11:34.206711 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/9f6ef7c1-91bd-4109-af1b-9cf3960ec2ae-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"9f6ef7c1-91bd-4109-af1b-9cf3960ec2ae\") " pod="openstack/rabbitmq-cell1-server-0" Dec 10 11:11:34 crc kubenswrapper[4780]: I1210 11:11:34.206980 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/9f6ef7c1-91bd-4109-af1b-9cf3960ec2ae-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"9f6ef7c1-91bd-4109-af1b-9cf3960ec2ae\") " pod="openstack/rabbitmq-cell1-server-0" Dec 10 11:11:34 crc kubenswrapper[4780]: I1210 11:11:34.211626 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/9f6ef7c1-91bd-4109-af1b-9cf3960ec2ae-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"9f6ef7c1-91bd-4109-af1b-9cf3960ec2ae\") " pod="openstack/rabbitmq-cell1-server-0" Dec 10 11:11:34 crc kubenswrapper[4780]: I1210 11:11:34.212886 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/9f6ef7c1-91bd-4109-af1b-9cf3960ec2ae-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"9f6ef7c1-91bd-4109-af1b-9cf3960ec2ae\") " pod="openstack/rabbitmq-cell1-server-0" Dec 10 11:11:34 crc kubenswrapper[4780]: I1210 11:11:34.215754 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/9f6ef7c1-91bd-4109-af1b-9cf3960ec2ae-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"9f6ef7c1-91bd-4109-af1b-9cf3960ec2ae\") " pod="openstack/rabbitmq-cell1-server-0" Dec 10 11:11:34 crc kubenswrapper[4780]: I1210 11:11:34.264846 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ptcx5\" (UniqueName: \"kubernetes.io/projected/9f6ef7c1-91bd-4109-af1b-9cf3960ec2ae-kube-api-access-ptcx5\") pod \"rabbitmq-cell1-server-0\" (UID: \"9f6ef7c1-91bd-4109-af1b-9cf3960ec2ae\") " pod="openstack/rabbitmq-cell1-server-0" Dec 10 11:11:34 crc kubenswrapper[4780]: I1210 11:11:34.292302 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"9f6ef7c1-91bd-4109-af1b-9cf3960ec2ae\") " pod="openstack/rabbitmq-cell1-server-0" Dec 10 11:11:34 crc kubenswrapper[4780]: I1210 11:11:34.314683 4780 util.go:30] "No sandbox for pod can be 
found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Dec 10 11:11:34 crc kubenswrapper[4780]: I1210 11:11:34.378242 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-wjfv6"] Dec 10 11:11:34 crc kubenswrapper[4780]: W1210 11:11:34.428086 4780 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod295c3a80_ef4d_4d10_b21b_14387a75a1af.slice/crio-747290fc2b556c1e90348762ec2e06ec352ef19fa58d9de84b7d5513287c1471 WatchSource:0}: Error finding container 747290fc2b556c1e90348762ec2e06ec352ef19fa58d9de84b7d5513287c1471: Status 404 returned error can't find the container with id 747290fc2b556c1e90348762ec2e06ec352ef19fa58d9de84b7d5513287c1471 Dec 10 11:11:34 crc kubenswrapper[4780]: I1210 11:11:34.495091 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstack-galera-0"] Dec 10 11:11:34 crc kubenswrapper[4780]: I1210 11:11:34.498310 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-galera-0" Dec 10 11:11:34 crc kubenswrapper[4780]: I1210 11:11:34.522591 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-config-data" Dec 10 11:11:34 crc kubenswrapper[4780]: I1210 11:11:34.523261 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-galera-openstack-svc" Dec 10 11:11:34 crc kubenswrapper[4780]: I1210 11:11:34.523704 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"galera-openstack-dockercfg-bk44p" Dec 10 11:11:34 crc kubenswrapper[4780]: I1210 11:11:34.523749 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-galera-0"] Dec 10 11:11:34 crc kubenswrapper[4780]: I1210 11:11:34.526757 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-scripts" Dec 10 11:11:34 crc kubenswrapper[4780]: I1210 11:11:34.528258 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"combined-ca-bundle" Dec 10 11:11:34 crc kubenswrapper[4780]: I1210 11:11:34.615697 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/c31145f5-6188-4934-8ceb-a86ac4a0e997-kolla-config\") pod \"openstack-galera-0\" (UID: \"c31145f5-6188-4934-8ceb-a86ac4a0e997\") " pod="openstack/openstack-galera-0" Dec 10 11:11:34 crc kubenswrapper[4780]: I1210 11:11:34.615810 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7wnlb\" (UniqueName: \"kubernetes.io/projected/c31145f5-6188-4934-8ceb-a86ac4a0e997-kube-api-access-7wnlb\") pod \"openstack-galera-0\" (UID: \"c31145f5-6188-4934-8ceb-a86ac4a0e997\") " pod="openstack/openstack-galera-0" Dec 10 11:11:34 crc kubenswrapper[4780]: I1210 11:11:34.615980 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/c31145f5-6188-4934-8ceb-a86ac4a0e997-config-data-default\") pod \"openstack-galera-0\" (UID: \"c31145f5-6188-4934-8ceb-a86ac4a0e997\") " pod="openstack/openstack-galera-0" Dec 10 11:11:34 crc kubenswrapper[4780]: I1210 11:11:34.616006 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/c31145f5-6188-4934-8ceb-a86ac4a0e997-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"c31145f5-6188-4934-8ceb-a86ac4a0e997\") " pod="openstack/openstack-galera-0" Dec 10 11:11:34 crc kubenswrapper[4780]: I1210 11:11:34.616090 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/c31145f5-6188-4934-8ceb-a86ac4a0e997-config-data-generated\") pod \"openstack-galera-0\" (UID: \"c31145f5-6188-4934-8ceb-a86ac4a0e997\") " pod="openstack/openstack-galera-0" Dec 10 11:11:34 crc kubenswrapper[4780]: I1210 11:11:34.616290 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/c31145f5-6188-4934-8ceb-a86ac4a0e997-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"c31145f5-6188-4934-8ceb-a86ac4a0e997\") " pod="openstack/openstack-galera-0" Dec 10 11:11:34 crc kubenswrapper[4780]: I1210 11:11:34.616362 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"openstack-galera-0\" (UID: \"c31145f5-6188-4934-8ceb-a86ac4a0e997\") " pod="openstack/openstack-galera-0" Dec 10 11:11:34 crc kubenswrapper[4780]: I1210 11:11:34.616433 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c31145f5-6188-4934-8ceb-a86ac4a0e997-operator-scripts\") pod \"openstack-galera-0\" (UID: \"c31145f5-6188-4934-8ceb-a86ac4a0e997\") " pod="openstack/openstack-galera-0" Dec 10 11:11:34 crc kubenswrapper[4780]: I1210 11:11:34.757477 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7wnlb\" (UniqueName: \"kubernetes.io/projected/c31145f5-6188-4934-8ceb-a86ac4a0e997-kube-api-access-7wnlb\") pod \"openstack-galera-0\" (UID: \"c31145f5-6188-4934-8ceb-a86ac4a0e997\") " pod="openstack/openstack-galera-0" Dec 10 11:11:34 crc kubenswrapper[4780]: I1210 11:11:34.757759 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/c31145f5-6188-4934-8ceb-a86ac4a0e997-config-data-default\") pod \"openstack-galera-0\" (UID: \"c31145f5-6188-4934-8ceb-a86ac4a0e997\") " pod="openstack/openstack-galera-0" Dec 10 11:11:34 crc kubenswrapper[4780]: I1210 11:11:34.757797 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c31145f5-6188-4934-8ceb-a86ac4a0e997-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"c31145f5-6188-4934-8ceb-a86ac4a0e997\") " pod="openstack/openstack-galera-0" Dec 10 11:11:34 crc kubenswrapper[4780]: I1210 11:11:34.757875 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/c31145f5-6188-4934-8ceb-a86ac4a0e997-config-data-generated\") pod \"openstack-galera-0\" (UID: \"c31145f5-6188-4934-8ceb-a86ac4a0e997\") " pod="openstack/openstack-galera-0" Dec 10 11:11:34 crc kubenswrapper[4780]: I1210 11:11:34.758166 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/c31145f5-6188-4934-8ceb-a86ac4a0e997-galera-tls-certs\") pod \"openstack-galera-0\" (UID: 
\"c31145f5-6188-4934-8ceb-a86ac4a0e997\") " pod="openstack/openstack-galera-0" Dec 10 11:11:34 crc kubenswrapper[4780]: I1210 11:11:34.758258 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"openstack-galera-0\" (UID: \"c31145f5-6188-4934-8ceb-a86ac4a0e997\") " pod="openstack/openstack-galera-0" Dec 10 11:11:34 crc kubenswrapper[4780]: I1210 11:11:34.758348 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c31145f5-6188-4934-8ceb-a86ac4a0e997-operator-scripts\") pod \"openstack-galera-0\" (UID: \"c31145f5-6188-4934-8ceb-a86ac4a0e997\") " pod="openstack/openstack-galera-0" Dec 10 11:11:34 crc kubenswrapper[4780]: I1210 11:11:34.758466 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/c31145f5-6188-4934-8ceb-a86ac4a0e997-kolla-config\") pod \"openstack-galera-0\" (UID: \"c31145f5-6188-4934-8ceb-a86ac4a0e997\") " pod="openstack/openstack-galera-0" Dec 10 11:11:34 crc kubenswrapper[4780]: I1210 11:11:34.759535 4780 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"openstack-galera-0\" (UID: \"c31145f5-6188-4934-8ceb-a86ac4a0e997\") device mount path \"/mnt/openstack/pv07\"" pod="openstack/openstack-galera-0" Dec 10 11:11:34 crc kubenswrapper[4780]: I1210 11:11:34.794250 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c31145f5-6188-4934-8ceb-a86ac4a0e997-operator-scripts\") pod \"openstack-galera-0\" (UID: \"c31145f5-6188-4934-8ceb-a86ac4a0e997\") " pod="openstack/openstack-galera-0" Dec 10 11:11:34 crc kubenswrapper[4780]: I1210 11:11:34.958456 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/c31145f5-6188-4934-8ceb-a86ac4a0e997-config-data-default\") pod \"openstack-galera-0\" (UID: \"c31145f5-6188-4934-8ceb-a86ac4a0e997\") " pod="openstack/openstack-galera-0" Dec 10 11:11:34 crc kubenswrapper[4780]: I1210 11:11:34.958772 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/c31145f5-6188-4934-8ceb-a86ac4a0e997-config-data-generated\") pod \"openstack-galera-0\" (UID: \"c31145f5-6188-4934-8ceb-a86ac4a0e997\") " pod="openstack/openstack-galera-0" Dec 10 11:11:34 crc kubenswrapper[4780]: I1210 11:11:34.959185 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/c31145f5-6188-4934-8ceb-a86ac4a0e997-kolla-config\") pod \"openstack-galera-0\" (UID: \"c31145f5-6188-4934-8ceb-a86ac4a0e997\") " pod="openstack/openstack-galera-0" Dec 10 11:11:34 crc kubenswrapper[4780]: I1210 11:11:34.965839 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c31145f5-6188-4934-8ceb-a86ac4a0e997-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"c31145f5-6188-4934-8ceb-a86ac4a0e997\") " pod="openstack/openstack-galera-0" Dec 10 11:11:35 crc kubenswrapper[4780]: I1210 11:11:35.131689 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"galera-tls-certs\" (UniqueName: 
\"kubernetes.io/secret/c31145f5-6188-4934-8ceb-a86ac4a0e997-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"c31145f5-6188-4934-8ceb-a86ac4a0e997\") " pod="openstack/openstack-galera-0" Dec 10 11:11:35 crc kubenswrapper[4780]: I1210 11:11:35.171903 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7wnlb\" (UniqueName: \"kubernetes.io/projected/c31145f5-6188-4934-8ceb-a86ac4a0e997-kube-api-access-7wnlb\") pod \"openstack-galera-0\" (UID: \"c31145f5-6188-4934-8ceb-a86ac4a0e997\") " pod="openstack/openstack-galera-0" Dec 10 11:11:35 crc kubenswrapper[4780]: I1210 11:11:35.173303 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"openstack-galera-0\" (UID: \"c31145f5-6188-4934-8ceb-a86ac4a0e997\") " pod="openstack/openstack-galera-0" Dec 10 11:11:35 crc kubenswrapper[4780]: I1210 11:11:35.350263 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Dec 10 11:11:35 crc kubenswrapper[4780]: I1210 11:11:35.398905 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57d769cc4f-wjfv6" event={"ID":"295c3a80-ef4d-4d10-b21b-14387a75a1af","Type":"ContainerStarted","Data":"747290fc2b556c1e90348762ec2e06ec352ef19fa58d9de84b7d5513287c1471"} Dec 10 11:11:35 crc kubenswrapper[4780]: I1210 11:11:35.463208 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-galera-0" Dec 10 11:11:36 crc kubenswrapper[4780]: I1210 11:11:36.171003 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Dec 10 11:11:36 crc kubenswrapper[4780]: W1210 11:11:36.252878 4780 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod9f6ef7c1_91bd_4109_af1b_9cf3960ec2ae.slice/crio-a01dbf379f65621e3f1448b3f2f18728c81f468790d8799ce9d6b901918f4f42 WatchSource:0}: Error finding container a01dbf379f65621e3f1448b3f2f18728c81f468790d8799ce9d6b901918f4f42: Status 404 returned error can't find the container with id a01dbf379f65621e3f1448b3f2f18728c81f468790d8799ce9d6b901918f4f42 Dec 10 11:11:36 crc kubenswrapper[4780]: I1210 11:11:36.365710 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstack-cell1-galera-0"] Dec 10 11:11:36 crc kubenswrapper[4780]: I1210 11:11:36.386169 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstack-cell1-galera-0" Dec 10 11:11:36 crc kubenswrapper[4780]: I1210 11:11:36.390675 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-galera-openstack-cell1-svc" Dec 10 11:11:36 crc kubenswrapper[4780]: I1210 11:11:36.392042 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-cell1-scripts" Dec 10 11:11:36 crc kubenswrapper[4780]: I1210 11:11:36.392548 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-cell1-config-data" Dec 10 11:11:36 crc kubenswrapper[4780]: I1210 11:11:36.393291 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"galera-openstack-cell1-dockercfg-q5jrc" Dec 10 11:11:36 crc kubenswrapper[4780]: I1210 11:11:36.447806 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-cell1-galera-0"] Dec 10 11:11:36 crc kubenswrapper[4780]: I1210 11:11:36.515644 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"9f6ef7c1-91bd-4109-af1b-9cf3960ec2ae","Type":"ContainerStarted","Data":"a01dbf379f65621e3f1448b3f2f18728c81f468790d8799ce9d6b901918f4f42"} Dec 10 11:11:36 crc kubenswrapper[4780]: I1210 11:11:36.520365 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"930a45eb-72d1-4060-92de-2e348073eb16","Type":"ContainerStarted","Data":"973f12245001808e3a5d185833d5abacda718988a830d803b98fbc87612fc2eb"} Dec 10 11:11:36 crc kubenswrapper[4780]: I1210 11:11:36.593942 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-j2ddt\" (UniqueName: \"kubernetes.io/projected/ee80edf3-0250-44c9-acfb-e2f9a3ce4f4b-kube-api-access-j2ddt\") pod \"openstack-cell1-galera-0\" (UID: \"ee80edf3-0250-44c9-acfb-e2f9a3ce4f4b\") " pod="openstack/openstack-cell1-galera-0" Dec 10 11:11:36 crc kubenswrapper[4780]: I1210 11:11:36.596583 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/ee80edf3-0250-44c9-acfb-e2f9a3ce4f4b-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"ee80edf3-0250-44c9-acfb-e2f9a3ce4f4b\") " pod="openstack/openstack-cell1-galera-0" Dec 10 11:11:36 crc kubenswrapper[4780]: I1210 11:11:36.597715 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ee80edf3-0250-44c9-acfb-e2f9a3ce4f4b-combined-ca-bundle\") pod \"openstack-cell1-galera-0\" (UID: \"ee80edf3-0250-44c9-acfb-e2f9a3ce4f4b\") " pod="openstack/openstack-cell1-galera-0" Dec 10 11:11:36 crc kubenswrapper[4780]: I1210 11:11:36.598040 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"openstack-cell1-galera-0\" (UID: \"ee80edf3-0250-44c9-acfb-e2f9a3ce4f4b\") " pod="openstack/openstack-cell1-galera-0" Dec 10 11:11:36 crc kubenswrapper[4780]: I1210 11:11:36.598119 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/ee80edf3-0250-44c9-acfb-e2f9a3ce4f4b-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"ee80edf3-0250-44c9-acfb-e2f9a3ce4f4b\") " pod="openstack/openstack-cell1-galera-0" Dec 
10 11:11:36 crc kubenswrapper[4780]: I1210 11:11:36.598218 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ee80edf3-0250-44c9-acfb-e2f9a3ce4f4b-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"ee80edf3-0250-44c9-acfb-e2f9a3ce4f4b\") " pod="openstack/openstack-cell1-galera-0" Dec 10 11:11:36 crc kubenswrapper[4780]: I1210 11:11:36.598347 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/ee80edf3-0250-44c9-acfb-e2f9a3ce4f4b-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: \"ee80edf3-0250-44c9-acfb-e2f9a3ce4f4b\") " pod="openstack/openstack-cell1-galera-0" Dec 10 11:11:36 crc kubenswrapper[4780]: I1210 11:11:36.599157 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/ee80edf3-0250-44c9-acfb-e2f9a3ce4f4b-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"ee80edf3-0250-44c9-acfb-e2f9a3ce4f4b\") " pod="openstack/openstack-cell1-galera-0" Dec 10 11:11:36 crc kubenswrapper[4780]: I1210 11:11:36.615302 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/memcached-0"] Dec 10 11:11:36 crc kubenswrapper[4780]: I1210 11:11:36.622092 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/memcached-0" Dec 10 11:11:36 crc kubenswrapper[4780]: I1210 11:11:36.644423 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"memcached-config-data" Dec 10 11:11:36 crc kubenswrapper[4780]: I1210 11:11:36.644662 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-memcached-svc" Dec 10 11:11:36 crc kubenswrapper[4780]: I1210 11:11:36.644849 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"memcached-memcached-dockercfg-9xt6n" Dec 10 11:11:36 crc kubenswrapper[4780]: I1210 11:11:36.694068 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/memcached-0"] Dec 10 11:11:36 crc kubenswrapper[4780]: I1210 11:11:36.713226 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/ee80edf3-0250-44c9-acfb-e2f9a3ce4f4b-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"ee80edf3-0250-44c9-acfb-e2f9a3ce4f4b\") " pod="openstack/openstack-cell1-galera-0" Dec 10 11:11:36 crc kubenswrapper[4780]: I1210 11:11:36.714461 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ee80edf3-0250-44c9-acfb-e2f9a3ce4f4b-combined-ca-bundle\") pod \"openstack-cell1-galera-0\" (UID: \"ee80edf3-0250-44c9-acfb-e2f9a3ce4f4b\") " pod="openstack/openstack-cell1-galera-0" Dec 10 11:11:36 crc kubenswrapper[4780]: I1210 11:11:36.714496 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fe36479c-f1fb-4928-b399-e56c8df9205c-combined-ca-bundle\") pod \"memcached-0\" (UID: \"fe36479c-f1fb-4928-b399-e56c8df9205c\") " pod="openstack/memcached-0" Dec 10 11:11:36 crc kubenswrapper[4780]: I1210 11:11:36.714589 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: 
\"kubernetes.io/configmap/fe36479c-f1fb-4928-b399-e56c8df9205c-kolla-config\") pod \"memcached-0\" (UID: \"fe36479c-f1fb-4928-b399-e56c8df9205c\") " pod="openstack/memcached-0" Dec 10 11:11:36 crc kubenswrapper[4780]: I1210 11:11:36.714729 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"openstack-cell1-galera-0\" (UID: \"ee80edf3-0250-44c9-acfb-e2f9a3ce4f4b\") " pod="openstack/openstack-cell1-galera-0" Dec 10 11:11:36 crc kubenswrapper[4780]: I1210 11:11:36.714772 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/ee80edf3-0250-44c9-acfb-e2f9a3ce4f4b-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"ee80edf3-0250-44c9-acfb-e2f9a3ce4f4b\") " pod="openstack/openstack-cell1-galera-0" Dec 10 11:11:36 crc kubenswrapper[4780]: I1210 11:11:36.716503 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ee80edf3-0250-44c9-acfb-e2f9a3ce4f4b-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"ee80edf3-0250-44c9-acfb-e2f9a3ce4f4b\") " pod="openstack/openstack-cell1-galera-0" Dec 10 11:11:36 crc kubenswrapper[4780]: I1210 11:11:36.716631 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/fe36479c-f1fb-4928-b399-e56c8df9205c-config-data\") pod \"memcached-0\" (UID: \"fe36479c-f1fb-4928-b399-e56c8df9205c\") " pod="openstack/memcached-0" Dec 10 11:11:36 crc kubenswrapper[4780]: I1210 11:11:36.716675 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/ee80edf3-0250-44c9-acfb-e2f9a3ce4f4b-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: \"ee80edf3-0250-44c9-acfb-e2f9a3ce4f4b\") " pod="openstack/openstack-cell1-galera-0" Dec 10 11:11:36 crc kubenswrapper[4780]: I1210 11:11:36.716705 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/fe36479c-f1fb-4928-b399-e56c8df9205c-memcached-tls-certs\") pod \"memcached-0\" (UID: \"fe36479c-f1fb-4928-b399-e56c8df9205c\") " pod="openstack/memcached-0" Dec 10 11:11:36 crc kubenswrapper[4780]: I1210 11:11:36.716763 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/ee80edf3-0250-44c9-acfb-e2f9a3ce4f4b-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"ee80edf3-0250-44c9-acfb-e2f9a3ce4f4b\") " pod="openstack/openstack-cell1-galera-0" Dec 10 11:11:36 crc kubenswrapper[4780]: I1210 11:11:36.717041 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-prs6g\" (UniqueName: \"kubernetes.io/projected/fe36479c-f1fb-4928-b399-e56c8df9205c-kube-api-access-prs6g\") pod \"memcached-0\" (UID: \"fe36479c-f1fb-4928-b399-e56c8df9205c\") " pod="openstack/memcached-0" Dec 10 11:11:36 crc kubenswrapper[4780]: I1210 11:11:36.717178 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-j2ddt\" (UniqueName: \"kubernetes.io/projected/ee80edf3-0250-44c9-acfb-e2f9a3ce4f4b-kube-api-access-j2ddt\") pod \"openstack-cell1-galera-0\" (UID: 
\"ee80edf3-0250-44c9-acfb-e2f9a3ce4f4b\") " pod="openstack/openstack-cell1-galera-0" Dec 10 11:11:36 crc kubenswrapper[4780]: I1210 11:11:36.718365 4780 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"openstack-cell1-galera-0\" (UID: \"ee80edf3-0250-44c9-acfb-e2f9a3ce4f4b\") device mount path \"/mnt/openstack/pv05\"" pod="openstack/openstack-cell1-galera-0" Dec 10 11:11:36 crc kubenswrapper[4780]: I1210 11:11:36.719032 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/ee80edf3-0250-44c9-acfb-e2f9a3ce4f4b-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"ee80edf3-0250-44c9-acfb-e2f9a3ce4f4b\") " pod="openstack/openstack-cell1-galera-0" Dec 10 11:11:37 crc kubenswrapper[4780]: I1210 11:11:37.058971 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ee80edf3-0250-44c9-acfb-e2f9a3ce4f4b-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"ee80edf3-0250-44c9-acfb-e2f9a3ce4f4b\") " pod="openstack/openstack-cell1-galera-0" Dec 10 11:11:37 crc kubenswrapper[4780]: I1210 11:11:37.060048 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/ee80edf3-0250-44c9-acfb-e2f9a3ce4f4b-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"ee80edf3-0250-44c9-acfb-e2f9a3ce4f4b\") " pod="openstack/openstack-cell1-galera-0" Dec 10 11:11:37 crc kubenswrapper[4780]: I1210 11:11:37.061077 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ee80edf3-0250-44c9-acfb-e2f9a3ce4f4b-combined-ca-bundle\") pod \"openstack-cell1-galera-0\" (UID: \"ee80edf3-0250-44c9-acfb-e2f9a3ce4f4b\") " pod="openstack/openstack-cell1-galera-0" Dec 10 11:11:37 crc kubenswrapper[4780]: I1210 11:11:37.062988 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/ee80edf3-0250-44c9-acfb-e2f9a3ce4f4b-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: \"ee80edf3-0250-44c9-acfb-e2f9a3ce4f4b\") " pod="openstack/openstack-cell1-galera-0" Dec 10 11:11:37 crc kubenswrapper[4780]: I1210 11:11:37.063102 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/ee80edf3-0250-44c9-acfb-e2f9a3ce4f4b-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"ee80edf3-0250-44c9-acfb-e2f9a3ce4f4b\") " pod="openstack/openstack-cell1-galera-0" Dec 10 11:11:37 crc kubenswrapper[4780]: I1210 11:11:37.065566 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fe36479c-f1fb-4928-b399-e56c8df9205c-combined-ca-bundle\") pod \"memcached-0\" (UID: \"fe36479c-f1fb-4928-b399-e56c8df9205c\") " pod="openstack/memcached-0" Dec 10 11:11:37 crc kubenswrapper[4780]: I1210 11:11:37.065616 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/fe36479c-f1fb-4928-b399-e56c8df9205c-kolla-config\") pod \"memcached-0\" (UID: \"fe36479c-f1fb-4928-b399-e56c8df9205c\") " pod="openstack/memcached-0" Dec 10 11:11:37 crc kubenswrapper[4780]: I1210 11:11:37.065697 4780 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/fe36479c-f1fb-4928-b399-e56c8df9205c-config-data\") pod \"memcached-0\" (UID: \"fe36479c-f1fb-4928-b399-e56c8df9205c\") " pod="openstack/memcached-0" Dec 10 11:11:37 crc kubenswrapper[4780]: I1210 11:11:37.065721 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/fe36479c-f1fb-4928-b399-e56c8df9205c-memcached-tls-certs\") pod \"memcached-0\" (UID: \"fe36479c-f1fb-4928-b399-e56c8df9205c\") " pod="openstack/memcached-0" Dec 10 11:11:37 crc kubenswrapper[4780]: I1210 11:11:37.065768 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-prs6g\" (UniqueName: \"kubernetes.io/projected/fe36479c-f1fb-4928-b399-e56c8df9205c-kube-api-access-prs6g\") pod \"memcached-0\" (UID: \"fe36479c-f1fb-4928-b399-e56c8df9205c\") " pod="openstack/memcached-0" Dec 10 11:11:37 crc kubenswrapper[4780]: I1210 11:11:37.068651 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/fe36479c-f1fb-4928-b399-e56c8df9205c-kolla-config\") pod \"memcached-0\" (UID: \"fe36479c-f1fb-4928-b399-e56c8df9205c\") " pod="openstack/memcached-0" Dec 10 11:11:37 crc kubenswrapper[4780]: I1210 11:11:37.068984 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/fe36479c-f1fb-4928-b399-e56c8df9205c-config-data\") pod \"memcached-0\" (UID: \"fe36479c-f1fb-4928-b399-e56c8df9205c\") " pod="openstack/memcached-0" Dec 10 11:11:37 crc kubenswrapper[4780]: I1210 11:11:37.079204 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fe36479c-f1fb-4928-b399-e56c8df9205c-combined-ca-bundle\") pod \"memcached-0\" (UID: \"fe36479c-f1fb-4928-b399-e56c8df9205c\") " pod="openstack/memcached-0" Dec 10 11:11:37 crc kubenswrapper[4780]: I1210 11:11:37.089393 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-j2ddt\" (UniqueName: \"kubernetes.io/projected/ee80edf3-0250-44c9-acfb-e2f9a3ce4f4b-kube-api-access-j2ddt\") pod \"openstack-cell1-galera-0\" (UID: \"ee80edf3-0250-44c9-acfb-e2f9a3ce4f4b\") " pod="openstack/openstack-cell1-galera-0" Dec 10 11:11:37 crc kubenswrapper[4780]: I1210 11:11:37.134445 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/fe36479c-f1fb-4928-b399-e56c8df9205c-memcached-tls-certs\") pod \"memcached-0\" (UID: \"fe36479c-f1fb-4928-b399-e56c8df9205c\") " pod="openstack/memcached-0" Dec 10 11:11:37 crc kubenswrapper[4780]: I1210 11:11:37.143857 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-prs6g\" (UniqueName: \"kubernetes.io/projected/fe36479c-f1fb-4928-b399-e56c8df9205c-kube-api-access-prs6g\") pod \"memcached-0\" (UID: \"fe36479c-f1fb-4928-b399-e56c8df9205c\") " pod="openstack/memcached-0" Dec 10 11:11:37 crc kubenswrapper[4780]: I1210 11:11:37.229990 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-galera-0"] Dec 10 11:11:37 crc kubenswrapper[4780]: I1210 11:11:37.278102 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/memcached-0" Dec 10 11:11:37 crc kubenswrapper[4780]: I1210 11:11:37.291453 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"openstack-cell1-galera-0\" (UID: \"ee80edf3-0250-44c9-acfb-e2f9a3ce4f4b\") " pod="openstack/openstack-cell1-galera-0" Dec 10 11:11:37 crc kubenswrapper[4780]: I1210 11:11:37.330212 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-cell1-galera-0" Dec 10 11:11:37 crc kubenswrapper[4780]: I1210 11:11:37.548824 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"c31145f5-6188-4934-8ceb-a86ac4a0e997","Type":"ContainerStarted","Data":"92b0ffb6b3781c2f65e0b9a822f3f8a23dae54965ab54a8f2291e461371a273f"} Dec 10 11:11:38 crc kubenswrapper[4780]: I1210 11:11:38.601398 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-cell1-galera-0"] Dec 10 11:11:38 crc kubenswrapper[4780]: I1210 11:11:38.767408 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/memcached-0"] Dec 10 11:11:38 crc kubenswrapper[4780]: W1210 11:11:38.815020 4780 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podfe36479c_f1fb_4928_b399_e56c8df9205c.slice/crio-8e4c28f51e29e8feba5aba9dd33089abb8b37ec639672c585282f5226fcc8754 WatchSource:0}: Error finding container 8e4c28f51e29e8feba5aba9dd33089abb8b37ec639672c585282f5226fcc8754: Status 404 returned error can't find the container with id 8e4c28f51e29e8feba5aba9dd33089abb8b37ec639672c585282f5226fcc8754 Dec 10 11:11:40 crc kubenswrapper[4780]: I1210 11:11:40.191183 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/memcached-0" event={"ID":"fe36479c-f1fb-4928-b399-e56c8df9205c","Type":"ContainerStarted","Data":"8e4c28f51e29e8feba5aba9dd33089abb8b37ec639672c585282f5226fcc8754"} Dec 10 11:11:40 crc kubenswrapper[4780]: I1210 11:11:40.244586 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"ee80edf3-0250-44c9-acfb-e2f9a3ce4f4b","Type":"ContainerStarted","Data":"8d5ac16c6bea21d7d40359beeac4e49dca72c242f62809038914aad9ad28e31c"} Dec 10 11:11:40 crc kubenswrapper[4780]: I1210 11:11:40.448835 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/kube-state-metrics-0"] Dec 10 11:11:40 crc kubenswrapper[4780]: I1210 11:11:40.457377 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/kube-state-metrics-0" Dec 10 11:11:40 crc kubenswrapper[4780]: I1210 11:11:40.471122 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"telemetry-ceilometer-dockercfg-gn4wc" Dec 10 11:11:40 crc kubenswrapper[4780]: I1210 11:11:40.535325 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Dec 10 11:11:40 crc kubenswrapper[4780]: I1210 11:11:40.568237 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cx4fg\" (UniqueName: \"kubernetes.io/projected/82752cbd-d657-4c6d-94f6-e9e75a4452c2-kube-api-access-cx4fg\") pod \"kube-state-metrics-0\" (UID: \"82752cbd-d657-4c6d-94f6-e9e75a4452c2\") " pod="openstack/kube-state-metrics-0" Dec 10 11:11:40 crc kubenswrapper[4780]: I1210 11:11:40.671213 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cx4fg\" (UniqueName: \"kubernetes.io/projected/82752cbd-d657-4c6d-94f6-e9e75a4452c2-kube-api-access-cx4fg\") pod \"kube-state-metrics-0\" (UID: \"82752cbd-d657-4c6d-94f6-e9e75a4452c2\") " pod="openstack/kube-state-metrics-0" Dec 10 11:11:40 crc kubenswrapper[4780]: I1210 11:11:40.727997 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cx4fg\" (UniqueName: \"kubernetes.io/projected/82752cbd-d657-4c6d-94f6-e9e75a4452c2-kube-api-access-cx4fg\") pod \"kube-state-metrics-0\" (UID: \"82752cbd-d657-4c6d-94f6-e9e75a4452c2\") " pod="openstack/kube-state-metrics-0" Dec 10 11:11:41 crc kubenswrapper[4780]: I1210 11:11:41.092155 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Dec 10 11:11:43 crc kubenswrapper[4780]: I1210 11:11:43.557038 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operators/observability-ui-dashboards-7d5fb4cbfb-26rg4"] Dec 10 11:11:43 crc kubenswrapper[4780]: I1210 11:11:43.566152 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operators/observability-ui-dashboards-7d5fb4cbfb-26rg4" Dec 10 11:11:43 crc kubenswrapper[4780]: I1210 11:11:43.578369 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"observability-ui-dashboards" Dec 10 11:11:44 crc kubenswrapper[4780]: I1210 11:11:44.014883 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/observability-ui-dashboards-7d5fb4cbfb-26rg4"] Dec 10 11:11:44 crc kubenswrapper[4780]: I1210 11:11:44.130256 4780 patch_prober.go:28] interesting pod/logging-loki-gateway-7d6b48847-8n96n container/opa namespace/openshift-logging: Readiness probe status=failure output="Get \"https://10.217.0.63:8083/ready\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" start-of-body= Dec 10 11:11:44 crc kubenswrapper[4780]: I1210 11:11:44.130375 4780 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-logging/logging-loki-gateway-7d6b48847-8n96n" podUID="ec49e3ad-a0d4-4eaa-a8f6-6f76c1663546" containerName="opa" probeResult="failure" output="Get \"https://10.217.0.63:8083/ready\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Dec 10 11:11:44 crc kubenswrapper[4780]: I1210 11:11:44.137878 4780 patch_prober.go:28] interesting pod/logging-loki-gateway-7d6b48847-cgx9d container/gateway namespace/openshift-logging: Readiness probe status=failure output="Get \"https://10.217.0.74:8081/ready\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Dec 10 11:11:44 crc kubenswrapper[4780]: I1210 11:11:44.139260 4780 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-logging/logging-loki-gateway-7d6b48847-cgx9d" podUID="dc698901-e923-49fa-bc7f-f4e3f9f0a99b" containerName="gateway" probeResult="failure" output="Get \"https://10.217.0.74:8081/ready\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Dec 10 11:11:44 crc kubenswrapper[4780]: I1210 11:11:44.145019 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-t6llg\" (UniqueName: \"kubernetes.io/projected/f1e3b9ec-1036-4bee-bbc8-336293208b48-kube-api-access-t6llg\") pod \"observability-ui-dashboards-7d5fb4cbfb-26rg4\" (UID: \"f1e3b9ec-1036-4bee-bbc8-336293208b48\") " pod="openshift-operators/observability-ui-dashboards-7d5fb4cbfb-26rg4" Dec 10 11:11:44 crc kubenswrapper[4780]: I1210 11:11:44.145530 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f1e3b9ec-1036-4bee-bbc8-336293208b48-serving-cert\") pod \"observability-ui-dashboards-7d5fb4cbfb-26rg4\" (UID: \"f1e3b9ec-1036-4bee-bbc8-336293208b48\") " pod="openshift-operators/observability-ui-dashboards-7d5fb4cbfb-26rg4" Dec 10 11:11:44 crc kubenswrapper[4780]: I1210 11:11:44.215015 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/prometheus-metric-storage-0"] Dec 10 11:11:44 crc kubenswrapper[4780]: I1210 11:11:44.248165 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f1e3b9ec-1036-4bee-bbc8-336293208b48-serving-cert\") pod \"observability-ui-dashboards-7d5fb4cbfb-26rg4\" (UID: \"f1e3b9ec-1036-4bee-bbc8-336293208b48\") " pod="openshift-operators/observability-ui-dashboards-7d5fb4cbfb-26rg4" Dec 10 11:11:44 crc 
kubenswrapper[4780]: I1210 11:11:44.248286 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-t6llg\" (UniqueName: \"kubernetes.io/projected/f1e3b9ec-1036-4bee-bbc8-336293208b48-kube-api-access-t6llg\") pod \"observability-ui-dashboards-7d5fb4cbfb-26rg4\" (UID: \"f1e3b9ec-1036-4bee-bbc8-336293208b48\") " pod="openshift-operators/observability-ui-dashboards-7d5fb4cbfb-26rg4" Dec 10 11:11:44 crc kubenswrapper[4780]: I1210 11:11:44.248361 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/prometheus-metric-storage-0" Dec 10 11:11:44 crc kubenswrapper[4780]: I1210 11:11:44.248214 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/prometheus-metric-storage-0"] Dec 10 11:11:44 crc kubenswrapper[4780]: I1210 11:11:44.264314 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f1e3b9ec-1036-4bee-bbc8-336293208b48-serving-cert\") pod \"observability-ui-dashboards-7d5fb4cbfb-26rg4\" (UID: \"f1e3b9ec-1036-4bee-bbc8-336293208b48\") " pod="openshift-operators/observability-ui-dashboards-7d5fb4cbfb-26rg4" Dec 10 11:11:44 crc kubenswrapper[4780]: I1210 11:11:44.466575 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"observability-ui-dashboards-sa-dockercfg-mkh28" Dec 10 11:11:44 crc kubenswrapper[4780]: I1210 11:11:44.466636 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage-web-config" Dec 10 11:11:44 crc kubenswrapper[4780]: I1210 11:11:44.467106 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage" Dec 10 11:11:44 crc kubenswrapper[4780]: I1210 11:11:44.467303 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage-thanos-prometheus-http-client-file" Dec 10 11:11:44 crc kubenswrapper[4780]: I1210 11:11:44.474174 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"prometheus-metric-storage-rulefiles-0" Dec 10 11:11:44 crc kubenswrapper[4780]: I1210 11:11:44.474584 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"metric-storage-prometheus-dockercfg-xtn45" Dec 10 11:11:44 crc kubenswrapper[4780]: I1210 11:11:44.906597 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-t6llg\" (UniqueName: \"kubernetes.io/projected/f1e3b9ec-1036-4bee-bbc8-336293208b48-kube-api-access-t6llg\") pod \"observability-ui-dashboards-7d5fb4cbfb-26rg4\" (UID: \"f1e3b9ec-1036-4bee-bbc8-336293208b48\") " pod="openshift-operators/observability-ui-dashboards-7d5fb4cbfb-26rg4" Dec 10 11:11:44 crc kubenswrapper[4780]: I1210 11:11:44.917187 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovsdbserver-nb-0"] Dec 10 11:11:44 crc kubenswrapper[4780]: I1210 11:11:44.919609 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovsdbserver-nb-0" Dec 10 11:11:44 crc kubenswrapper[4780]: I1210 11:11:44.923702 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage-tls-assets-0" Dec 10 11:11:44 crc kubenswrapper[4780]: I1210 11:11:44.932964 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xtssp\" (UniqueName: \"kubernetes.io/projected/bef902c7-4e5f-4af9-bda4-0c92b8521901-kube-api-access-xtssp\") pod \"prometheus-metric-storage-0\" (UID: \"bef902c7-4e5f-4af9-bda4-0c92b8521901\") " pod="openstack/prometheus-metric-storage-0" Dec 10 11:11:44 crc kubenswrapper[4780]: I1210 11:11:44.933036 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"prometheus-metric-storage-0\" (UID: \"bef902c7-4e5f-4af9-bda4-0c92b8521901\") " pod="openstack/prometheus-metric-storage-0" Dec 10 11:11:44 crc kubenswrapper[4780]: I1210 11:11:44.933091 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/bef902c7-4e5f-4af9-bda4-0c92b8521901-web-config\") pod \"prometheus-metric-storage-0\" (UID: \"bef902c7-4e5f-4af9-bda4-0c92b8521901\") " pod="openstack/prometheus-metric-storage-0" Dec 10 11:11:44 crc kubenswrapper[4780]: I1210 11:11:44.933163 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/bef902c7-4e5f-4af9-bda4-0c92b8521901-thanos-prometheus-http-client-file\") pod \"prometheus-metric-storage-0\" (UID: \"bef902c7-4e5f-4af9-bda4-0c92b8521901\") " pod="openstack/prometheus-metric-storage-0" Dec 10 11:11:44 crc kubenswrapper[4780]: I1210 11:11:44.933209 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/bef902c7-4e5f-4af9-bda4-0c92b8521901-tls-assets\") pod \"prometheus-metric-storage-0\" (UID: \"bef902c7-4e5f-4af9-bda4-0c92b8521901\") " pod="openstack/prometheus-metric-storage-0" Dec 10 11:11:44 crc kubenswrapper[4780]: I1210 11:11:44.933459 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/bef902c7-4e5f-4af9-bda4-0c92b8521901-prometheus-metric-storage-rulefiles-0\") pod \"prometheus-metric-storage-0\" (UID: \"bef902c7-4e5f-4af9-bda4-0c92b8521901\") " pod="openstack/prometheus-metric-storage-0" Dec 10 11:11:44 crc kubenswrapper[4780]: I1210 11:11:44.933498 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/bef902c7-4e5f-4af9-bda4-0c92b8521901-config-out\") pod \"prometheus-metric-storage-0\" (UID: \"bef902c7-4e5f-4af9-bda4-0c92b8521901\") " pod="openstack/prometheus-metric-storage-0" Dec 10 11:11:44 crc kubenswrapper[4780]: I1210 11:11:44.933526 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/bef902c7-4e5f-4af9-bda4-0c92b8521901-config\") pod \"prometheus-metric-storage-0\" (UID: \"bef902c7-4e5f-4af9-bda4-0c92b8521901\") " pod="openstack/prometheus-metric-storage-0" Dec 10 11:11:44 crc 
kubenswrapper[4780]: I1210 11:11:44.933827 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-nb-0"] Dec 10 11:11:44 crc kubenswrapper[4780]: I1210 11:11:44.942843 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovn-metrics" Dec 10 11:11:44 crc kubenswrapper[4780]: I1210 11:11:44.942886 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-nb-config" Dec 10 11:11:44 crc kubenswrapper[4780]: I1210 11:11:44.947996 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovndbcluster-nb-ovndbs" Dec 10 11:11:44 crc kubenswrapper[4780]: I1210 11:11:44.966203 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncluster-ovndbcluster-nb-dockercfg-ht5h9" Dec 10 11:11:44 crc kubenswrapper[4780]: I1210 11:11:44.967158 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-nb-scripts" Dec 10 11:11:45 crc kubenswrapper[4780]: I1210 11:11:45.028384 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/observability-ui-dashboards-7d5fb4cbfb-26rg4" Dec 10 11:11:45 crc kubenswrapper[4780]: I1210 11:11:45.036507 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Dec 10 11:11:45 crc kubenswrapper[4780]: I1210 11:11:45.046053 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/bef902c7-4e5f-4af9-bda4-0c92b8521901-prometheus-metric-storage-rulefiles-0\") pod \"prometheus-metric-storage-0\" (UID: \"bef902c7-4e5f-4af9-bda4-0c92b8521901\") " pod="openstack/prometheus-metric-storage-0" Dec 10 11:11:45 crc kubenswrapper[4780]: I1210 11:11:45.046217 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/bef902c7-4e5f-4af9-bda4-0c92b8521901-config-out\") pod \"prometheus-metric-storage-0\" (UID: \"bef902c7-4e5f-4af9-bda4-0c92b8521901\") " pod="openstack/prometheus-metric-storage-0" Dec 10 11:11:45 crc kubenswrapper[4780]: I1210 11:11:45.046271 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/bef902c7-4e5f-4af9-bda4-0c92b8521901-config\") pod \"prometheus-metric-storage-0\" (UID: \"bef902c7-4e5f-4af9-bda4-0c92b8521901\") " pod="openstack/prometheus-metric-storage-0" Dec 10 11:11:45 crc kubenswrapper[4780]: I1210 11:11:45.046512 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xtssp\" (UniqueName: \"kubernetes.io/projected/bef902c7-4e5f-4af9-bda4-0c92b8521901-kube-api-access-xtssp\") pod \"prometheus-metric-storage-0\" (UID: \"bef902c7-4e5f-4af9-bda4-0c92b8521901\") " pod="openstack/prometheus-metric-storage-0" Dec 10 11:11:45 crc kubenswrapper[4780]: I1210 11:11:45.046591 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"prometheus-metric-storage-0\" (UID: \"bef902c7-4e5f-4af9-bda4-0c92b8521901\") " pod="openstack/prometheus-metric-storage-0" Dec 10 11:11:45 crc kubenswrapper[4780]: I1210 11:11:45.046652 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/bef902c7-4e5f-4af9-bda4-0c92b8521901-web-config\") pod 
\"prometheus-metric-storage-0\" (UID: \"bef902c7-4e5f-4af9-bda4-0c92b8521901\") " pod="openstack/prometheus-metric-storage-0" Dec 10 11:11:45 crc kubenswrapper[4780]: I1210 11:11:45.046733 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/bef902c7-4e5f-4af9-bda4-0c92b8521901-thanos-prometheus-http-client-file\") pod \"prometheus-metric-storage-0\" (UID: \"bef902c7-4e5f-4af9-bda4-0c92b8521901\") " pod="openstack/prometheus-metric-storage-0" Dec 10 11:11:45 crc kubenswrapper[4780]: I1210 11:11:45.046784 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/bef902c7-4e5f-4af9-bda4-0c92b8521901-tls-assets\") pod \"prometheus-metric-storage-0\" (UID: \"bef902c7-4e5f-4af9-bda4-0c92b8521901\") " pod="openstack/prometheus-metric-storage-0" Dec 10 11:11:45 crc kubenswrapper[4780]: I1210 11:11:45.050390 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/bef902c7-4e5f-4af9-bda4-0c92b8521901-prometheus-metric-storage-rulefiles-0\") pod \"prometheus-metric-storage-0\" (UID: \"bef902c7-4e5f-4af9-bda4-0c92b8521901\") " pod="openstack/prometheus-metric-storage-0" Dec 10 11:11:45 crc kubenswrapper[4780]: I1210 11:11:45.059184 4780 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"prometheus-metric-storage-0\" (UID: \"bef902c7-4e5f-4af9-bda4-0c92b8521901\") device mount path \"/mnt/openstack/pv04\"" pod="openstack/prometheus-metric-storage-0" Dec 10 11:11:45 crc kubenswrapper[4780]: I1210 11:11:45.073315 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/bef902c7-4e5f-4af9-bda4-0c92b8521901-thanos-prometheus-http-client-file\") pod \"prometheus-metric-storage-0\" (UID: \"bef902c7-4e5f-4af9-bda4-0c92b8521901\") " pod="openstack/prometheus-metric-storage-0" Dec 10 11:11:45 crc kubenswrapper[4780]: I1210 11:11:45.074550 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/bef902c7-4e5f-4af9-bda4-0c92b8521901-config\") pod \"prometheus-metric-storage-0\" (UID: \"bef902c7-4e5f-4af9-bda4-0c92b8521901\") " pod="openstack/prometheus-metric-storage-0" Dec 10 11:11:45 crc kubenswrapper[4780]: I1210 11:11:45.078789 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/bef902c7-4e5f-4af9-bda4-0c92b8521901-web-config\") pod \"prometheus-metric-storage-0\" (UID: \"bef902c7-4e5f-4af9-bda4-0c92b8521901\") " pod="openstack/prometheus-metric-storage-0" Dec 10 11:11:45 crc kubenswrapper[4780]: I1210 11:11:45.081353 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/console-8495f5c58c-ndhht"] Dec 10 11:11:45 crc kubenswrapper[4780]: I1210 11:11:45.121688 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/console-8495f5c58c-ndhht" Dec 10 11:11:45 crc kubenswrapper[4780]: I1210 11:11:45.131300 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/bef902c7-4e5f-4af9-bda4-0c92b8521901-config-out\") pod \"prometheus-metric-storage-0\" (UID: \"bef902c7-4e5f-4af9-bda4-0c92b8521901\") " pod="openstack/prometheus-metric-storage-0" Dec 10 11:11:45 crc kubenswrapper[4780]: I1210 11:11:45.139508 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-8495f5c58c-ndhht"] Dec 10 11:11:45 crc kubenswrapper[4780]: I1210 11:11:45.142574 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/bef902c7-4e5f-4af9-bda4-0c92b8521901-tls-assets\") pod \"prometheus-metric-storage-0\" (UID: \"bef902c7-4e5f-4af9-bda4-0c92b8521901\") " pod="openstack/prometheus-metric-storage-0" Dec 10 11:11:45 crc kubenswrapper[4780]: I1210 11:11:45.155600 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/34111627-23c0-44bc-8b84-8cecac15cea1-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"34111627-23c0-44bc-8b84-8cecac15cea1\") " pod="openstack/ovsdbserver-nb-0" Dec 10 11:11:45 crc kubenswrapper[4780]: I1210 11:11:45.155759 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/34111627-23c0-44bc-8b84-8cecac15cea1-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: \"34111627-23c0-44bc-8b84-8cecac15cea1\") " pod="openstack/ovsdbserver-nb-0" Dec 10 11:11:45 crc kubenswrapper[4780]: I1210 11:11:45.155792 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dlwxl\" (UniqueName: \"kubernetes.io/projected/34111627-23c0-44bc-8b84-8cecac15cea1-kube-api-access-dlwxl\") pod \"ovsdbserver-nb-0\" (UID: \"34111627-23c0-44bc-8b84-8cecac15cea1\") " pod="openstack/ovsdbserver-nb-0" Dec 10 11:11:45 crc kubenswrapper[4780]: I1210 11:11:45.155844 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/34111627-23c0-44bc-8b84-8cecac15cea1-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"34111627-23c0-44bc-8b84-8cecac15cea1\") " pod="openstack/ovsdbserver-nb-0" Dec 10 11:11:45 crc kubenswrapper[4780]: I1210 11:11:45.155956 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/34111627-23c0-44bc-8b84-8cecac15cea1-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"34111627-23c0-44bc-8b84-8cecac15cea1\") " pod="openstack/ovsdbserver-nb-0" Dec 10 11:11:45 crc kubenswrapper[4780]: I1210 11:11:45.156006 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/34111627-23c0-44bc-8b84-8cecac15cea1-config\") pod \"ovsdbserver-nb-0\" (UID: \"34111627-23c0-44bc-8b84-8cecac15cea1\") " pod="openstack/ovsdbserver-nb-0" Dec 10 11:11:45 crc kubenswrapper[4780]: I1210 11:11:45.156124 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: 
\"kubernetes.io/configmap/34111627-23c0-44bc-8b84-8cecac15cea1-scripts\") pod \"ovsdbserver-nb-0\" (UID: \"34111627-23c0-44bc-8b84-8cecac15cea1\") " pod="openstack/ovsdbserver-nb-0" Dec 10 11:11:45 crc kubenswrapper[4780]: I1210 11:11:45.156159 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"ovsdbserver-nb-0\" (UID: \"34111627-23c0-44bc-8b84-8cecac15cea1\") " pod="openstack/ovsdbserver-nb-0" Dec 10 11:11:45 crc kubenswrapper[4780]: I1210 11:11:45.246448 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xtssp\" (UniqueName: \"kubernetes.io/projected/bef902c7-4e5f-4af9-bda4-0c92b8521901-kube-api-access-xtssp\") pod \"prometheus-metric-storage-0\" (UID: \"bef902c7-4e5f-4af9-bda4-0c92b8521901\") " pod="openstack/prometheus-metric-storage-0" Dec 10 11:11:45 crc kubenswrapper[4780]: I1210 11:11:45.261549 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/1916a14a-a83c-48d0-84f8-3777baa94835-console-serving-cert\") pod \"console-8495f5c58c-ndhht\" (UID: \"1916a14a-a83c-48d0-84f8-3777baa94835\") " pod="openshift-console/console-8495f5c58c-ndhht" Dec 10 11:11:45 crc kubenswrapper[4780]: I1210 11:11:45.261711 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dpqnn\" (UniqueName: \"kubernetes.io/projected/1916a14a-a83c-48d0-84f8-3777baa94835-kube-api-access-dpqnn\") pod \"console-8495f5c58c-ndhht\" (UID: \"1916a14a-a83c-48d0-84f8-3777baa94835\") " pod="openshift-console/console-8495f5c58c-ndhht" Dec 10 11:11:45 crc kubenswrapper[4780]: I1210 11:11:45.261907 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/34111627-23c0-44bc-8b84-8cecac15cea1-scripts\") pod \"ovsdbserver-nb-0\" (UID: \"34111627-23c0-44bc-8b84-8cecac15cea1\") " pod="openstack/ovsdbserver-nb-0" Dec 10 11:11:45 crc kubenswrapper[4780]: I1210 11:11:45.261975 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"ovsdbserver-nb-0\" (UID: \"34111627-23c0-44bc-8b84-8cecac15cea1\") " pod="openstack/ovsdbserver-nb-0" Dec 10 11:11:45 crc kubenswrapper[4780]: I1210 11:11:45.262070 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/1916a14a-a83c-48d0-84f8-3777baa94835-oauth-serving-cert\") pod \"console-8495f5c58c-ndhht\" (UID: \"1916a14a-a83c-48d0-84f8-3777baa94835\") " pod="openshift-console/console-8495f5c58c-ndhht" Dec 10 11:11:45 crc kubenswrapper[4780]: I1210 11:11:45.279038 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/34111627-23c0-44bc-8b84-8cecac15cea1-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"34111627-23c0-44bc-8b84-8cecac15cea1\") " pod="openstack/ovsdbserver-nb-0" Dec 10 11:11:45 crc kubenswrapper[4780]: I1210 11:11:45.286802 4780 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"ovsdbserver-nb-0\" (UID: 
\"34111627-23c0-44bc-8b84-8cecac15cea1\") device mount path \"/mnt/openstack/pv12\"" pod="openstack/ovsdbserver-nb-0" Dec 10 11:11:45 crc kubenswrapper[4780]: I1210 11:11:45.343295 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/34111627-23c0-44bc-8b84-8cecac15cea1-scripts\") pod \"ovsdbserver-nb-0\" (UID: \"34111627-23c0-44bc-8b84-8cecac15cea1\") " pod="openstack/ovsdbserver-nb-0" Dec 10 11:11:45 crc kubenswrapper[4780]: I1210 11:11:45.344117 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"prometheus-metric-storage-0\" (UID: \"bef902c7-4e5f-4af9-bda4-0c92b8521901\") " pod="openstack/prometheus-metric-storage-0" Dec 10 11:11:45 crc kubenswrapper[4780]: I1210 11:11:45.345586 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/34111627-23c0-44bc-8b84-8cecac15cea1-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"34111627-23c0-44bc-8b84-8cecac15cea1\") " pod="openstack/ovsdbserver-nb-0" Dec 10 11:11:45 crc kubenswrapper[4780]: W1210 11:11:45.350729 4780 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod82752cbd_d657_4c6d_94f6_e9e75a4452c2.slice/crio-c7469e1ae2fccd02425e0d2fb3215f70fae34bf139489ae83244595a22003d2b WatchSource:0}: Error finding container c7469e1ae2fccd02425e0d2fb3215f70fae34bf139489ae83244595a22003d2b: Status 404 returned error can't find the container with id c7469e1ae2fccd02425e0d2fb3215f70fae34bf139489ae83244595a22003d2b Dec 10 11:11:45 crc kubenswrapper[4780]: I1210 11:11:45.351341 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/1916a14a-a83c-48d0-84f8-3777baa94835-console-config\") pod \"console-8495f5c58c-ndhht\" (UID: \"1916a14a-a83c-48d0-84f8-3777baa94835\") " pod="openshift-console/console-8495f5c58c-ndhht" Dec 10 11:11:45 crc kubenswrapper[4780]: I1210 11:11:45.351515 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/1916a14a-a83c-48d0-84f8-3777baa94835-service-ca\") pod \"console-8495f5c58c-ndhht\" (UID: \"1916a14a-a83c-48d0-84f8-3777baa94835\") " pod="openshift-console/console-8495f5c58c-ndhht" Dec 10 11:11:45 crc kubenswrapper[4780]: I1210 11:11:45.351874 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/34111627-23c0-44bc-8b84-8cecac15cea1-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: \"34111627-23c0-44bc-8b84-8cecac15cea1\") " pod="openstack/ovsdbserver-nb-0" Dec 10 11:11:45 crc kubenswrapper[4780]: I1210 11:11:45.352100 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dlwxl\" (UniqueName: \"kubernetes.io/projected/34111627-23c0-44bc-8b84-8cecac15cea1-kube-api-access-dlwxl\") pod \"ovsdbserver-nb-0\" (UID: \"34111627-23c0-44bc-8b84-8cecac15cea1\") " pod="openstack/ovsdbserver-nb-0" Dec 10 11:11:45 crc kubenswrapper[4780]: I1210 11:11:45.782578 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/34111627-23c0-44bc-8b84-8cecac15cea1-ovsdb-rundir\") pod 
\"ovsdbserver-nb-0\" (UID: \"34111627-23c0-44bc-8b84-8cecac15cea1\") " pod="openstack/ovsdbserver-nb-0" Dec 10 11:11:45 crc kubenswrapper[4780]: I1210 11:11:45.783317 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/34111627-23c0-44bc-8b84-8cecac15cea1-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"34111627-23c0-44bc-8b84-8cecac15cea1\") " pod="openstack/ovsdbserver-nb-0" Dec 10 11:11:45 crc kubenswrapper[4780]: I1210 11:11:45.783504 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/34111627-23c0-44bc-8b84-8cecac15cea1-config\") pod \"ovsdbserver-nb-0\" (UID: \"34111627-23c0-44bc-8b84-8cecac15cea1\") " pod="openstack/ovsdbserver-nb-0" Dec 10 11:11:45 crc kubenswrapper[4780]: I1210 11:11:45.783631 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/1916a14a-a83c-48d0-84f8-3777baa94835-console-oauth-config\") pod \"console-8495f5c58c-ndhht\" (UID: \"1916a14a-a83c-48d0-84f8-3777baa94835\") " pod="openshift-console/console-8495f5c58c-ndhht" Dec 10 11:11:45 crc kubenswrapper[4780]: I1210 11:11:45.783811 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/1916a14a-a83c-48d0-84f8-3777baa94835-trusted-ca-bundle\") pod \"console-8495f5c58c-ndhht\" (UID: \"1916a14a-a83c-48d0-84f8-3777baa94835\") " pod="openshift-console/console-8495f5c58c-ndhht" Dec 10 11:11:45 crc kubenswrapper[4780]: I1210 11:11:45.848465 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dlwxl\" (UniqueName: \"kubernetes.io/projected/34111627-23c0-44bc-8b84-8cecac15cea1-kube-api-access-dlwxl\") pod \"ovsdbserver-nb-0\" (UID: \"34111627-23c0-44bc-8b84-8cecac15cea1\") " pod="openstack/ovsdbserver-nb-0" Dec 10 11:11:45 crc kubenswrapper[4780]: I1210 11:11:45.854038 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/34111627-23c0-44bc-8b84-8cecac15cea1-config\") pod \"ovsdbserver-nb-0\" (UID: \"34111627-23c0-44bc-8b84-8cecac15cea1\") " pod="openstack/ovsdbserver-nb-0" Dec 10 11:11:45 crc kubenswrapper[4780]: I1210 11:11:45.854423 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/34111627-23c0-44bc-8b84-8cecac15cea1-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"34111627-23c0-44bc-8b84-8cecac15cea1\") " pod="openstack/ovsdbserver-nb-0" Dec 10 11:11:45 crc kubenswrapper[4780]: I1210 11:11:45.875239 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"ovsdbserver-nb-0\" (UID: \"34111627-23c0-44bc-8b84-8cecac15cea1\") " pod="openstack/ovsdbserver-nb-0" Dec 10 11:11:45 crc kubenswrapper[4780]: I1210 11:11:45.887936 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/34111627-23c0-44bc-8b84-8cecac15cea1-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"34111627-23c0-44bc-8b84-8cecac15cea1\") " pod="openstack/ovsdbserver-nb-0" Dec 10 11:11:45 crc kubenswrapper[4780]: I1210 11:11:45.890029 4780 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/34111627-23c0-44bc-8b84-8cecac15cea1-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: \"34111627-23c0-44bc-8b84-8cecac15cea1\") " pod="openstack/ovsdbserver-nb-0" Dec 10 11:11:45 crc kubenswrapper[4780]: I1210 11:11:45.915251 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/1916a14a-a83c-48d0-84f8-3777baa94835-oauth-serving-cert\") pod \"console-8495f5c58c-ndhht\" (UID: \"1916a14a-a83c-48d0-84f8-3777baa94835\") " pod="openshift-console/console-8495f5c58c-ndhht" Dec 10 11:11:45 crc kubenswrapper[4780]: I1210 11:11:45.917047 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/1916a14a-a83c-48d0-84f8-3777baa94835-console-config\") pod \"console-8495f5c58c-ndhht\" (UID: \"1916a14a-a83c-48d0-84f8-3777baa94835\") " pod="openshift-console/console-8495f5c58c-ndhht" Dec 10 11:11:45 crc kubenswrapper[4780]: I1210 11:11:45.917256 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/1916a14a-a83c-48d0-84f8-3777baa94835-service-ca\") pod \"console-8495f5c58c-ndhht\" (UID: \"1916a14a-a83c-48d0-84f8-3777baa94835\") " pod="openshift-console/console-8495f5c58c-ndhht" Dec 10 11:11:45 crc kubenswrapper[4780]: I1210 11:11:45.918119 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/1916a14a-a83c-48d0-84f8-3777baa94835-console-oauth-config\") pod \"console-8495f5c58c-ndhht\" (UID: \"1916a14a-a83c-48d0-84f8-3777baa94835\") " pod="openshift-console/console-8495f5c58c-ndhht" Dec 10 11:11:45 crc kubenswrapper[4780]: I1210 11:11:45.918513 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/1916a14a-a83c-48d0-84f8-3777baa94835-trusted-ca-bundle\") pod \"console-8495f5c58c-ndhht\" (UID: \"1916a14a-a83c-48d0-84f8-3777baa94835\") " pod="openshift-console/console-8495f5c58c-ndhht" Dec 10 11:11:45 crc kubenswrapper[4780]: I1210 11:11:45.918659 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/1916a14a-a83c-48d0-84f8-3777baa94835-console-serving-cert\") pod \"console-8495f5c58c-ndhht\" (UID: \"1916a14a-a83c-48d0-84f8-3777baa94835\") " pod="openshift-console/console-8495f5c58c-ndhht" Dec 10 11:11:45 crc kubenswrapper[4780]: I1210 11:11:45.919043 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dpqnn\" (UniqueName: \"kubernetes.io/projected/1916a14a-a83c-48d0-84f8-3777baa94835-kube-api-access-dpqnn\") pod \"console-8495f5c58c-ndhht\" (UID: \"1916a14a-a83c-48d0-84f8-3777baa94835\") " pod="openshift-console/console-8495f5c58c-ndhht" Dec 10 11:11:45 crc kubenswrapper[4780]: I1210 11:11:45.989888 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/1916a14a-a83c-48d0-84f8-3777baa94835-oauth-serving-cert\") pod \"console-8495f5c58c-ndhht\" (UID: \"1916a14a-a83c-48d0-84f8-3777baa94835\") " pod="openshift-console/console-8495f5c58c-ndhht" Dec 10 11:11:45 crc kubenswrapper[4780]: I1210 11:11:45.991388 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/1916a14a-a83c-48d0-84f8-3777baa94835-console-oauth-config\") pod \"console-8495f5c58c-ndhht\" (UID: \"1916a14a-a83c-48d0-84f8-3777baa94835\") " pod="openshift-console/console-8495f5c58c-ndhht" Dec 10 11:11:45 crc kubenswrapper[4780]: I1210 11:11:45.991653 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/1916a14a-a83c-48d0-84f8-3777baa94835-service-ca\") pod \"console-8495f5c58c-ndhht\" (UID: \"1916a14a-a83c-48d0-84f8-3777baa94835\") " pod="openshift-console/console-8495f5c58c-ndhht" Dec 10 11:11:45 crc kubenswrapper[4780]: I1210 11:11:45.994405 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/prometheus-metric-storage-0" Dec 10 11:11:46 crc kubenswrapper[4780]: I1210 11:11:46.001424 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/1916a14a-a83c-48d0-84f8-3777baa94835-console-serving-cert\") pod \"console-8495f5c58c-ndhht\" (UID: \"1916a14a-a83c-48d0-84f8-3777baa94835\") " pod="openshift-console/console-8495f5c58c-ndhht" Dec 10 11:11:46 crc kubenswrapper[4780]: I1210 11:11:46.001604 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/1916a14a-a83c-48d0-84f8-3777baa94835-trusted-ca-bundle\") pod \"console-8495f5c58c-ndhht\" (UID: \"1916a14a-a83c-48d0-84f8-3777baa94835\") " pod="openshift-console/console-8495f5c58c-ndhht" Dec 10 11:11:46 crc kubenswrapper[4780]: I1210 11:11:46.025182 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/1916a14a-a83c-48d0-84f8-3777baa94835-console-config\") pod \"console-8495f5c58c-ndhht\" (UID: \"1916a14a-a83c-48d0-84f8-3777baa94835\") " pod="openshift-console/console-8495f5c58c-ndhht" Dec 10 11:11:46 crc kubenswrapper[4780]: I1210 11:11:46.033753 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dpqnn\" (UniqueName: \"kubernetes.io/projected/1916a14a-a83c-48d0-84f8-3777baa94835-kube-api-access-dpqnn\") pod \"console-8495f5c58c-ndhht\" (UID: \"1916a14a-a83c-48d0-84f8-3777baa94835\") " pod="openshift-console/console-8495f5c58c-ndhht" Dec 10 11:11:46 crc kubenswrapper[4780]: I1210 11:11:46.078408 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-wt5zb"] Dec 10 11:11:46 crc kubenswrapper[4780]: I1210 11:11:46.080793 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-wt5zb"] Dec 10 11:11:46 crc kubenswrapper[4780]: I1210 11:11:46.081214 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-wt5zb" Dec 10 11:11:46 crc kubenswrapper[4780]: I1210 11:11:46.089613 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncontroller-ovncontroller-dockercfg-jhmtr" Dec 10 11:11:46 crc kubenswrapper[4780]: I1210 11:11:46.102413 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-ovs-hzgvf"] Dec 10 11:11:46 crc kubenswrapper[4780]: I1210 11:11:46.107822 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovncontroller-ovndbs" Dec 10 11:11:46 crc kubenswrapper[4780]: I1210 11:11:46.108124 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-scripts" Dec 10 11:11:46 crc kubenswrapper[4780]: I1210 11:11:46.110800 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-ovs-hzgvf" Dec 10 11:11:46 crc kubenswrapper[4780]: I1210 11:11:46.154634 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-8495f5c58c-ndhht" Dec 10 11:11:46 crc kubenswrapper[4780]: I1210 11:11:46.158500 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-ovs-hzgvf"] Dec 10 11:11:46 crc kubenswrapper[4780]: I1210 11:11:46.181387 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-nb-0" Dec 10 11:11:46 crc kubenswrapper[4780]: I1210 11:11:46.234801 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/6bd77f46-f3d3-45a7-bc8e-f3de677e1583-var-run\") pod \"ovn-controller-wt5zb\" (UID: \"6bd77f46-f3d3-45a7-bc8e-f3de677e1583\") " pod="openstack/ovn-controller-wt5zb" Dec 10 11:11:46 crc kubenswrapper[4780]: I1210 11:11:46.235084 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/6bd77f46-f3d3-45a7-bc8e-f3de677e1583-var-run-ovn\") pod \"ovn-controller-wt5zb\" (UID: \"6bd77f46-f3d3-45a7-bc8e-f3de677e1583\") " pod="openstack/ovn-controller-wt5zb" Dec 10 11:11:46 crc kubenswrapper[4780]: I1210 11:11:46.235240 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/6bd77f46-f3d3-45a7-bc8e-f3de677e1583-ovn-controller-tls-certs\") pod \"ovn-controller-wt5zb\" (UID: \"6bd77f46-f3d3-45a7-bc8e-f3de677e1583\") " pod="openstack/ovn-controller-wt5zb" Dec 10 11:11:46 crc kubenswrapper[4780]: I1210 11:11:46.235282 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5g8dc\" (UniqueName: \"kubernetes.io/projected/6bd77f46-f3d3-45a7-bc8e-f3de677e1583-kube-api-access-5g8dc\") pod \"ovn-controller-wt5zb\" (UID: \"6bd77f46-f3d3-45a7-bc8e-f3de677e1583\") " pod="openstack/ovn-controller-wt5zb" Dec 10 11:11:46 crc kubenswrapper[4780]: I1210 11:11:46.235326 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/6bd77f46-f3d3-45a7-bc8e-f3de677e1583-scripts\") pod \"ovn-controller-wt5zb\" (UID: \"6bd77f46-f3d3-45a7-bc8e-f3de677e1583\") " pod="openstack/ovn-controller-wt5zb" Dec 10 11:11:46 crc kubenswrapper[4780]: I1210 11:11:46.235799 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started 
for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/6bd77f46-f3d3-45a7-bc8e-f3de677e1583-var-log-ovn\") pod \"ovn-controller-wt5zb\" (UID: \"6bd77f46-f3d3-45a7-bc8e-f3de677e1583\") " pod="openstack/ovn-controller-wt5zb" Dec 10 11:11:46 crc kubenswrapper[4780]: I1210 11:11:46.235863 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6bd77f46-f3d3-45a7-bc8e-f3de677e1583-combined-ca-bundle\") pod \"ovn-controller-wt5zb\" (UID: \"6bd77f46-f3d3-45a7-bc8e-f3de677e1583\") " pod="openstack/ovn-controller-wt5zb" Dec 10 11:11:46 crc kubenswrapper[4780]: I1210 11:11:46.343603 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/6bd77f46-f3d3-45a7-bc8e-f3de677e1583-ovn-controller-tls-certs\") pod \"ovn-controller-wt5zb\" (UID: \"6bd77f46-f3d3-45a7-bc8e-f3de677e1583\") " pod="openstack/ovn-controller-wt5zb" Dec 10 11:11:46 crc kubenswrapper[4780]: I1210 11:11:46.343667 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5g8dc\" (UniqueName: \"kubernetes.io/projected/6bd77f46-f3d3-45a7-bc8e-f3de677e1583-kube-api-access-5g8dc\") pod \"ovn-controller-wt5zb\" (UID: \"6bd77f46-f3d3-45a7-bc8e-f3de677e1583\") " pod="openstack/ovn-controller-wt5zb" Dec 10 11:11:46 crc kubenswrapper[4780]: I1210 11:11:46.343701 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/6bd77f46-f3d3-45a7-bc8e-f3de677e1583-scripts\") pod \"ovn-controller-wt5zb\" (UID: \"6bd77f46-f3d3-45a7-bc8e-f3de677e1583\") " pod="openstack/ovn-controller-wt5zb" Dec 10 11:11:46 crc kubenswrapper[4780]: I1210 11:11:46.343818 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/1c086cc9-263e-4d8e-b3fb-a64fea7f179c-etc-ovs\") pod \"ovn-controller-ovs-hzgvf\" (UID: \"1c086cc9-263e-4d8e-b3fb-a64fea7f179c\") " pod="openstack/ovn-controller-ovs-hzgvf" Dec 10 11:11:46 crc kubenswrapper[4780]: I1210 11:11:46.344051 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hbmvf\" (UniqueName: \"kubernetes.io/projected/1c086cc9-263e-4d8e-b3fb-a64fea7f179c-kube-api-access-hbmvf\") pod \"ovn-controller-ovs-hzgvf\" (UID: \"1c086cc9-263e-4d8e-b3fb-a64fea7f179c\") " pod="openstack/ovn-controller-ovs-hzgvf" Dec 10 11:11:46 crc kubenswrapper[4780]: I1210 11:11:46.344112 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/1c086cc9-263e-4d8e-b3fb-a64fea7f179c-var-run\") pod \"ovn-controller-ovs-hzgvf\" (UID: \"1c086cc9-263e-4d8e-b3fb-a64fea7f179c\") " pod="openstack/ovn-controller-ovs-hzgvf" Dec 10 11:11:46 crc kubenswrapper[4780]: I1210 11:11:46.344143 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/1c086cc9-263e-4d8e-b3fb-a64fea7f179c-var-lib\") pod \"ovn-controller-ovs-hzgvf\" (UID: \"1c086cc9-263e-4d8e-b3fb-a64fea7f179c\") " pod="openstack/ovn-controller-ovs-hzgvf" Dec 10 11:11:46 crc kubenswrapper[4780]: I1210 11:11:46.344171 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log-ovn\" (UniqueName: 
\"kubernetes.io/host-path/6bd77f46-f3d3-45a7-bc8e-f3de677e1583-var-log-ovn\") pod \"ovn-controller-wt5zb\" (UID: \"6bd77f46-f3d3-45a7-bc8e-f3de677e1583\") " pod="openstack/ovn-controller-wt5zb" Dec 10 11:11:46 crc kubenswrapper[4780]: I1210 11:11:46.344193 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/1c086cc9-263e-4d8e-b3fb-a64fea7f179c-var-log\") pod \"ovn-controller-ovs-hzgvf\" (UID: \"1c086cc9-263e-4d8e-b3fb-a64fea7f179c\") " pod="openstack/ovn-controller-ovs-hzgvf" Dec 10 11:11:46 crc kubenswrapper[4780]: I1210 11:11:46.344247 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6bd77f46-f3d3-45a7-bc8e-f3de677e1583-combined-ca-bundle\") pod \"ovn-controller-wt5zb\" (UID: \"6bd77f46-f3d3-45a7-bc8e-f3de677e1583\") " pod="openstack/ovn-controller-wt5zb" Dec 10 11:11:46 crc kubenswrapper[4780]: I1210 11:11:46.344345 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/6bd77f46-f3d3-45a7-bc8e-f3de677e1583-var-run\") pod \"ovn-controller-wt5zb\" (UID: \"6bd77f46-f3d3-45a7-bc8e-f3de677e1583\") " pod="openstack/ovn-controller-wt5zb" Dec 10 11:11:46 crc kubenswrapper[4780]: I1210 11:11:46.344399 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/1c086cc9-263e-4d8e-b3fb-a64fea7f179c-scripts\") pod \"ovn-controller-ovs-hzgvf\" (UID: \"1c086cc9-263e-4d8e-b3fb-a64fea7f179c\") " pod="openstack/ovn-controller-ovs-hzgvf" Dec 10 11:11:46 crc kubenswrapper[4780]: I1210 11:11:46.344430 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/6bd77f46-f3d3-45a7-bc8e-f3de677e1583-var-run-ovn\") pod \"ovn-controller-wt5zb\" (UID: \"6bd77f46-f3d3-45a7-bc8e-f3de677e1583\") " pod="openstack/ovn-controller-wt5zb" Dec 10 11:11:46 crc kubenswrapper[4780]: I1210 11:11:46.345290 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/6bd77f46-f3d3-45a7-bc8e-f3de677e1583-var-run-ovn\") pod \"ovn-controller-wt5zb\" (UID: \"6bd77f46-f3d3-45a7-bc8e-f3de677e1583\") " pod="openstack/ovn-controller-wt5zb" Dec 10 11:11:46 crc kubenswrapper[4780]: I1210 11:11:46.345478 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/6bd77f46-f3d3-45a7-bc8e-f3de677e1583-var-log-ovn\") pod \"ovn-controller-wt5zb\" (UID: \"6bd77f46-f3d3-45a7-bc8e-f3de677e1583\") " pod="openstack/ovn-controller-wt5zb" Dec 10 11:11:46 crc kubenswrapper[4780]: I1210 11:11:46.346104 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/6bd77f46-f3d3-45a7-bc8e-f3de677e1583-var-run\") pod \"ovn-controller-wt5zb\" (UID: \"6bd77f46-f3d3-45a7-bc8e-f3de677e1583\") " pod="openstack/ovn-controller-wt5zb" Dec 10 11:11:46 crc kubenswrapper[4780]: I1210 11:11:46.352289 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/6bd77f46-f3d3-45a7-bc8e-f3de677e1583-scripts\") pod \"ovn-controller-wt5zb\" (UID: \"6bd77f46-f3d3-45a7-bc8e-f3de677e1583\") " pod="openstack/ovn-controller-wt5zb" Dec 10 11:11:46 crc kubenswrapper[4780]: I1210 11:11:46.386182 
4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5g8dc\" (UniqueName: \"kubernetes.io/projected/6bd77f46-f3d3-45a7-bc8e-f3de677e1583-kube-api-access-5g8dc\") pod \"ovn-controller-wt5zb\" (UID: \"6bd77f46-f3d3-45a7-bc8e-f3de677e1583\") " pod="openstack/ovn-controller-wt5zb" Dec 10 11:11:46 crc kubenswrapper[4780]: I1210 11:11:46.420014 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/6bd77f46-f3d3-45a7-bc8e-f3de677e1583-ovn-controller-tls-certs\") pod \"ovn-controller-wt5zb\" (UID: \"6bd77f46-f3d3-45a7-bc8e-f3de677e1583\") " pod="openstack/ovn-controller-wt5zb" Dec 10 11:11:46 crc kubenswrapper[4780]: I1210 11:11:46.431001 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6bd77f46-f3d3-45a7-bc8e-f3de677e1583-combined-ca-bundle\") pod \"ovn-controller-wt5zb\" (UID: \"6bd77f46-f3d3-45a7-bc8e-f3de677e1583\") " pod="openstack/ovn-controller-wt5zb" Dec 10 11:11:46 crc kubenswrapper[4780]: I1210 11:11:46.446868 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hbmvf\" (UniqueName: \"kubernetes.io/projected/1c086cc9-263e-4d8e-b3fb-a64fea7f179c-kube-api-access-hbmvf\") pod \"ovn-controller-ovs-hzgvf\" (UID: \"1c086cc9-263e-4d8e-b3fb-a64fea7f179c\") " pod="openstack/ovn-controller-ovs-hzgvf" Dec 10 11:11:46 crc kubenswrapper[4780]: I1210 11:11:46.446981 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/1c086cc9-263e-4d8e-b3fb-a64fea7f179c-var-run\") pod \"ovn-controller-ovs-hzgvf\" (UID: \"1c086cc9-263e-4d8e-b3fb-a64fea7f179c\") " pod="openstack/ovn-controller-ovs-hzgvf" Dec 10 11:11:46 crc kubenswrapper[4780]: I1210 11:11:46.447014 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/1c086cc9-263e-4d8e-b3fb-a64fea7f179c-var-lib\") pod \"ovn-controller-ovs-hzgvf\" (UID: \"1c086cc9-263e-4d8e-b3fb-a64fea7f179c\") " pod="openstack/ovn-controller-ovs-hzgvf" Dec 10 11:11:46 crc kubenswrapper[4780]: I1210 11:11:46.447043 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/1c086cc9-263e-4d8e-b3fb-a64fea7f179c-var-log\") pod \"ovn-controller-ovs-hzgvf\" (UID: \"1c086cc9-263e-4d8e-b3fb-a64fea7f179c\") " pod="openstack/ovn-controller-ovs-hzgvf" Dec 10 11:11:46 crc kubenswrapper[4780]: I1210 11:11:46.447112 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/1c086cc9-263e-4d8e-b3fb-a64fea7f179c-scripts\") pod \"ovn-controller-ovs-hzgvf\" (UID: \"1c086cc9-263e-4d8e-b3fb-a64fea7f179c\") " pod="openstack/ovn-controller-ovs-hzgvf" Dec 10 11:11:46 crc kubenswrapper[4780]: I1210 11:11:46.447197 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/1c086cc9-263e-4d8e-b3fb-a64fea7f179c-etc-ovs\") pod \"ovn-controller-ovs-hzgvf\" (UID: \"1c086cc9-263e-4d8e-b3fb-a64fea7f179c\") " pod="openstack/ovn-controller-ovs-hzgvf" Dec 10 11:11:46 crc kubenswrapper[4780]: I1210 11:11:46.447599 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/1c086cc9-263e-4d8e-b3fb-a64fea7f179c-etc-ovs\") pod 
\"ovn-controller-ovs-hzgvf\" (UID: \"1c086cc9-263e-4d8e-b3fb-a64fea7f179c\") " pod="openstack/ovn-controller-ovs-hzgvf" Dec 10 11:11:46 crc kubenswrapper[4780]: I1210 11:11:46.447657 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/1c086cc9-263e-4d8e-b3fb-a64fea7f179c-var-lib\") pod \"ovn-controller-ovs-hzgvf\" (UID: \"1c086cc9-263e-4d8e-b3fb-a64fea7f179c\") " pod="openstack/ovn-controller-ovs-hzgvf" Dec 10 11:11:46 crc kubenswrapper[4780]: I1210 11:11:46.447725 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/1c086cc9-263e-4d8e-b3fb-a64fea7f179c-var-log\") pod \"ovn-controller-ovs-hzgvf\" (UID: \"1c086cc9-263e-4d8e-b3fb-a64fea7f179c\") " pod="openstack/ovn-controller-ovs-hzgvf" Dec 10 11:11:46 crc kubenswrapper[4780]: I1210 11:11:46.448545 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/1c086cc9-263e-4d8e-b3fb-a64fea7f179c-var-run\") pod \"ovn-controller-ovs-hzgvf\" (UID: \"1c086cc9-263e-4d8e-b3fb-a64fea7f179c\") " pod="openstack/ovn-controller-ovs-hzgvf" Dec 10 11:11:46 crc kubenswrapper[4780]: I1210 11:11:46.455610 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-wt5zb" Dec 10 11:11:46 crc kubenswrapper[4780]: I1210 11:11:46.457504 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/1c086cc9-263e-4d8e-b3fb-a64fea7f179c-scripts\") pod \"ovn-controller-ovs-hzgvf\" (UID: \"1c086cc9-263e-4d8e-b3fb-a64fea7f179c\") " pod="openstack/ovn-controller-ovs-hzgvf" Dec 10 11:11:46 crc kubenswrapper[4780]: I1210 11:11:46.475021 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hbmvf\" (UniqueName: \"kubernetes.io/projected/1c086cc9-263e-4d8e-b3fb-a64fea7f179c-kube-api-access-hbmvf\") pod \"ovn-controller-ovs-hzgvf\" (UID: \"1c086cc9-263e-4d8e-b3fb-a64fea7f179c\") " pod="openstack/ovn-controller-ovs-hzgvf" Dec 10 11:11:46 crc kubenswrapper[4780]: I1210 11:11:46.482258 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-ovs-hzgvf" Dec 10 11:11:46 crc kubenswrapper[4780]: I1210 11:11:46.965102 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"82752cbd-d657-4c6d-94f6-e9e75a4452c2","Type":"ContainerStarted","Data":"c7469e1ae2fccd02425e0d2fb3215f70fae34bf139489ae83244595a22003d2b"} Dec 10 11:11:47 crc kubenswrapper[4780]: I1210 11:11:47.761538 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/observability-ui-dashboards-7d5fb4cbfb-26rg4"] Dec 10 11:11:47 crc kubenswrapper[4780]: W1210 11:11:47.795768 4780 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf1e3b9ec_1036_4bee_bbc8_336293208b48.slice/crio-f86b9108cd215e2eddf582a2922e322bfaaf51b910f369d765a71b3113610ed4 WatchSource:0}: Error finding container f86b9108cd215e2eddf582a2922e322bfaaf51b910f369d765a71b3113610ed4: Status 404 returned error can't find the container with id f86b9108cd215e2eddf582a2922e322bfaaf51b910f369d765a71b3113610ed4 Dec 10 11:11:48 crc kubenswrapper[4780]: I1210 11:11:48.001319 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/observability-ui-dashboards-7d5fb4cbfb-26rg4" event={"ID":"f1e3b9ec-1036-4bee-bbc8-336293208b48","Type":"ContainerStarted","Data":"f86b9108cd215e2eddf582a2922e322bfaaf51b910f369d765a71b3113610ed4"} Dec 10 11:11:48 crc kubenswrapper[4780]: I1210 11:11:48.204276 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-wt5zb"] Dec 10 11:11:48 crc kubenswrapper[4780]: I1210 11:11:48.321466 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-8495f5c58c-ndhht"] Dec 10 11:11:48 crc kubenswrapper[4780]: I1210 11:11:48.518695 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/prometheus-metric-storage-0"] Dec 10 11:11:48 crc kubenswrapper[4780]: I1210 11:11:48.573225 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovsdbserver-sb-0"] Dec 10 11:11:48 crc kubenswrapper[4780]: I1210 11:11:48.581654 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovsdbserver-sb-0" Dec 10 11:11:48 crc kubenswrapper[4780]: I1210 11:11:48.589145 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncluster-ovndbcluster-sb-dockercfg-sp9hz" Dec 10 11:11:48 crc kubenswrapper[4780]: I1210 11:11:48.607430 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-sb-config" Dec 10 11:11:48 crc kubenswrapper[4780]: I1210 11:11:48.607623 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovndbcluster-sb-ovndbs" Dec 10 11:11:48 crc kubenswrapper[4780]: I1210 11:11:48.608085 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-sb-scripts" Dec 10 11:11:48 crc kubenswrapper[4780]: I1210 11:11:48.610044 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-sb-0"] Dec 10 11:11:49 crc kubenswrapper[4780]: I1210 11:11:49.066253 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"bef902c7-4e5f-4af9-bda4-0c92b8521901","Type":"ContainerStarted","Data":"4413fd9fc37001d8606e09b30b54f7344fdb99fa6f034cbd6c180a34e148583c"} Dec 10 11:11:49 crc kubenswrapper[4780]: I1210 11:11:49.074235 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-wt5zb" event={"ID":"6bd77f46-f3d3-45a7-bc8e-f3de677e1583","Type":"ContainerStarted","Data":"ed7edf973f7ec4e1beeb34679e659dd3d15a1c5cb112d47c696dc369999dafda"} Dec 10 11:11:49 crc kubenswrapper[4780]: I1210 11:11:49.081718 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/1b0a6811-e7b0-4c35-b54f-6b7a457b68d1-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"1b0a6811-e7b0-4c35-b54f-6b7a457b68d1\") " pod="openstack/ovsdbserver-sb-0" Dec 10 11:11:49 crc kubenswrapper[4780]: I1210 11:11:49.081869 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1b0a6811-e7b0-4c35-b54f-6b7a457b68d1-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: \"1b0a6811-e7b0-4c35-b54f-6b7a457b68d1\") " pod="openstack/ovsdbserver-sb-0" Dec 10 11:11:49 crc kubenswrapper[4780]: I1210 11:11:49.082003 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/1b0a6811-e7b0-4c35-b54f-6b7a457b68d1-scripts\") pod \"ovsdbserver-sb-0\" (UID: \"1b0a6811-e7b0-4c35-b54f-6b7a457b68d1\") " pod="openstack/ovsdbserver-sb-0" Dec 10 11:11:49 crc kubenswrapper[4780]: I1210 11:11:49.083539 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"ovsdbserver-sb-0\" (UID: \"1b0a6811-e7b0-4c35-b54f-6b7a457b68d1\") " pod="openstack/ovsdbserver-sb-0" Dec 10 11:11:49 crc kubenswrapper[4780]: I1210 11:11:49.084174 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1b0a6811-e7b0-4c35-b54f-6b7a457b68d1-config\") pod \"ovsdbserver-sb-0\" (UID: \"1b0a6811-e7b0-4c35-b54f-6b7a457b68d1\") " pod="openstack/ovsdbserver-sb-0" Dec 10 11:11:49 crc kubenswrapper[4780]: I1210 11:11:49.086118 4780 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-p9tts\" (UniqueName: \"kubernetes.io/projected/1b0a6811-e7b0-4c35-b54f-6b7a457b68d1-kube-api-access-p9tts\") pod \"ovsdbserver-sb-0\" (UID: \"1b0a6811-e7b0-4c35-b54f-6b7a457b68d1\") " pod="openstack/ovsdbserver-sb-0" Dec 10 11:11:49 crc kubenswrapper[4780]: I1210 11:11:49.086169 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/1b0a6811-e7b0-4c35-b54f-6b7a457b68d1-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"1b0a6811-e7b0-4c35-b54f-6b7a457b68d1\") " pod="openstack/ovsdbserver-sb-0" Dec 10 11:11:49 crc kubenswrapper[4780]: I1210 11:11:49.086246 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/1b0a6811-e7b0-4c35-b54f-6b7a457b68d1-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: \"1b0a6811-e7b0-4c35-b54f-6b7a457b68d1\") " pod="openstack/ovsdbserver-sb-0" Dec 10 11:11:49 crc kubenswrapper[4780]: I1210 11:11:49.103421 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-8495f5c58c-ndhht" event={"ID":"1916a14a-a83c-48d0-84f8-3777baa94835","Type":"ContainerStarted","Data":"9f24419bd4f2a493621bd3f3118438e9f83df0d0d29d144d0717a6932d9db953"} Dec 10 11:11:49 crc kubenswrapper[4780]: I1210 11:11:49.205700 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-p9tts\" (UniqueName: \"kubernetes.io/projected/1b0a6811-e7b0-4c35-b54f-6b7a457b68d1-kube-api-access-p9tts\") pod \"ovsdbserver-sb-0\" (UID: \"1b0a6811-e7b0-4c35-b54f-6b7a457b68d1\") " pod="openstack/ovsdbserver-sb-0" Dec 10 11:11:49 crc kubenswrapper[4780]: I1210 11:11:49.205813 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/1b0a6811-e7b0-4c35-b54f-6b7a457b68d1-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"1b0a6811-e7b0-4c35-b54f-6b7a457b68d1\") " pod="openstack/ovsdbserver-sb-0" Dec 10 11:11:49 crc kubenswrapper[4780]: I1210 11:11:49.206049 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/1b0a6811-e7b0-4c35-b54f-6b7a457b68d1-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: \"1b0a6811-e7b0-4c35-b54f-6b7a457b68d1\") " pod="openstack/ovsdbserver-sb-0" Dec 10 11:11:49 crc kubenswrapper[4780]: I1210 11:11:49.206547 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/1b0a6811-e7b0-4c35-b54f-6b7a457b68d1-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"1b0a6811-e7b0-4c35-b54f-6b7a457b68d1\") " pod="openstack/ovsdbserver-sb-0" Dec 10 11:11:49 crc kubenswrapper[4780]: I1210 11:11:49.206689 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1b0a6811-e7b0-4c35-b54f-6b7a457b68d1-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: \"1b0a6811-e7b0-4c35-b54f-6b7a457b68d1\") " pod="openstack/ovsdbserver-sb-0" Dec 10 11:11:49 crc kubenswrapper[4780]: I1210 11:11:49.206948 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/1b0a6811-e7b0-4c35-b54f-6b7a457b68d1-scripts\") pod \"ovsdbserver-sb-0\" (UID: 
\"1b0a6811-e7b0-4c35-b54f-6b7a457b68d1\") " pod="openstack/ovsdbserver-sb-0" Dec 10 11:11:49 crc kubenswrapper[4780]: I1210 11:11:49.207000 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"ovsdbserver-sb-0\" (UID: \"1b0a6811-e7b0-4c35-b54f-6b7a457b68d1\") " pod="openstack/ovsdbserver-sb-0" Dec 10 11:11:49 crc kubenswrapper[4780]: I1210 11:11:49.207036 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1b0a6811-e7b0-4c35-b54f-6b7a457b68d1-config\") pod \"ovsdbserver-sb-0\" (UID: \"1b0a6811-e7b0-4c35-b54f-6b7a457b68d1\") " pod="openstack/ovsdbserver-sb-0" Dec 10 11:11:49 crc kubenswrapper[4780]: I1210 11:11:49.209239 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/1b0a6811-e7b0-4c35-b54f-6b7a457b68d1-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: \"1b0a6811-e7b0-4c35-b54f-6b7a457b68d1\") " pod="openstack/ovsdbserver-sb-0" Dec 10 11:11:49 crc kubenswrapper[4780]: I1210 11:11:49.211572 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1b0a6811-e7b0-4c35-b54f-6b7a457b68d1-config\") pod \"ovsdbserver-sb-0\" (UID: \"1b0a6811-e7b0-4c35-b54f-6b7a457b68d1\") " pod="openstack/ovsdbserver-sb-0" Dec 10 11:11:49 crc kubenswrapper[4780]: I1210 11:11:49.212174 4780 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"ovsdbserver-sb-0\" (UID: \"1b0a6811-e7b0-4c35-b54f-6b7a457b68d1\") device mount path \"/mnt/openstack/pv02\"" pod="openstack/ovsdbserver-sb-0" Dec 10 11:11:49 crc kubenswrapper[4780]: I1210 11:11:49.215229 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/1b0a6811-e7b0-4c35-b54f-6b7a457b68d1-scripts\") pod \"ovsdbserver-sb-0\" (UID: \"1b0a6811-e7b0-4c35-b54f-6b7a457b68d1\") " pod="openstack/ovsdbserver-sb-0" Dec 10 11:11:49 crc kubenswrapper[4780]: I1210 11:11:49.226345 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/1b0a6811-e7b0-4c35-b54f-6b7a457b68d1-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"1b0a6811-e7b0-4c35-b54f-6b7a457b68d1\") " pod="openstack/ovsdbserver-sb-0" Dec 10 11:11:49 crc kubenswrapper[4780]: I1210 11:11:49.258470 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1b0a6811-e7b0-4c35-b54f-6b7a457b68d1-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: \"1b0a6811-e7b0-4c35-b54f-6b7a457b68d1\") " pod="openstack/ovsdbserver-sb-0" Dec 10 11:11:49 crc kubenswrapper[4780]: I1210 11:11:49.269883 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/1b0a6811-e7b0-4c35-b54f-6b7a457b68d1-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"1b0a6811-e7b0-4c35-b54f-6b7a457b68d1\") " pod="openstack/ovsdbserver-sb-0" Dec 10 11:11:49 crc kubenswrapper[4780]: I1210 11:11:49.280491 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-p9tts\" (UniqueName: \"kubernetes.io/projected/1b0a6811-e7b0-4c35-b54f-6b7a457b68d1-kube-api-access-p9tts\") 
pod \"ovsdbserver-sb-0\" (UID: \"1b0a6811-e7b0-4c35-b54f-6b7a457b68d1\") " pod="openstack/ovsdbserver-sb-0" Dec 10 11:11:49 crc kubenswrapper[4780]: I1210 11:11:49.283871 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"ovsdbserver-sb-0\" (UID: \"1b0a6811-e7b0-4c35-b54f-6b7a457b68d1\") " pod="openstack/ovsdbserver-sb-0" Dec 10 11:11:49 crc kubenswrapper[4780]: I1210 11:11:49.319824 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-sb-0" Dec 10 11:11:49 crc kubenswrapper[4780]: I1210 11:11:49.710187 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-ovs-hzgvf"] Dec 10 11:11:49 crc kubenswrapper[4780]: W1210 11:11:49.738706 4780 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod1c086cc9_263e_4d8e_b3fb_a64fea7f179c.slice/crio-b177817b145e045fb477b80f41c4d9e306c1476a02e7ad78ce47a9b0378eebb4 WatchSource:0}: Error finding container b177817b145e045fb477b80f41c4d9e306c1476a02e7ad78ce47a9b0378eebb4: Status 404 returned error can't find the container with id b177817b145e045fb477b80f41c4d9e306c1476a02e7ad78ce47a9b0378eebb4 Dec 10 11:11:50 crc kubenswrapper[4780]: I1210 11:11:50.221082 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-hzgvf" event={"ID":"1c086cc9-263e-4d8e-b3fb-a64fea7f179c","Type":"ContainerStarted","Data":"b177817b145e045fb477b80f41c4d9e306c1476a02e7ad78ce47a9b0378eebb4"} Dec 10 11:11:50 crc kubenswrapper[4780]: I1210 11:11:50.253027 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-8495f5c58c-ndhht" event={"ID":"1916a14a-a83c-48d0-84f8-3777baa94835","Type":"ContainerStarted","Data":"558619bb7a3979339199aee55a418bd2a23a1349f964d0e0dc9938a5ce9ed16e"} Dec 10 11:11:50 crc kubenswrapper[4780]: I1210 11:11:50.296214 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/console-8495f5c58c-ndhht" podStartSLOduration=6.296174648 podStartE2EDuration="6.296174648s" podCreationTimestamp="2025-12-10 11:11:44 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 11:11:50.287048258 +0000 UTC m=+1615.140441711" watchObservedRunningTime="2025-12-10 11:11:50.296174648 +0000 UTC m=+1615.149568091" Dec 10 11:11:50 crc kubenswrapper[4780]: I1210 11:11:50.725874 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-nb-0"] Dec 10 11:11:50 crc kubenswrapper[4780]: W1210 11:11:50.816479 4780 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod34111627_23c0_44bc_8b84_8cecac15cea1.slice/crio-3d97859fc094c9f3cb09c1f0cb9e1fd64172a71d7de4b204a88f198e39189828 WatchSource:0}: Error finding container 3d97859fc094c9f3cb09c1f0cb9e1fd64172a71d7de4b204a88f198e39189828: Status 404 returned error can't find the container with id 3d97859fc094c9f3cb09c1f0cb9e1fd64172a71d7de4b204a88f198e39189828 Dec 10 11:11:50 crc kubenswrapper[4780]: I1210 11:11:50.881458 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-sb-0"] Dec 10 11:11:51 crc kubenswrapper[4780]: I1210 11:11:51.693223 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" 
event={"ID":"34111627-23c0-44bc-8b84-8cecac15cea1","Type":"ContainerStarted","Data":"3d97859fc094c9f3cb09c1f0cb9e1fd64172a71d7de4b204a88f198e39189828"} Dec 10 11:11:52 crc kubenswrapper[4780]: I1210 11:11:52.925638 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-metrics-jgcc9"] Dec 10 11:11:52 crc kubenswrapper[4780]: I1210 11:11:52.930759 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-metrics-jgcc9" Dec 10 11:11:52 crc kubenswrapper[4780]: I1210 11:11:52.934154 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-metrics-config" Dec 10 11:11:52 crc kubenswrapper[4780]: I1210 11:11:52.973533 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-metrics-jgcc9"] Dec 10 11:11:53 crc kubenswrapper[4780]: I1210 11:11:53.082237 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/72e2c2ed-0530-4846-9244-b93076ed5640-ovn-rundir\") pod \"ovn-controller-metrics-jgcc9\" (UID: \"72e2c2ed-0530-4846-9244-b93076ed5640\") " pod="openstack/ovn-controller-metrics-jgcc9" Dec 10 11:11:53 crc kubenswrapper[4780]: I1210 11:11:53.082367 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/72e2c2ed-0530-4846-9244-b93076ed5640-metrics-certs-tls-certs\") pod \"ovn-controller-metrics-jgcc9\" (UID: \"72e2c2ed-0530-4846-9244-b93076ed5640\") " pod="openstack/ovn-controller-metrics-jgcc9" Dec 10 11:11:53 crc kubenswrapper[4780]: I1210 11:11:53.082793 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/72e2c2ed-0530-4846-9244-b93076ed5640-ovs-rundir\") pod \"ovn-controller-metrics-jgcc9\" (UID: \"72e2c2ed-0530-4846-9244-b93076ed5640\") " pod="openstack/ovn-controller-metrics-jgcc9" Dec 10 11:11:53 crc kubenswrapper[4780]: I1210 11:11:53.083095 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-96t52\" (UniqueName: \"kubernetes.io/projected/72e2c2ed-0530-4846-9244-b93076ed5640-kube-api-access-96t52\") pod \"ovn-controller-metrics-jgcc9\" (UID: \"72e2c2ed-0530-4846-9244-b93076ed5640\") " pod="openstack/ovn-controller-metrics-jgcc9" Dec 10 11:11:53 crc kubenswrapper[4780]: I1210 11:11:53.083535 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/72e2c2ed-0530-4846-9244-b93076ed5640-config\") pod \"ovn-controller-metrics-jgcc9\" (UID: \"72e2c2ed-0530-4846-9244-b93076ed5640\") " pod="openstack/ovn-controller-metrics-jgcc9" Dec 10 11:11:53 crc kubenswrapper[4780]: I1210 11:11:53.083657 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/72e2c2ed-0530-4846-9244-b93076ed5640-combined-ca-bundle\") pod \"ovn-controller-metrics-jgcc9\" (UID: \"72e2c2ed-0530-4846-9244-b93076ed5640\") " pod="openstack/ovn-controller-metrics-jgcc9" Dec 10 11:11:53 crc kubenswrapper[4780]: I1210 11:11:53.188659 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/72e2c2ed-0530-4846-9244-b93076ed5640-ovn-rundir\") pod 
\"ovn-controller-metrics-jgcc9\" (UID: \"72e2c2ed-0530-4846-9244-b93076ed5640\") " pod="openstack/ovn-controller-metrics-jgcc9" Dec 10 11:11:53 crc kubenswrapper[4780]: I1210 11:11:53.187997 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/72e2c2ed-0530-4846-9244-b93076ed5640-ovn-rundir\") pod \"ovn-controller-metrics-jgcc9\" (UID: \"72e2c2ed-0530-4846-9244-b93076ed5640\") " pod="openstack/ovn-controller-metrics-jgcc9" Dec 10 11:11:53 crc kubenswrapper[4780]: I1210 11:11:53.189017 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/72e2c2ed-0530-4846-9244-b93076ed5640-metrics-certs-tls-certs\") pod \"ovn-controller-metrics-jgcc9\" (UID: \"72e2c2ed-0530-4846-9244-b93076ed5640\") " pod="openstack/ovn-controller-metrics-jgcc9" Dec 10 11:11:53 crc kubenswrapper[4780]: I1210 11:11:53.190505 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/72e2c2ed-0530-4846-9244-b93076ed5640-ovs-rundir\") pod \"ovn-controller-metrics-jgcc9\" (UID: \"72e2c2ed-0530-4846-9244-b93076ed5640\") " pod="openstack/ovn-controller-metrics-jgcc9" Dec 10 11:11:53 crc kubenswrapper[4780]: I1210 11:11:53.190613 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-96t52\" (UniqueName: \"kubernetes.io/projected/72e2c2ed-0530-4846-9244-b93076ed5640-kube-api-access-96t52\") pod \"ovn-controller-metrics-jgcc9\" (UID: \"72e2c2ed-0530-4846-9244-b93076ed5640\") " pod="openstack/ovn-controller-metrics-jgcc9" Dec 10 11:11:53 crc kubenswrapper[4780]: I1210 11:11:53.190804 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/72e2c2ed-0530-4846-9244-b93076ed5640-config\") pod \"ovn-controller-metrics-jgcc9\" (UID: \"72e2c2ed-0530-4846-9244-b93076ed5640\") " pod="openstack/ovn-controller-metrics-jgcc9" Dec 10 11:11:53 crc kubenswrapper[4780]: I1210 11:11:53.190859 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/72e2c2ed-0530-4846-9244-b93076ed5640-combined-ca-bundle\") pod \"ovn-controller-metrics-jgcc9\" (UID: \"72e2c2ed-0530-4846-9244-b93076ed5640\") " pod="openstack/ovn-controller-metrics-jgcc9" Dec 10 11:11:53 crc kubenswrapper[4780]: I1210 11:11:53.191002 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/72e2c2ed-0530-4846-9244-b93076ed5640-ovs-rundir\") pod \"ovn-controller-metrics-jgcc9\" (UID: \"72e2c2ed-0530-4846-9244-b93076ed5640\") " pod="openstack/ovn-controller-metrics-jgcc9" Dec 10 11:11:53 crc kubenswrapper[4780]: I1210 11:11:53.192281 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/72e2c2ed-0530-4846-9244-b93076ed5640-config\") pod \"ovn-controller-metrics-jgcc9\" (UID: \"72e2c2ed-0530-4846-9244-b93076ed5640\") " pod="openstack/ovn-controller-metrics-jgcc9" Dec 10 11:11:53 crc kubenswrapper[4780]: I1210 11:11:53.213519 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/72e2c2ed-0530-4846-9244-b93076ed5640-metrics-certs-tls-certs\") pod \"ovn-controller-metrics-jgcc9\" (UID: \"72e2c2ed-0530-4846-9244-b93076ed5640\") " 
pod="openstack/ovn-controller-metrics-jgcc9" Dec 10 11:11:53 crc kubenswrapper[4780]: I1210 11:11:53.220671 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/72e2c2ed-0530-4846-9244-b93076ed5640-combined-ca-bundle\") pod \"ovn-controller-metrics-jgcc9\" (UID: \"72e2c2ed-0530-4846-9244-b93076ed5640\") " pod="openstack/ovn-controller-metrics-jgcc9" Dec 10 11:11:53 crc kubenswrapper[4780]: I1210 11:11:53.223836 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-96t52\" (UniqueName: \"kubernetes.io/projected/72e2c2ed-0530-4846-9244-b93076ed5640-kube-api-access-96t52\") pod \"ovn-controller-metrics-jgcc9\" (UID: \"72e2c2ed-0530-4846-9244-b93076ed5640\") " pod="openstack/ovn-controller-metrics-jgcc9" Dec 10 11:11:53 crc kubenswrapper[4780]: I1210 11:11:53.281030 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-metrics-jgcc9" Dec 10 11:11:53 crc kubenswrapper[4780]: I1210 11:11:53.335893 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-666b6646f7-zc5ks"] Dec 10 11:11:53 crc kubenswrapper[4780]: I1210 11:11:53.382308 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-7fd796d7df-trtrt"] Dec 10 11:11:53 crc kubenswrapper[4780]: I1210 11:11:53.388097 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7fd796d7df-trtrt" Dec 10 11:11:53 crc kubenswrapper[4780]: I1210 11:11:53.406665 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovsdbserver-nb" Dec 10 11:11:53 crc kubenswrapper[4780]: I1210 11:11:53.480398 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-7fd796d7df-trtrt"] Dec 10 11:11:53 crc kubenswrapper[4780]: I1210 11:11:53.578443 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/ea884c19-2c44-4387-937a-61cc5c9aa861-ovsdbserver-nb\") pod \"dnsmasq-dns-7fd796d7df-trtrt\" (UID: \"ea884c19-2c44-4387-937a-61cc5c9aa861\") " pod="openstack/dnsmasq-dns-7fd796d7df-trtrt" Dec 10 11:11:53 crc kubenswrapper[4780]: I1210 11:11:53.578807 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ea884c19-2c44-4387-937a-61cc5c9aa861-config\") pod \"dnsmasq-dns-7fd796d7df-trtrt\" (UID: \"ea884c19-2c44-4387-937a-61cc5c9aa861\") " pod="openstack/dnsmasq-dns-7fd796d7df-trtrt" Dec 10 11:11:53 crc kubenswrapper[4780]: I1210 11:11:53.578988 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4qh99\" (UniqueName: \"kubernetes.io/projected/ea884c19-2c44-4387-937a-61cc5c9aa861-kube-api-access-4qh99\") pod \"dnsmasq-dns-7fd796d7df-trtrt\" (UID: \"ea884c19-2c44-4387-937a-61cc5c9aa861\") " pod="openstack/dnsmasq-dns-7fd796d7df-trtrt" Dec 10 11:11:53 crc kubenswrapper[4780]: I1210 11:11:53.579452 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ea884c19-2c44-4387-937a-61cc5c9aa861-dns-svc\") pod \"dnsmasq-dns-7fd796d7df-trtrt\" (UID: \"ea884c19-2c44-4387-937a-61cc5c9aa861\") " pod="openstack/dnsmasq-dns-7fd796d7df-trtrt" Dec 10 11:11:53 crc kubenswrapper[4780]: I1210 11:11:53.599216 4780 kubelet.go:2437] 
"SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-wjfv6"] Dec 10 11:11:53 crc kubenswrapper[4780]: I1210 11:11:53.627312 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-86db49b7ff-zl2c8"] Dec 10 11:11:53 crc kubenswrapper[4780]: I1210 11:11:53.631418 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-86db49b7ff-zl2c8" Dec 10 11:11:53 crc kubenswrapper[4780]: I1210 11:11:53.643279 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovsdbserver-sb" Dec 10 11:11:53 crc kubenswrapper[4780]: I1210 11:11:53.685376 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ea884c19-2c44-4387-937a-61cc5c9aa861-dns-svc\") pod \"dnsmasq-dns-7fd796d7df-trtrt\" (UID: \"ea884c19-2c44-4387-937a-61cc5c9aa861\") " pod="openstack/dnsmasq-dns-7fd796d7df-trtrt" Dec 10 11:11:53 crc kubenswrapper[4780]: I1210 11:11:53.685512 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/ea884c19-2c44-4387-937a-61cc5c9aa861-ovsdbserver-nb\") pod \"dnsmasq-dns-7fd796d7df-trtrt\" (UID: \"ea884c19-2c44-4387-937a-61cc5c9aa861\") " pod="openstack/dnsmasq-dns-7fd796d7df-trtrt" Dec 10 11:11:53 crc kubenswrapper[4780]: I1210 11:11:53.685688 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ea884c19-2c44-4387-937a-61cc5c9aa861-config\") pod \"dnsmasq-dns-7fd796d7df-trtrt\" (UID: \"ea884c19-2c44-4387-937a-61cc5c9aa861\") " pod="openstack/dnsmasq-dns-7fd796d7df-trtrt" Dec 10 11:11:53 crc kubenswrapper[4780]: I1210 11:11:53.685785 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4qh99\" (UniqueName: \"kubernetes.io/projected/ea884c19-2c44-4387-937a-61cc5c9aa861-kube-api-access-4qh99\") pod \"dnsmasq-dns-7fd796d7df-trtrt\" (UID: \"ea884c19-2c44-4387-937a-61cc5c9aa861\") " pod="openstack/dnsmasq-dns-7fd796d7df-trtrt" Dec 10 11:11:53 crc kubenswrapper[4780]: I1210 11:11:53.694805 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ea884c19-2c44-4387-937a-61cc5c9aa861-dns-svc\") pod \"dnsmasq-dns-7fd796d7df-trtrt\" (UID: \"ea884c19-2c44-4387-937a-61cc5c9aa861\") " pod="openstack/dnsmasq-dns-7fd796d7df-trtrt" Dec 10 11:11:53 crc kubenswrapper[4780]: I1210 11:11:53.694907 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ea884c19-2c44-4387-937a-61cc5c9aa861-config\") pod \"dnsmasq-dns-7fd796d7df-trtrt\" (UID: \"ea884c19-2c44-4387-937a-61cc5c9aa861\") " pod="openstack/dnsmasq-dns-7fd796d7df-trtrt" Dec 10 11:11:53 crc kubenswrapper[4780]: I1210 11:11:53.697430 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-86db49b7ff-zl2c8"] Dec 10 11:11:53 crc kubenswrapper[4780]: I1210 11:11:53.698469 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/ea884c19-2c44-4387-937a-61cc5c9aa861-ovsdbserver-nb\") pod \"dnsmasq-dns-7fd796d7df-trtrt\" (UID: \"ea884c19-2c44-4387-937a-61cc5c9aa861\") " pod="openstack/dnsmasq-dns-7fd796d7df-trtrt" Dec 10 11:11:53 crc kubenswrapper[4780]: I1210 11:11:53.719207 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-4qh99\" (UniqueName: \"kubernetes.io/projected/ea884c19-2c44-4387-937a-61cc5c9aa861-kube-api-access-4qh99\") pod \"dnsmasq-dns-7fd796d7df-trtrt\" (UID: \"ea884c19-2c44-4387-937a-61cc5c9aa861\") " pod="openstack/dnsmasq-dns-7fd796d7df-trtrt" Dec 10 11:11:54 crc kubenswrapper[4780]: I1210 11:11:54.198961 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7fd796d7df-trtrt" Dec 10 11:11:54 crc kubenswrapper[4780]: I1210 11:11:54.201475 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9a36247f-4fb6-4ec7-9688-aafdb77c1243-config\") pod \"dnsmasq-dns-86db49b7ff-zl2c8\" (UID: \"9a36247f-4fb6-4ec7-9688-aafdb77c1243\") " pod="openstack/dnsmasq-dns-86db49b7ff-zl2c8" Dec 10 11:11:54 crc kubenswrapper[4780]: I1210 11:11:54.201530 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/9a36247f-4fb6-4ec7-9688-aafdb77c1243-ovsdbserver-sb\") pod \"dnsmasq-dns-86db49b7ff-zl2c8\" (UID: \"9a36247f-4fb6-4ec7-9688-aafdb77c1243\") " pod="openstack/dnsmasq-dns-86db49b7ff-zl2c8" Dec 10 11:11:54 crc kubenswrapper[4780]: I1210 11:11:54.201599 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dgr7d\" (UniqueName: \"kubernetes.io/projected/9a36247f-4fb6-4ec7-9688-aafdb77c1243-kube-api-access-dgr7d\") pod \"dnsmasq-dns-86db49b7ff-zl2c8\" (UID: \"9a36247f-4fb6-4ec7-9688-aafdb77c1243\") " pod="openstack/dnsmasq-dns-86db49b7ff-zl2c8" Dec 10 11:11:54 crc kubenswrapper[4780]: I1210 11:11:54.201627 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/9a36247f-4fb6-4ec7-9688-aafdb77c1243-dns-svc\") pod \"dnsmasq-dns-86db49b7ff-zl2c8\" (UID: \"9a36247f-4fb6-4ec7-9688-aafdb77c1243\") " pod="openstack/dnsmasq-dns-86db49b7ff-zl2c8" Dec 10 11:11:54 crc kubenswrapper[4780]: I1210 11:11:54.201651 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/9a36247f-4fb6-4ec7-9688-aafdb77c1243-ovsdbserver-nb\") pod \"dnsmasq-dns-86db49b7ff-zl2c8\" (UID: \"9a36247f-4fb6-4ec7-9688-aafdb77c1243\") " pod="openstack/dnsmasq-dns-86db49b7ff-zl2c8" Dec 10 11:11:54 crc kubenswrapper[4780]: I1210 11:11:54.317426 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dgr7d\" (UniqueName: \"kubernetes.io/projected/9a36247f-4fb6-4ec7-9688-aafdb77c1243-kube-api-access-dgr7d\") pod \"dnsmasq-dns-86db49b7ff-zl2c8\" (UID: \"9a36247f-4fb6-4ec7-9688-aafdb77c1243\") " pod="openstack/dnsmasq-dns-86db49b7ff-zl2c8" Dec 10 11:11:54 crc kubenswrapper[4780]: I1210 11:11:54.317514 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/9a36247f-4fb6-4ec7-9688-aafdb77c1243-dns-svc\") pod \"dnsmasq-dns-86db49b7ff-zl2c8\" (UID: \"9a36247f-4fb6-4ec7-9688-aafdb77c1243\") " pod="openstack/dnsmasq-dns-86db49b7ff-zl2c8" Dec 10 11:11:54 crc kubenswrapper[4780]: I1210 11:11:54.317547 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/9a36247f-4fb6-4ec7-9688-aafdb77c1243-ovsdbserver-nb\") pod \"dnsmasq-dns-86db49b7ff-zl2c8\" (UID: 
\"9a36247f-4fb6-4ec7-9688-aafdb77c1243\") " pod="openstack/dnsmasq-dns-86db49b7ff-zl2c8" Dec 10 11:11:54 crc kubenswrapper[4780]: I1210 11:11:54.317757 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9a36247f-4fb6-4ec7-9688-aafdb77c1243-config\") pod \"dnsmasq-dns-86db49b7ff-zl2c8\" (UID: \"9a36247f-4fb6-4ec7-9688-aafdb77c1243\") " pod="openstack/dnsmasq-dns-86db49b7ff-zl2c8" Dec 10 11:11:54 crc kubenswrapper[4780]: I1210 11:11:54.317860 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/9a36247f-4fb6-4ec7-9688-aafdb77c1243-ovsdbserver-sb\") pod \"dnsmasq-dns-86db49b7ff-zl2c8\" (UID: \"9a36247f-4fb6-4ec7-9688-aafdb77c1243\") " pod="openstack/dnsmasq-dns-86db49b7ff-zl2c8" Dec 10 11:11:54 crc kubenswrapper[4780]: I1210 11:11:54.322286 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/9a36247f-4fb6-4ec7-9688-aafdb77c1243-ovsdbserver-sb\") pod \"dnsmasq-dns-86db49b7ff-zl2c8\" (UID: \"9a36247f-4fb6-4ec7-9688-aafdb77c1243\") " pod="openstack/dnsmasq-dns-86db49b7ff-zl2c8" Dec 10 11:11:54 crc kubenswrapper[4780]: I1210 11:11:54.326192 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9a36247f-4fb6-4ec7-9688-aafdb77c1243-config\") pod \"dnsmasq-dns-86db49b7ff-zl2c8\" (UID: \"9a36247f-4fb6-4ec7-9688-aafdb77c1243\") " pod="openstack/dnsmasq-dns-86db49b7ff-zl2c8" Dec 10 11:11:54 crc kubenswrapper[4780]: I1210 11:11:54.327506 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/9a36247f-4fb6-4ec7-9688-aafdb77c1243-ovsdbserver-nb\") pod \"dnsmasq-dns-86db49b7ff-zl2c8\" (UID: \"9a36247f-4fb6-4ec7-9688-aafdb77c1243\") " pod="openstack/dnsmasq-dns-86db49b7ff-zl2c8" Dec 10 11:11:54 crc kubenswrapper[4780]: I1210 11:11:54.330338 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/9a36247f-4fb6-4ec7-9688-aafdb77c1243-dns-svc\") pod \"dnsmasq-dns-86db49b7ff-zl2c8\" (UID: \"9a36247f-4fb6-4ec7-9688-aafdb77c1243\") " pod="openstack/dnsmasq-dns-86db49b7ff-zl2c8" Dec 10 11:11:54 crc kubenswrapper[4780]: I1210 11:11:54.380566 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dgr7d\" (UniqueName: \"kubernetes.io/projected/9a36247f-4fb6-4ec7-9688-aafdb77c1243-kube-api-access-dgr7d\") pod \"dnsmasq-dns-86db49b7ff-zl2c8\" (UID: \"9a36247f-4fb6-4ec7-9688-aafdb77c1243\") " pod="openstack/dnsmasq-dns-86db49b7ff-zl2c8" Dec 10 11:11:54 crc kubenswrapper[4780]: I1210 11:11:54.589364 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-86db49b7ff-zl2c8" Dec 10 11:11:56 crc kubenswrapper[4780]: I1210 11:11:56.155411 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/console-8495f5c58c-ndhht" Dec 10 11:11:56 crc kubenswrapper[4780]: I1210 11:11:56.156062 4780 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-console/console-8495f5c58c-ndhht" Dec 10 11:11:56 crc kubenswrapper[4780]: I1210 11:11:56.179820 4780 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-console/console-8495f5c58c-ndhht" Dec 10 11:11:56 crc kubenswrapper[4780]: I1210 11:11:56.260789 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/console-8495f5c58c-ndhht" Dec 10 11:11:56 crc kubenswrapper[4780]: I1210 11:11:56.383971 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-console/console-56bd7bfcb4-rl4rr"] Dec 10 11:12:07 crc kubenswrapper[4780]: I1210 11:12:07.033235 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"1b0a6811-e7b0-4c35-b54f-6b7a457b68d1","Type":"ContainerStarted","Data":"4342e724c1fc4c8fdba2707a4ccb7f405982256947227ad1eec842a79b1da0c7"} Dec 10 11:12:11 crc kubenswrapper[4780]: E1210 11:12:11.960895 4780 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/cluster-observability-operator/obo-prometheus-operator-prometheus-config-reloader-rhel9@sha256:1133c973c7472c665f910a722e19c8e2e27accb34b90fab67f14548627ce9c62" Dec 10 11:12:11 crc kubenswrapper[4780]: E1210 11:12:11.964381 4780 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:init-config-reloader,Image:registry.redhat.io/cluster-observability-operator/obo-prometheus-operator-prometheus-config-reloader-rhel9@sha256:1133c973c7472c665f910a722e19c8e2e27accb34b90fab67f14548627ce9c62,Command:[/bin/prometheus-config-reloader],Args:[--watch-interval=0 --listen-address=:8081 --config-file=/etc/prometheus/config/prometheus.yaml.gz --config-envsubst-file=/etc/prometheus/config_out/prometheus.env.yaml --watched-dir=/etc/prometheus/rules/prometheus-metric-storage-rulefiles-0],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:reloader-init,HostPort:0,ContainerPort:8081,Protocol:TCP,HostIP:,},},Env:[]EnvVar{EnvVar{Name:POD_NAME,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.name,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},EnvVar{Name:SHARD,Value:0,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{52428800 0} {} 50Mi BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{52428800 0} {} 50Mi 
BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:false,MountPath:/etc/prometheus/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-out,ReadOnly:false,MountPath:/etc/prometheus/config_out,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:prometheus-metric-storage-rulefiles-0,ReadOnly:false,MountPath:/etc/prometheus/rules/prometheus-metric-storage-rulefiles-0,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-xtssp,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:*true,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod prometheus-metric-storage-0_openstack(bef902c7-4e5f-4af9-bda4-0c92b8521901): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Dec 10 11:12:11 crc kubenswrapper[4780]: E1210 11:12:11.966390 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init-config-reloader\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openstack/prometheus-metric-storage-0" podUID="bef902c7-4e5f-4af9-bda4-0c92b8521901" Dec 10 11:12:12 crc kubenswrapper[4780]: E1210 11:12:12.099136 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init-config-reloader\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/cluster-observability-operator/obo-prometheus-operator-prometheus-config-reloader-rhel9@sha256:1133c973c7472c665f910a722e19c8e2e27accb34b90fab67f14548627ce9c62\\\"\"" pod="openstack/prometheus-metric-storage-0" podUID="bef902c7-4e5f-4af9-bda4-0c92b8521901" Dec 10 11:12:14 crc kubenswrapper[4780]: I1210 11:12:14.247460 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-x98wp"] Dec 10 11:12:14 crc kubenswrapper[4780]: I1210 11:12:14.258983 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-x98wp" Dec 10 11:12:14 crc kubenswrapper[4780]: I1210 11:12:14.302326 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/212dc98e-3ddc-4efb-8300-968ba8c38626-catalog-content\") pod \"certified-operators-x98wp\" (UID: \"212dc98e-3ddc-4efb-8300-968ba8c38626\") " pod="openshift-marketplace/certified-operators-x98wp" Dec 10 11:12:14 crc kubenswrapper[4780]: I1210 11:12:14.303030 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/212dc98e-3ddc-4efb-8300-968ba8c38626-utilities\") pod \"certified-operators-x98wp\" (UID: \"212dc98e-3ddc-4efb-8300-968ba8c38626\") " pod="openshift-marketplace/certified-operators-x98wp" Dec 10 11:12:14 crc kubenswrapper[4780]: I1210 11:12:14.303097 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gml4r\" (UniqueName: \"kubernetes.io/projected/212dc98e-3ddc-4efb-8300-968ba8c38626-kube-api-access-gml4r\") pod \"certified-operators-x98wp\" (UID: \"212dc98e-3ddc-4efb-8300-968ba8c38626\") " pod="openshift-marketplace/certified-operators-x98wp" Dec 10 11:12:14 crc kubenswrapper[4780]: I1210 11:12:14.318475 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-x98wp"] Dec 10 11:12:14 crc kubenswrapper[4780]: I1210 11:12:14.410109 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/212dc98e-3ddc-4efb-8300-968ba8c38626-catalog-content\") pod \"certified-operators-x98wp\" (UID: \"212dc98e-3ddc-4efb-8300-968ba8c38626\") " pod="openshift-marketplace/certified-operators-x98wp" Dec 10 11:12:14 crc kubenswrapper[4780]: I1210 11:12:14.410853 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/212dc98e-3ddc-4efb-8300-968ba8c38626-utilities\") pod \"certified-operators-x98wp\" (UID: \"212dc98e-3ddc-4efb-8300-968ba8c38626\") " pod="openshift-marketplace/certified-operators-x98wp" Dec 10 11:12:14 crc kubenswrapper[4780]: I1210 11:12:14.410890 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gml4r\" (UniqueName: \"kubernetes.io/projected/212dc98e-3ddc-4efb-8300-968ba8c38626-kube-api-access-gml4r\") pod \"certified-operators-x98wp\" (UID: \"212dc98e-3ddc-4efb-8300-968ba8c38626\") " pod="openshift-marketplace/certified-operators-x98wp" Dec 10 11:12:14 crc kubenswrapper[4780]: I1210 11:12:14.411940 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/212dc98e-3ddc-4efb-8300-968ba8c38626-utilities\") pod \"certified-operators-x98wp\" (UID: \"212dc98e-3ddc-4efb-8300-968ba8c38626\") " pod="openshift-marketplace/certified-operators-x98wp" Dec 10 11:12:14 crc kubenswrapper[4780]: I1210 11:12:14.413747 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/212dc98e-3ddc-4efb-8300-968ba8c38626-catalog-content\") pod \"certified-operators-x98wp\" (UID: \"212dc98e-3ddc-4efb-8300-968ba8c38626\") " pod="openshift-marketplace/certified-operators-x98wp" Dec 10 11:12:14 crc kubenswrapper[4780]: I1210 11:12:14.450564 4780 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-gml4r\" (UniqueName: \"kubernetes.io/projected/212dc98e-3ddc-4efb-8300-968ba8c38626-kube-api-access-gml4r\") pod \"certified-operators-x98wp\" (UID: \"212dc98e-3ddc-4efb-8300-968ba8c38626\") " pod="openshift-marketplace/certified-operators-x98wp" Dec 10 11:12:14 crc kubenswrapper[4780]: I1210 11:12:14.645984 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-x98wp" Dec 10 11:12:21 crc kubenswrapper[4780]: I1210 11:12:21.518283 4780 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-console/console-56bd7bfcb4-rl4rr" podUID="21a7de0d-bac7-4258-8062-414b665097c9" containerName="console" containerID="cri-o://4b6362f6257a92f65062a8984f23eab11e5d58078c4b6f95a3e1d67bea700d65" gracePeriod=15 Dec 10 11:12:21 crc kubenswrapper[4780]: I1210 11:12:21.764539 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-console_console-56bd7bfcb4-rl4rr_21a7de0d-bac7-4258-8062-414b665097c9/console/0.log" Dec 10 11:12:21 crc kubenswrapper[4780]: I1210 11:12:21.764614 4780 generic.go:334] "Generic (PLEG): container finished" podID="21a7de0d-bac7-4258-8062-414b665097c9" containerID="4b6362f6257a92f65062a8984f23eab11e5d58078c4b6f95a3e1d67bea700d65" exitCode=2 Dec 10 11:12:21 crc kubenswrapper[4780]: I1210 11:12:21.764667 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-56bd7bfcb4-rl4rr" event={"ID":"21a7de0d-bac7-4258-8062-414b665097c9","Type":"ContainerDied","Data":"4b6362f6257a92f65062a8984f23eab11e5d58078c4b6f95a3e1d67bea700d65"} Dec 10 11:12:23 crc kubenswrapper[4780]: E1210 11:12:23.536485 4780 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-mariadb:current-podified" Dec 10 11:12:23 crc kubenswrapper[4780]: E1210 11:12:23.537247 4780 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:mysql-bootstrap,Image:quay.io/podified-antelope-centos9/openstack-mariadb:current-podified,Command:[bash 
/var/lib/operator-scripts/mysql_bootstrap.sh],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:True,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:mysql-db,ReadOnly:false,MountPath:/var/lib/mysql,SubPath:mysql,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data-default,ReadOnly:true,MountPath:/var/lib/config-data/default,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data-generated,ReadOnly:false,MountPath:/var/lib/config-data/generated,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:operator-scripts,ReadOnly:true,MountPath:/var/lib/operator-scripts,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kolla-config,ReadOnly:true,MountPath:/var/lib/kolla/config_files,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-7wnlb,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod openstack-galera-0_openstack(c31145f5-6188-4934-8ceb-a86ac4a0e997): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Dec 10 11:12:23 crc kubenswrapper[4780]: E1210 11:12:23.538776 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"mysql-bootstrap\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/openstack-galera-0" podUID="c31145f5-6188-4934-8ceb-a86ac4a0e997" Dec 10 11:12:23 crc kubenswrapper[4780]: E1210 11:12:23.790911 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"mysql-bootstrap\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-mariadb:current-podified\\\"\"" pod="openstack/openstack-galera-0" podUID="c31145f5-6188-4934-8ceb-a86ac4a0e997" Dec 10 11:12:24 crc kubenswrapper[4780]: E1210 11:12:24.965586 4780 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-ovn-base:current-podified" Dec 10 11:12:24 crc kubenswrapper[4780]: E1210 11:12:24.966264 4780 kuberuntime_manager.go:1274] "Unhandled Error" err="init container 
&Container{Name:ovsdb-server-init,Image:quay.io/podified-antelope-centos9/openstack-ovn-base:current-podified,Command:[/usr/local/bin/container-scripts/init-ovsdb-server.sh],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:nb9h657h688h657h676h64fhddh676hdbh79h5d5h5d9h565h68hddh64ch68bhf8h579h65dh58h5fbh7bh65fh648h9h5bfh5ch685h545hdch55cq,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:etc-ovs,ReadOnly:false,MountPath:/etc/openvswitch,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:var-run,ReadOnly:false,MountPath:/var/run/openvswitch,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:var-log,ReadOnly:false,MountPath:/var/log/openvswitch,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:var-lib,ReadOnly:false,MountPath:/var/lib/openvswitch,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:scripts,ReadOnly:true,MountPath:/usr/local/bin/container-scripts,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-hbmvf,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[NET_ADMIN SYS_ADMIN SYS_NICE],Drop:[],},Privileged:*true,SELinuxOptions:nil,RunAsUser:*0,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ovn-controller-ovs-hzgvf_openstack(1c086cc9-263e-4d8e-b3fb-a64fea7f179c): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Dec 10 11:12:24 crc kubenswrapper[4780]: E1210 11:12:24.967563 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovsdb-server-init\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/ovn-controller-ovs-hzgvf" podUID="1c086cc9-263e-4d8e-b3fb-a64fea7f179c" Dec 10 11:12:24 crc kubenswrapper[4780]: E1210 11:12:24.986014 4780 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-rabbitmq:current-podified" Dec 10 11:12:24 crc kubenswrapper[4780]: E1210 11:12:24.986288 4780 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:setup-container,Image:quay.io/podified-antelope-centos9/openstack-rabbitmq:current-podified,Command:[sh -c cp /tmp/erlang-cookie-secret/.erlang.cookie /var/lib/rabbitmq/.erlang.cookie && chmod 600 /var/lib/rabbitmq/.erlang.cookie ; cp /tmp/rabbitmq-plugins/enabled_plugins /operator/enabled_plugins ; echo '[default]' > /var/lib/rabbitmq/.rabbitmqadmin.conf && sed -e 's/default_user/username/' -e 's/default_pass/password/' /tmp/default_user.conf >> /var/lib/rabbitmq/.rabbitmqadmin.conf && chmod 600 
/var/lib/rabbitmq/.rabbitmqadmin.conf ; sleep 30],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{20 -3} {} 20m DecimalSI},memory: {{67108864 0} {} BinarySI},},Requests:ResourceList{cpu: {{20 -3} {} 20m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:plugins-conf,ReadOnly:false,MountPath:/tmp/rabbitmq-plugins/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:rabbitmq-erlang-cookie,ReadOnly:false,MountPath:/var/lib/rabbitmq/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:erlang-cookie-secret,ReadOnly:false,MountPath:/tmp/erlang-cookie-secret/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:rabbitmq-plugins,ReadOnly:false,MountPath:/operator,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:persistence,ReadOnly:false,MountPath:/var/lib/rabbitmq/mnesia/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:rabbitmq-confd,ReadOnly:false,MountPath:/tmp/default_user.conf,SubPath:default_user.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-ptcx5,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod rabbitmq-cell1-server-0_openstack(9f6ef7c1-91bd-4109-af1b-9cf3960ec2ae): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Dec 10 11:12:24 crc kubenswrapper[4780]: E1210 11:12:24.987689 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"setup-container\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/rabbitmq-cell1-server-0" podUID="9f6ef7c1-91bd-4109-af1b-9cf3960ec2ae" Dec 10 11:12:25 crc kubenswrapper[4780]: E1210 11:12:25.004094 4780 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-rabbitmq:current-podified" Dec 10 11:12:25 crc kubenswrapper[4780]: E1210 11:12:25.004334 4780 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:setup-container,Image:quay.io/podified-antelope-centos9/openstack-rabbitmq:current-podified,Command:[sh -c cp /tmp/erlang-cookie-secret/.erlang.cookie /var/lib/rabbitmq/.erlang.cookie && chmod 600 /var/lib/rabbitmq/.erlang.cookie ; cp /tmp/rabbitmq-plugins/enabled_plugins /operator/enabled_plugins ; echo '[default]' > /var/lib/rabbitmq/.rabbitmqadmin.conf && sed -e 's/default_user/username/' -e 's/default_pass/password/' /tmp/default_user.conf >> /var/lib/rabbitmq/.rabbitmqadmin.conf && chmod 600 
/var/lib/rabbitmq/.rabbitmqadmin.conf ; sleep 30],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{20 -3} {} 20m DecimalSI},memory: {{67108864 0} {} BinarySI},},Requests:ResourceList{cpu: {{20 -3} {} 20m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:plugins-conf,ReadOnly:false,MountPath:/tmp/rabbitmq-plugins/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:rabbitmq-erlang-cookie,ReadOnly:false,MountPath:/var/lib/rabbitmq/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:erlang-cookie-secret,ReadOnly:false,MountPath:/tmp/erlang-cookie-secret/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:rabbitmq-plugins,ReadOnly:false,MountPath:/operator,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:persistence,ReadOnly:false,MountPath:/var/lib/rabbitmq/mnesia/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:rabbitmq-confd,ReadOnly:false,MountPath:/tmp/default_user.conf,SubPath:default_user.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-bcpxv,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod rabbitmq-server-0_openstack(930a45eb-72d1-4060-92de-2e348073eb16): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Dec 10 11:12:25 crc kubenswrapper[4780]: E1210 11:12:25.006397 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"setup-container\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/rabbitmq-server-0" podUID="930a45eb-72d1-4060-92de-2e348073eb16" Dec 10 11:12:25 crc kubenswrapper[4780]: E1210 11:12:25.835939 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"setup-container\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-rabbitmq:current-podified\\\"\"" pod="openstack/rabbitmq-server-0" podUID="930a45eb-72d1-4060-92de-2e348073eb16" Dec 10 11:12:25 crc kubenswrapper[4780]: E1210 11:12:25.836164 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovsdb-server-init\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-ovn-base:current-podified\\\"\"" pod="openstack/ovn-controller-ovs-hzgvf" podUID="1c086cc9-263e-4d8e-b3fb-a64fea7f179c" Dec 10 11:12:25 crc kubenswrapper[4780]: E1210 11:12:25.836256 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" 
for \"setup-container\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-rabbitmq:current-podified\\\"\"" pod="openstack/rabbitmq-cell1-server-0" podUID="9f6ef7c1-91bd-4109-af1b-9cf3960ec2ae" Dec 10 11:12:25 crc kubenswrapper[4780]: E1210 11:12:25.904216 4780 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-memcached:current-podified" Dec 10 11:12:25 crc kubenswrapper[4780]: E1210 11:12:25.904635 4780 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:memcached,Image:quay.io/podified-antelope-centos9/openstack-memcached:current-podified,Command:[/usr/bin/dumb-init -- /usr/local/bin/kolla_start],Args:[],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:memcached,HostPort:0,ContainerPort:11211,Protocol:TCP,HostIP:,},ContainerPort{Name:memcached-tls,HostPort:0,ContainerPort:11212,Protocol:TCP,HostIP:,},},Env:[]EnvVar{EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},EnvVar{Name:POD_IPS,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIPs,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},EnvVar{Name:CONFIG_HASH,Value:n5f7h98hf5hb6h57fh94h548hdhf9h7dh5d4hb4h58h588h565h664h58h57fhbchb9h59fh5dh5dbh557h5dfh67h68fh649h5c5h69h5ffhcfq,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/src,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kolla-config,ReadOnly:true,MountPath:/var/lib/kolla/config_files,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:memcached-tls-certs,ReadOnly:true,MountPath:/var/lib/config-data/tls/certs/memcached.crt,SubPath:tls.crt,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:memcached-tls-certs,ReadOnly:true,MountPath:/var/lib/config-data/tls/private/memcached.key,SubPath:tls.key,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-prs6g,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:nil,TCPSocket:&TCPSocketAction{Port:{0 11211 },Host:,},GRPC:nil,},InitialDelaySeconds:3,TimeoutSeconds:5,PeriodSeconds:3,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:nil,TCPSocket:&TCPSocketAction{Port:{0 11211 
},Host:,},GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:5,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42457,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:*42457,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod memcached-0_openstack(fe36479c-f1fb-4928-b399-e56c8df9205c): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Dec 10 11:12:25 crc kubenswrapper[4780]: E1210 11:12:25.906974 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"memcached\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/memcached-0" podUID="fe36479c-f1fb-4928-b399-e56c8df9205c" Dec 10 11:12:25 crc kubenswrapper[4780]: E1210 11:12:25.965168 4780 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-mariadb:current-podified" Dec 10 11:12:25 crc kubenswrapper[4780]: E1210 11:12:25.965434 4780 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:mysql-bootstrap,Image:quay.io/podified-antelope-centos9/openstack-mariadb:current-podified,Command:[bash 
/var/lib/operator-scripts/mysql_bootstrap.sh],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:True,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:mysql-db,ReadOnly:false,MountPath:/var/lib/mysql,SubPath:mysql,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data-default,ReadOnly:true,MountPath:/var/lib/config-data/default,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data-generated,ReadOnly:false,MountPath:/var/lib/config-data/generated,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:operator-scripts,ReadOnly:true,MountPath:/var/lib/operator-scripts,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kolla-config,ReadOnly:true,MountPath:/var/lib/kolla/config_files,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-j2ddt,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod openstack-cell1-galera-0_openstack(ee80edf3-0250-44c9-acfb-e2f9a3ce4f4b): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Dec 10 11:12:25 crc kubenswrapper[4780]: E1210 11:12:25.966667 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"mysql-bootstrap\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/openstack-cell1-galera-0" podUID="ee80edf3-0250-44c9-acfb-e2f9a3ce4f4b" Dec 10 11:12:26 crc kubenswrapper[4780]: E1210 11:12:26.250396 4780 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified" Dec 10 11:12:26 crc kubenswrapper[4780]: E1210 11:12:26.250799 4780 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:ovn-controller,Image:quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified,Command:[ovn-controller --pidfile unix:/run/openvswitch/db.sock --certificate=/etc/pki/tls/certs/ovndb.crt --private-key=/etc/pki/tls/private/ovndb.key 
--ca-cert=/etc/pki/tls/certs/ovndbca.crt],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:nb9h657h688h657h676h64fhddh676hdbh79h5d5h5d9h565h68hddh64ch68bhf8h579h65dh58h5fbh7bh65fh648h9h5bfh5ch685h545hdch55cq,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:var-run,ReadOnly:false,MountPath:/var/run/openvswitch,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:var-run-ovn,ReadOnly:false,MountPath:/var/run/ovn,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:var-log-ovn,ReadOnly:false,MountPath:/var/log/ovn,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:scripts,ReadOnly:true,MountPath:/usr/local/bin/container-scripts,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:ovn-controller-tls-certs,ReadOnly:true,MountPath:/etc/pki/tls/certs/ovndb.crt,SubPath:tls.crt,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:ovn-controller-tls-certs,ReadOnly:true,MountPath:/etc/pki/tls/private/ovndb.key,SubPath:tls.key,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:ovn-controller-tls-certs,ReadOnly:true,MountPath:/etc/pki/tls/certs/ovndbca.crt,SubPath:ca.crt,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-5g8dc,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[/usr/local/bin/container-scripts/ovn_controller_liveness.sh],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:30,TimeoutSeconds:5,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[/usr/local/bin/container-scripts/ovn_controller_readiness.sh],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:30,TimeoutSeconds:5,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:&Lifecycle{PostStart:nil,PreStop:&LifecycleHandler{Exec:&ExecAction{Command:[/usr/share/ovn/scripts/ovn-ctl stop_controller],},HTTPGet:nil,TCPSocket:nil,Sleep:nil,},},TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[NET_ADMIN SYS_ADMIN SYS_NICE],Drop:[],},Privileged:*true,SELinuxOptions:nil,RunAsUser:*0,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ovn-controller-wt5zb_openstack(6bd77f46-f3d3-45a7-bc8e-f3de677e1583): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Dec 10 11:12:26 crc kubenswrapper[4780]: E1210 11:12:26.252315 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to 
\"StartContainer\" for \"ovn-controller\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/ovn-controller-wt5zb" podUID="6bd77f46-f3d3-45a7-bc8e-f3de677e1583" Dec 10 11:12:26 crc kubenswrapper[4780]: E1210 11:12:26.906830 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"memcached\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-memcached:current-podified\\\"\"" pod="openstack/memcached-0" podUID="fe36479c-f1fb-4928-b399-e56c8df9205c" Dec 10 11:12:26 crc kubenswrapper[4780]: E1210 11:12:26.907313 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovn-controller\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified\\\"\"" pod="openstack/ovn-controller-wt5zb" podUID="6bd77f46-f3d3-45a7-bc8e-f3de677e1583" Dec 10 11:12:26 crc kubenswrapper[4780]: E1210 11:12:26.907626 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"mysql-bootstrap\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-mariadb:current-podified\\\"\"" pod="openstack/openstack-cell1-galera-0" podUID="ee80edf3-0250-44c9-acfb-e2f9a3ce4f4b" Dec 10 11:12:27 crc kubenswrapper[4780]: E1210 11:12:27.588744 4780 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified" Dec 10 11:12:27 crc kubenswrapper[4780]: E1210 11:12:27.589196 4780 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:init,Image:quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries 
--test],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n68chd6h679hbfh55fhc6h5ffh5d8h94h56ch589hb4hc5h57bh677hcdh655h8dh667h675h654h66ch567h8fh659h5b4h675h566h55bh54h67dh6dq,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:dns-svc,ReadOnly:true,MountPath:/etc/dnsmasq.d/hosts/dns-svc,SubPath:dns-svc,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-tgttr,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-666b6646f7-zc5ks_openstack(26190c4b-d1ff-4509-b9cb-572e40141033): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Dec 10 11:12:27 crc kubenswrapper[4780]: E1210 11:12:27.590808 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/dnsmasq-dns-666b6646f7-zc5ks" podUID="26190c4b-d1ff-4509-b9cb-572e40141033" Dec 10 11:12:28 crc kubenswrapper[4780]: E1210 11:12:28.843004 4780 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/cluster-observability-operator/dashboards-console-plugin-rhel9@sha256:a69da8bbca8a28dd2925f864d51cc31cf761b10532c553095ba40b242ef701cb" Dec 10 11:12:28 crc kubenswrapper[4780]: E1210 11:12:28.843579 4780 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:observability-ui-dashboards,Image:registry.redhat.io/cluster-observability-operator/dashboards-console-plugin-rhel9@sha256:a69da8bbca8a28dd2925f864d51cc31cf761b10532c553095ba40b242ef701cb,Command:[],Args:[-port=9443 -cert=/var/serving-cert/tls.crt 
-key=/var/serving-cert/tls.key],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:web,HostPort:0,ContainerPort:9443,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:serving-cert,ReadOnly:true,MountPath:/var/serving-cert,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-t6llg,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000350000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod observability-ui-dashboards-7d5fb4cbfb-26rg4_openshift-operators(f1e3b9ec-1036-4bee-bbc8-336293208b48): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Dec 10 11:12:28 crc kubenswrapper[4780]: E1210 11:12:28.844782 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"observability-ui-dashboards\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-operators/observability-ui-dashboards-7d5fb4cbfb-26rg4" podUID="f1e3b9ec-1036-4bee-bbc8-336293208b48" Dec 10 11:12:29 crc kubenswrapper[4780]: I1210 11:12:29.005825 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-d8nw2"] Dec 10 11:12:29 crc kubenswrapper[4780]: I1210 11:12:29.010468 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-d8nw2" Dec 10 11:12:29 crc kubenswrapper[4780]: I1210 11:12:29.025628 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-d8nw2"] Dec 10 11:12:29 crc kubenswrapper[4780]: E1210 11:12:29.064014 4780 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified" Dec 10 11:12:29 crc kubenswrapper[4780]: E1210 11:12:29.064254 4780 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:init,Image:quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries --test],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:ndfhb5h667h568h584h5f9h58dh565h664h587h597h577h64bh5c4h66fh647hbdh68ch5c5h68dh686h5f7h64hd7hc6h55fh57bh98h57fh87h5fh57fq,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:dns-svc,ReadOnly:true,MountPath:/etc/dnsmasq.d/hosts/dns-svc,SubPath:dns-svc,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-6ptlx,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-78dd6ddcc-sqq22_openstack(c6a6a404-b951-4032-a505-7e41cc0a6eeb): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Dec 10 11:12:29 crc kubenswrapper[4780]: E1210 11:12:29.065524 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/dnsmasq-dns-78dd6ddcc-sqq22" podUID="c6a6a404-b951-4032-a505-7e41cc0a6eeb" Dec 10 11:12:29 crc kubenswrapper[4780]: I1210 11:12:29.166423 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/200a4e5a-e0a1-4e36-8843-147fcf1de0b4-utilities\") 
pod \"redhat-marketplace-d8nw2\" (UID: \"200a4e5a-e0a1-4e36-8843-147fcf1de0b4\") " pod="openshift-marketplace/redhat-marketplace-d8nw2" Dec 10 11:12:29 crc kubenswrapper[4780]: I1210 11:12:29.166885 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mdqk5\" (UniqueName: \"kubernetes.io/projected/200a4e5a-e0a1-4e36-8843-147fcf1de0b4-kube-api-access-mdqk5\") pod \"redhat-marketplace-d8nw2\" (UID: \"200a4e5a-e0a1-4e36-8843-147fcf1de0b4\") " pod="openshift-marketplace/redhat-marketplace-d8nw2" Dec 10 11:12:29 crc kubenswrapper[4780]: I1210 11:12:29.167523 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/200a4e5a-e0a1-4e36-8843-147fcf1de0b4-catalog-content\") pod \"redhat-marketplace-d8nw2\" (UID: \"200a4e5a-e0a1-4e36-8843-147fcf1de0b4\") " pod="openshift-marketplace/redhat-marketplace-d8nw2" Dec 10 11:12:29 crc kubenswrapper[4780]: I1210 11:12:29.270519 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/200a4e5a-e0a1-4e36-8843-147fcf1de0b4-catalog-content\") pod \"redhat-marketplace-d8nw2\" (UID: \"200a4e5a-e0a1-4e36-8843-147fcf1de0b4\") " pod="openshift-marketplace/redhat-marketplace-d8nw2" Dec 10 11:12:29 crc kubenswrapper[4780]: I1210 11:12:29.270660 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/200a4e5a-e0a1-4e36-8843-147fcf1de0b4-utilities\") pod \"redhat-marketplace-d8nw2\" (UID: \"200a4e5a-e0a1-4e36-8843-147fcf1de0b4\") " pod="openshift-marketplace/redhat-marketplace-d8nw2" Dec 10 11:12:29 crc kubenswrapper[4780]: I1210 11:12:29.270735 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mdqk5\" (UniqueName: \"kubernetes.io/projected/200a4e5a-e0a1-4e36-8843-147fcf1de0b4-kube-api-access-mdqk5\") pod \"redhat-marketplace-d8nw2\" (UID: \"200a4e5a-e0a1-4e36-8843-147fcf1de0b4\") " pod="openshift-marketplace/redhat-marketplace-d8nw2" Dec 10 11:12:29 crc kubenswrapper[4780]: I1210 11:12:29.271504 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/200a4e5a-e0a1-4e36-8843-147fcf1de0b4-catalog-content\") pod \"redhat-marketplace-d8nw2\" (UID: \"200a4e5a-e0a1-4e36-8843-147fcf1de0b4\") " pod="openshift-marketplace/redhat-marketplace-d8nw2" Dec 10 11:12:29 crc kubenswrapper[4780]: I1210 11:12:29.271544 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/200a4e5a-e0a1-4e36-8843-147fcf1de0b4-utilities\") pod \"redhat-marketplace-d8nw2\" (UID: \"200a4e5a-e0a1-4e36-8843-147fcf1de0b4\") " pod="openshift-marketplace/redhat-marketplace-d8nw2" Dec 10 11:12:29 crc kubenswrapper[4780]: E1210 11:12:29.304338 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"observability-ui-dashboards\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/cluster-observability-operator/dashboards-console-plugin-rhel9@sha256:a69da8bbca8a28dd2925f864d51cc31cf761b10532c553095ba40b242ef701cb\\\"\"" pod="openshift-operators/observability-ui-dashboards-7d5fb4cbfb-26rg4" podUID="f1e3b9ec-1036-4bee-bbc8-336293208b48" Dec 10 11:12:29 crc kubenswrapper[4780]: I1210 11:12:29.315937 4780 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-mdqk5\" (UniqueName: \"kubernetes.io/projected/200a4e5a-e0a1-4e36-8843-147fcf1de0b4-kube-api-access-mdqk5\") pod \"redhat-marketplace-d8nw2\" (UID: \"200a4e5a-e0a1-4e36-8843-147fcf1de0b4\") " pod="openshift-marketplace/redhat-marketplace-d8nw2" Dec 10 11:12:29 crc kubenswrapper[4780]: I1210 11:12:29.354366 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-d8nw2" Dec 10 11:12:30 crc kubenswrapper[4780]: I1210 11:12:30.240846 4780 patch_prober.go:28] interesting pod/console-56bd7bfcb4-rl4rr container/console namespace/openshift-console: Readiness probe status=failure output="Get \"https://10.217.0.90:8443/health\": dial tcp 10.217.0.90:8443: i/o timeout" start-of-body= Dec 10 11:12:30 crc kubenswrapper[4780]: I1210 11:12:30.241644 4780 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/console-56bd7bfcb4-rl4rr" podUID="21a7de0d-bac7-4258-8062-414b665097c9" containerName="console" probeResult="failure" output="Get \"https://10.217.0.90:8443/health\": dial tcp 10.217.0.90:8443: i/o timeout" Dec 10 11:12:31 crc kubenswrapper[4780]: E1210 11:12:31.775765 4780 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified" Dec 10 11:12:31 crc kubenswrapper[4780]: E1210 11:12:31.776046 4780 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:init,Image:quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries 
--test],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n659h4h664hbh658h587h67ch89h587h8fh679hc6hf9h55fh644h5d5h698h68dh5cdh5ffh669h54ch9h689hb8hd4h5bfhd8h5d7h5fh665h574q,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:dns-svc,ReadOnly:true,MountPath:/etc/dnsmasq.d/hosts/dns-svc,SubPath:dns-svc,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-r8qzn,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-57d769cc4f-wjfv6_openstack(295c3a80-ef4d-4d10-b21b-14387a75a1af): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Dec 10 11:12:31 crc kubenswrapper[4780]: E1210 11:12:31.777198 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/dnsmasq-dns-57d769cc4f-wjfv6" podUID="295c3a80-ef4d-4d10-b21b-14387a75a1af" Dec 10 11:12:31 crc kubenswrapper[4780]: E1210 11:12:31.804386 4780 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified" Dec 10 11:12:31 crc kubenswrapper[4780]: E1210 11:12:31.804610 4780 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:init,Image:quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries 
--test],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:nffh5bdhf4h5f8h79h55h77h58fh56dh7bh6fh578hbch55dh68h56bhd9h65dh57ch658hc9h566h666h688h58h65dh684h5d7h6ch575h5d6h88q,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-km2zh,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-675f4bcbfc-cgslz_openstack(b9cf3b6c-c9be-44d8-ab60-bb9dae0ecc75): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Dec 10 11:12:31 crc kubenswrapper[4780]: E1210 11:12:31.805777 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/dnsmasq-dns-675f4bcbfc-cgslz" podUID="b9cf3b6c-c9be-44d8-ab60-bb9dae0ecc75" Dec 10 11:12:31 crc kubenswrapper[4780]: I1210 11:12:31.875842 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-console_console-56bd7bfcb4-rl4rr_21a7de0d-bac7-4258-8062-414b665097c9/console/0.log" Dec 10 11:12:31 crc kubenswrapper[4780]: I1210 11:12:31.876481 4780 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-56bd7bfcb4-rl4rr" Dec 10 11:12:31 crc kubenswrapper[4780]: I1210 11:12:31.893000 4780 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-666b6646f7-zc5ks" Dec 10 11:12:31 crc kubenswrapper[4780]: I1210 11:12:31.992896 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/21a7de0d-bac7-4258-8062-414b665097c9-trusted-ca-bundle\") pod \"21a7de0d-bac7-4258-8062-414b665097c9\" (UID: \"21a7de0d-bac7-4258-8062-414b665097c9\") " Dec 10 11:12:31 crc kubenswrapper[4780]: I1210 11:12:31.992990 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/21a7de0d-bac7-4258-8062-414b665097c9-console-oauth-config\") pod \"21a7de0d-bac7-4258-8062-414b665097c9\" (UID: \"21a7de0d-bac7-4258-8062-414b665097c9\") " Dec 10 11:12:31 crc kubenswrapper[4780]: I1210 11:12:31.993025 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/26190c4b-d1ff-4509-b9cb-572e40141033-dns-svc\") pod \"26190c4b-d1ff-4509-b9cb-572e40141033\" (UID: \"26190c4b-d1ff-4509-b9cb-572e40141033\") " Dec 10 11:12:31 crc kubenswrapper[4780]: I1210 11:12:31.993089 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-htdcj\" (UniqueName: \"kubernetes.io/projected/21a7de0d-bac7-4258-8062-414b665097c9-kube-api-access-htdcj\") pod \"21a7de0d-bac7-4258-8062-414b665097c9\" (UID: \"21a7de0d-bac7-4258-8062-414b665097c9\") " Dec 10 11:12:31 crc kubenswrapper[4780]: I1210 11:12:31.993159 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/21a7de0d-bac7-4258-8062-414b665097c9-console-serving-cert\") pod \"21a7de0d-bac7-4258-8062-414b665097c9\" (UID: \"21a7de0d-bac7-4258-8062-414b665097c9\") " Dec 10 11:12:31 crc kubenswrapper[4780]: I1210 11:12:31.993241 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tgttr\" (UniqueName: \"kubernetes.io/projected/26190c4b-d1ff-4509-b9cb-572e40141033-kube-api-access-tgttr\") pod \"26190c4b-d1ff-4509-b9cb-572e40141033\" (UID: \"26190c4b-d1ff-4509-b9cb-572e40141033\") " Dec 10 11:12:31 crc kubenswrapper[4780]: I1210 11:12:31.993272 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/26190c4b-d1ff-4509-b9cb-572e40141033-config\") pod \"26190c4b-d1ff-4509-b9cb-572e40141033\" (UID: \"26190c4b-d1ff-4509-b9cb-572e40141033\") " Dec 10 11:12:31 crc kubenswrapper[4780]: I1210 11:12:31.993444 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/21a7de0d-bac7-4258-8062-414b665097c9-service-ca\") pod \"21a7de0d-bac7-4258-8062-414b665097c9\" (UID: \"21a7de0d-bac7-4258-8062-414b665097c9\") " Dec 10 11:12:31 crc kubenswrapper[4780]: I1210 11:12:31.993556 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/21a7de0d-bac7-4258-8062-414b665097c9-oauth-serving-cert\") pod \"21a7de0d-bac7-4258-8062-414b665097c9\" (UID: \"21a7de0d-bac7-4258-8062-414b665097c9\") " Dec 10 11:12:31 crc kubenswrapper[4780]: I1210 11:12:31.993583 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/21a7de0d-bac7-4258-8062-414b665097c9-console-config\") pod 
\"21a7de0d-bac7-4258-8062-414b665097c9\" (UID: \"21a7de0d-bac7-4258-8062-414b665097c9\") " Dec 10 11:12:31 crc kubenswrapper[4780]: I1210 11:12:31.995780 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/21a7de0d-bac7-4258-8062-414b665097c9-console-config" (OuterVolumeSpecName: "console-config") pod "21a7de0d-bac7-4258-8062-414b665097c9" (UID: "21a7de0d-bac7-4258-8062-414b665097c9"). InnerVolumeSpecName "console-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 11:12:31 crc kubenswrapper[4780]: I1210 11:12:31.996473 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/21a7de0d-bac7-4258-8062-414b665097c9-service-ca" (OuterVolumeSpecName: "service-ca") pod "21a7de0d-bac7-4258-8062-414b665097c9" (UID: "21a7de0d-bac7-4258-8062-414b665097c9"). InnerVolumeSpecName "service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 11:12:31 crc kubenswrapper[4780]: I1210 11:12:31.997657 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/21a7de0d-bac7-4258-8062-414b665097c9-oauth-serving-cert" (OuterVolumeSpecName: "oauth-serving-cert") pod "21a7de0d-bac7-4258-8062-414b665097c9" (UID: "21a7de0d-bac7-4258-8062-414b665097c9"). InnerVolumeSpecName "oauth-serving-cert". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 11:12:31 crc kubenswrapper[4780]: I1210 11:12:31.997899 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/26190c4b-d1ff-4509-b9cb-572e40141033-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "26190c4b-d1ff-4509-b9cb-572e40141033" (UID: "26190c4b-d1ff-4509-b9cb-572e40141033"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 11:12:31 crc kubenswrapper[4780]: I1210 11:12:31.998307 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/21a7de0d-bac7-4258-8062-414b665097c9-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "21a7de0d-bac7-4258-8062-414b665097c9" (UID: "21a7de0d-bac7-4258-8062-414b665097c9"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 11:12:32 crc kubenswrapper[4780]: I1210 11:12:32.001636 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/26190c4b-d1ff-4509-b9cb-572e40141033-config" (OuterVolumeSpecName: "config") pod "26190c4b-d1ff-4509-b9cb-572e40141033" (UID: "26190c4b-d1ff-4509-b9cb-572e40141033"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 11:12:32 crc kubenswrapper[4780]: I1210 11:12:32.004315 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/21a7de0d-bac7-4258-8062-414b665097c9-console-oauth-config" (OuterVolumeSpecName: "console-oauth-config") pod "21a7de0d-bac7-4258-8062-414b665097c9" (UID: "21a7de0d-bac7-4258-8062-414b665097c9"). InnerVolumeSpecName "console-oauth-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:12:32 crc kubenswrapper[4780]: I1210 11:12:32.015296 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/21a7de0d-bac7-4258-8062-414b665097c9-console-serving-cert" (OuterVolumeSpecName: "console-serving-cert") pod "21a7de0d-bac7-4258-8062-414b665097c9" (UID: "21a7de0d-bac7-4258-8062-414b665097c9"). 
InnerVolumeSpecName "console-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:12:32 crc kubenswrapper[4780]: I1210 11:12:32.020679 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/21a7de0d-bac7-4258-8062-414b665097c9-kube-api-access-htdcj" (OuterVolumeSpecName: "kube-api-access-htdcj") pod "21a7de0d-bac7-4258-8062-414b665097c9" (UID: "21a7de0d-bac7-4258-8062-414b665097c9"). InnerVolumeSpecName "kube-api-access-htdcj". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:12:32 crc kubenswrapper[4780]: I1210 11:12:32.020950 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/26190c4b-d1ff-4509-b9cb-572e40141033-kube-api-access-tgttr" (OuterVolumeSpecName: "kube-api-access-tgttr") pod "26190c4b-d1ff-4509-b9cb-572e40141033" (UID: "26190c4b-d1ff-4509-b9cb-572e40141033"). InnerVolumeSpecName "kube-api-access-tgttr". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:12:32 crc kubenswrapper[4780]: I1210 11:12:32.098711 4780 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tgttr\" (UniqueName: \"kubernetes.io/projected/26190c4b-d1ff-4509-b9cb-572e40141033-kube-api-access-tgttr\") on node \"crc\" DevicePath \"\"" Dec 10 11:12:32 crc kubenswrapper[4780]: I1210 11:12:32.098775 4780 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/26190c4b-d1ff-4509-b9cb-572e40141033-config\") on node \"crc\" DevicePath \"\"" Dec 10 11:12:32 crc kubenswrapper[4780]: I1210 11:12:32.098791 4780 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/21a7de0d-bac7-4258-8062-414b665097c9-service-ca\") on node \"crc\" DevicePath \"\"" Dec 10 11:12:32 crc kubenswrapper[4780]: I1210 11:12:32.098809 4780 reconciler_common.go:293] "Volume detached for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/21a7de0d-bac7-4258-8062-414b665097c9-oauth-serving-cert\") on node \"crc\" DevicePath \"\"" Dec 10 11:12:32 crc kubenswrapper[4780]: I1210 11:12:32.098827 4780 reconciler_common.go:293] "Volume detached for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/21a7de0d-bac7-4258-8062-414b665097c9-console-config\") on node \"crc\" DevicePath \"\"" Dec 10 11:12:32 crc kubenswrapper[4780]: I1210 11:12:32.098841 4780 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/21a7de0d-bac7-4258-8062-414b665097c9-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 10 11:12:32 crc kubenswrapper[4780]: I1210 11:12:32.098855 4780 reconciler_common.go:293] "Volume detached for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/21a7de0d-bac7-4258-8062-414b665097c9-console-oauth-config\") on node \"crc\" DevicePath \"\"" Dec 10 11:12:32 crc kubenswrapper[4780]: I1210 11:12:32.098868 4780 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/26190c4b-d1ff-4509-b9cb-572e40141033-dns-svc\") on node \"crc\" DevicePath \"\"" Dec 10 11:12:32 crc kubenswrapper[4780]: I1210 11:12:32.098882 4780 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-htdcj\" (UniqueName: \"kubernetes.io/projected/21a7de0d-bac7-4258-8062-414b665097c9-kube-api-access-htdcj\") on node \"crc\" DevicePath \"\"" Dec 10 11:12:32 crc kubenswrapper[4780]: I1210 11:12:32.098894 4780 reconciler_common.go:293] "Volume 
detached for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/21a7de0d-bac7-4258-8062-414b665097c9-console-serving-cert\") on node \"crc\" DevicePath \"\"" Dec 10 11:12:32 crc kubenswrapper[4780]: I1210 11:12:32.338714 4780 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-78dd6ddcc-sqq22" Dec 10 11:12:32 crc kubenswrapper[4780]: I1210 11:12:32.412296 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-666b6646f7-zc5ks" event={"ID":"26190c4b-d1ff-4509-b9cb-572e40141033","Type":"ContainerDied","Data":"be12da5061108dd504c7d1e7803ca425afb4dac48ad8b66d6e2a3a5d80cb08d7"} Dec 10 11:12:32 crc kubenswrapper[4780]: I1210 11:12:32.412350 4780 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-666b6646f7-zc5ks" Dec 10 11:12:32 crc kubenswrapper[4780]: I1210 11:12:32.423305 4780 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-78dd6ddcc-sqq22" Dec 10 11:12:32 crc kubenswrapper[4780]: I1210 11:12:32.423330 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-78dd6ddcc-sqq22" event={"ID":"c6a6a404-b951-4032-a505-7e41cc0a6eeb","Type":"ContainerDied","Data":"99030aadd463cef8a258fe4c1ec3ad59fe3e666340e5ee0e3d1ad1d6414947e9"} Dec 10 11:12:32 crc kubenswrapper[4780]: I1210 11:12:32.428763 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-console_console-56bd7bfcb4-rl4rr_21a7de0d-bac7-4258-8062-414b665097c9/console/0.log" Dec 10 11:12:32 crc kubenswrapper[4780]: I1210 11:12:32.428855 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-56bd7bfcb4-rl4rr" event={"ID":"21a7de0d-bac7-4258-8062-414b665097c9","Type":"ContainerDied","Data":"2c7b4c866e626ba5957056aa05c26c06f996890c5c81c1eea95d8e61f92a99d0"} Dec 10 11:12:32 crc kubenswrapper[4780]: I1210 11:12:32.428959 4780 scope.go:117] "RemoveContainer" containerID="4b6362f6257a92f65062a8984f23eab11e5d58078c4b6f95a3e1d67bea700d65" Dec 10 11:12:32 crc kubenswrapper[4780]: I1210 11:12:32.429627 4780 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/console-56bd7bfcb4-rl4rr" Dec 10 11:12:32 crc kubenswrapper[4780]: I1210 11:12:32.518201 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/c6a6a404-b951-4032-a505-7e41cc0a6eeb-dns-svc\") pod \"c6a6a404-b951-4032-a505-7e41cc0a6eeb\" (UID: \"c6a6a404-b951-4032-a505-7e41cc0a6eeb\") " Dec 10 11:12:32 crc kubenswrapper[4780]: I1210 11:12:32.518384 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c6a6a404-b951-4032-a505-7e41cc0a6eeb-config\") pod \"c6a6a404-b951-4032-a505-7e41cc0a6eeb\" (UID: \"c6a6a404-b951-4032-a505-7e41cc0a6eeb\") " Dec 10 11:12:32 crc kubenswrapper[4780]: I1210 11:12:32.518432 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6ptlx\" (UniqueName: \"kubernetes.io/projected/c6a6a404-b951-4032-a505-7e41cc0a6eeb-kube-api-access-6ptlx\") pod \"c6a6a404-b951-4032-a505-7e41cc0a6eeb\" (UID: \"c6a6a404-b951-4032-a505-7e41cc0a6eeb\") " Dec 10 11:12:32 crc kubenswrapper[4780]: I1210 11:12:32.522484 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c6a6a404-b951-4032-a505-7e41cc0a6eeb-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "c6a6a404-b951-4032-a505-7e41cc0a6eeb" (UID: "c6a6a404-b951-4032-a505-7e41cc0a6eeb"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 11:12:32 crc kubenswrapper[4780]: I1210 11:12:32.535859 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c6a6a404-b951-4032-a505-7e41cc0a6eeb-kube-api-access-6ptlx" (OuterVolumeSpecName: "kube-api-access-6ptlx") pod "c6a6a404-b951-4032-a505-7e41cc0a6eeb" (UID: "c6a6a404-b951-4032-a505-7e41cc0a6eeb"). InnerVolumeSpecName "kube-api-access-6ptlx". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:12:32 crc kubenswrapper[4780]: I1210 11:12:32.537547 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c6a6a404-b951-4032-a505-7e41cc0a6eeb-config" (OuterVolumeSpecName: "config") pod "c6a6a404-b951-4032-a505-7e41cc0a6eeb" (UID: "c6a6a404-b951-4032-a505-7e41cc0a6eeb"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 11:12:32 crc kubenswrapper[4780]: I1210 11:12:32.583686 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-666b6646f7-zc5ks"] Dec 10 11:12:32 crc kubenswrapper[4780]: I1210 11:12:32.621756 4780 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-666b6646f7-zc5ks"] Dec 10 11:12:32 crc kubenswrapper[4780]: I1210 11:12:32.629530 4780 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6ptlx\" (UniqueName: \"kubernetes.io/projected/c6a6a404-b951-4032-a505-7e41cc0a6eeb-kube-api-access-6ptlx\") on node \"crc\" DevicePath \"\"" Dec 10 11:12:32 crc kubenswrapper[4780]: I1210 11:12:32.629595 4780 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/c6a6a404-b951-4032-a505-7e41cc0a6eeb-dns-svc\") on node \"crc\" DevicePath \"\"" Dec 10 11:12:32 crc kubenswrapper[4780]: I1210 11:12:32.629610 4780 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c6a6a404-b951-4032-a505-7e41cc0a6eeb-config\") on node \"crc\" DevicePath \"\"" Dec 10 11:12:32 crc kubenswrapper[4780]: I1210 11:12:32.686266 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-console/console-56bd7bfcb4-rl4rr"] Dec 10 11:12:32 crc kubenswrapper[4780]: I1210 11:12:32.906320 4780 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-console/console-56bd7bfcb4-rl4rr"] Dec 10 11:12:33 crc kubenswrapper[4780]: I1210 11:12:33.109656 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-sqq22"] Dec 10 11:12:33 crc kubenswrapper[4780]: I1210 11:12:33.168893 4780 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-sqq22"] Dec 10 11:12:33 crc kubenswrapper[4780]: I1210 11:12:33.609873 4780 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-675f4bcbfc-cgslz" Dec 10 11:12:33 crc kubenswrapper[4780]: I1210 11:12:33.711678 4780 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-57d769cc4f-wjfv6" Dec 10 11:12:33 crc kubenswrapper[4780]: I1210 11:12:33.721126 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-km2zh\" (UniqueName: \"kubernetes.io/projected/b9cf3b6c-c9be-44d8-ab60-bb9dae0ecc75-kube-api-access-km2zh\") pod \"b9cf3b6c-c9be-44d8-ab60-bb9dae0ecc75\" (UID: \"b9cf3b6c-c9be-44d8-ab60-bb9dae0ecc75\") " Dec 10 11:12:33 crc kubenswrapper[4780]: I1210 11:12:33.721379 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b9cf3b6c-c9be-44d8-ab60-bb9dae0ecc75-config\") pod \"b9cf3b6c-c9be-44d8-ab60-bb9dae0ecc75\" (UID: \"b9cf3b6c-c9be-44d8-ab60-bb9dae0ecc75\") " Dec 10 11:12:33 crc kubenswrapper[4780]: I1210 11:12:33.722767 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b9cf3b6c-c9be-44d8-ab60-bb9dae0ecc75-config" (OuterVolumeSpecName: "config") pod "b9cf3b6c-c9be-44d8-ab60-bb9dae0ecc75" (UID: "b9cf3b6c-c9be-44d8-ab60-bb9dae0ecc75"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 11:12:33 crc kubenswrapper[4780]: I1210 11:12:33.728710 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b9cf3b6c-c9be-44d8-ab60-bb9dae0ecc75-kube-api-access-km2zh" (OuterVolumeSpecName: "kube-api-access-km2zh") pod "b9cf3b6c-c9be-44d8-ab60-bb9dae0ecc75" (UID: "b9cf3b6c-c9be-44d8-ab60-bb9dae0ecc75"). InnerVolumeSpecName "kube-api-access-km2zh". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:12:35 crc kubenswrapper[4780]: I1210 11:12:33.823904 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-r8qzn\" (UniqueName: \"kubernetes.io/projected/295c3a80-ef4d-4d10-b21b-14387a75a1af-kube-api-access-r8qzn\") pod \"295c3a80-ef4d-4d10-b21b-14387a75a1af\" (UID: \"295c3a80-ef4d-4d10-b21b-14387a75a1af\") " Dec 10 11:12:35 crc kubenswrapper[4780]: I1210 11:12:33.824612 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/295c3a80-ef4d-4d10-b21b-14387a75a1af-dns-svc\") pod \"295c3a80-ef4d-4d10-b21b-14387a75a1af\" (UID: \"295c3a80-ef4d-4d10-b21b-14387a75a1af\") " Dec 10 11:12:35 crc kubenswrapper[4780]: I1210 11:12:33.824762 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/295c3a80-ef4d-4d10-b21b-14387a75a1af-config\") pod \"295c3a80-ef4d-4d10-b21b-14387a75a1af\" (UID: \"295c3a80-ef4d-4d10-b21b-14387a75a1af\") " Dec 10 11:12:35 crc kubenswrapper[4780]: I1210 11:12:33.825477 4780 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-km2zh\" (UniqueName: \"kubernetes.io/projected/b9cf3b6c-c9be-44d8-ab60-bb9dae0ecc75-kube-api-access-km2zh\") on node \"crc\" DevicePath \"\"" Dec 10 11:12:35 crc kubenswrapper[4780]: I1210 11:12:33.825494 4780 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b9cf3b6c-c9be-44d8-ab60-bb9dae0ecc75-config\") on node \"crc\" DevicePath \"\"" Dec 10 11:12:35 crc kubenswrapper[4780]: I1210 11:12:33.826454 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/295c3a80-ef4d-4d10-b21b-14387a75a1af-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "295c3a80-ef4d-4d10-b21b-14387a75a1af" (UID: "295c3a80-ef4d-4d10-b21b-14387a75a1af"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 11:12:35 crc kubenswrapper[4780]: I1210 11:12:33.826512 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/295c3a80-ef4d-4d10-b21b-14387a75a1af-config" (OuterVolumeSpecName: "config") pod "295c3a80-ef4d-4d10-b21b-14387a75a1af" (UID: "295c3a80-ef4d-4d10-b21b-14387a75a1af"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 11:12:35 crc kubenswrapper[4780]: I1210 11:12:33.833368 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/295c3a80-ef4d-4d10-b21b-14387a75a1af-kube-api-access-r8qzn" (OuterVolumeSpecName: "kube-api-access-r8qzn") pod "295c3a80-ef4d-4d10-b21b-14387a75a1af" (UID: "295c3a80-ef4d-4d10-b21b-14387a75a1af"). InnerVolumeSpecName "kube-api-access-r8qzn". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:12:35 crc kubenswrapper[4780]: I1210 11:12:33.898350 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-metrics-jgcc9"] Dec 10 11:12:35 crc kubenswrapper[4780]: I1210 11:12:33.925300 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-86db49b7ff-zl2c8"] Dec 10 11:12:35 crc kubenswrapper[4780]: I1210 11:12:33.929765 4780 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/295c3a80-ef4d-4d10-b21b-14387a75a1af-dns-svc\") on node \"crc\" DevicePath \"\"" Dec 10 11:12:35 crc kubenswrapper[4780]: I1210 11:12:33.929802 4780 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/295c3a80-ef4d-4d10-b21b-14387a75a1af-config\") on node \"crc\" DevicePath \"\"" Dec 10 11:12:35 crc kubenswrapper[4780]: I1210 11:12:33.929814 4780 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-r8qzn\" (UniqueName: \"kubernetes.io/projected/295c3a80-ef4d-4d10-b21b-14387a75a1af-kube-api-access-r8qzn\") on node \"crc\" DevicePath \"\"" Dec 10 11:12:35 crc kubenswrapper[4780]: I1210 11:12:33.989882 4780 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="21a7de0d-bac7-4258-8062-414b665097c9" path="/var/lib/kubelet/pods/21a7de0d-bac7-4258-8062-414b665097c9/volumes" Dec 10 11:12:35 crc kubenswrapper[4780]: I1210 11:12:33.991294 4780 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="26190c4b-d1ff-4509-b9cb-572e40141033" path="/var/lib/kubelet/pods/26190c4b-d1ff-4509-b9cb-572e40141033/volumes" Dec 10 11:12:35 crc kubenswrapper[4780]: I1210 11:12:33.992013 4780 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c6a6a404-b951-4032-a505-7e41cc0a6eeb" path="/var/lib/kubelet/pods/c6a6a404-b951-4032-a505-7e41cc0a6eeb/volumes" Dec 10 11:12:35 crc kubenswrapper[4780]: I1210 11:12:33.997237 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-7fd796d7df-trtrt"] Dec 10 11:12:35 crc kubenswrapper[4780]: I1210 11:12:34.158332 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-x98wp"] Dec 10 11:12:35 crc kubenswrapper[4780]: I1210 11:12:35.069114 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-675f4bcbfc-cgslz" event={"ID":"b9cf3b6c-c9be-44d8-ab60-bb9dae0ecc75","Type":"ContainerDied","Data":"866ce9a412badb11ef29ee317108b07a9be921236db69f455c7746c3c658eb7d"} Dec 10 11:12:35 crc kubenswrapper[4780]: I1210 11:12:35.069768 4780 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-675f4bcbfc-cgslz" Dec 10 11:12:35 crc kubenswrapper[4780]: I1210 11:12:35.090688 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57d769cc4f-wjfv6" event={"ID":"295c3a80-ef4d-4d10-b21b-14387a75a1af","Type":"ContainerDied","Data":"747290fc2b556c1e90348762ec2e06ec352ef19fa58d9de84b7d5513287c1471"} Dec 10 11:12:35 crc kubenswrapper[4780]: I1210 11:12:35.090838 4780 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-57d769cc4f-wjfv6" Dec 10 11:12:35 crc kubenswrapper[4780]: W1210 11:12:35.115102 4780 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podea884c19_2c44_4387_937a_61cc5c9aa861.slice/crio-ac116eeb70d9da32c1688a89a0a2c6cbf44d146184f0e589384985257ce94f9e WatchSource:0}: Error finding container ac116eeb70d9da32c1688a89a0a2c6cbf44d146184f0e589384985257ce94f9e: Status 404 returned error can't find the container with id ac116eeb70d9da32c1688a89a0a2c6cbf44d146184f0e589384985257ce94f9e Dec 10 11:12:35 crc kubenswrapper[4780]: I1210 11:12:35.208188 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-cgslz"] Dec 10 11:12:35 crc kubenswrapper[4780]: I1210 11:12:35.222089 4780 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-cgslz"] Dec 10 11:12:35 crc kubenswrapper[4780]: I1210 11:12:35.244649 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-wjfv6"] Dec 10 11:12:35 crc kubenswrapper[4780]: I1210 11:12:35.254711 4780 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-wjfv6"] Dec 10 11:12:35 crc kubenswrapper[4780]: E1210 11:12:35.848134 4780 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: reading blob sha256:16797bc33772d7e098ba8b49ca7caf9bc2850095ce37074dc4fb8d938c0ea8d5: Get \"https://registry.k8s.io/v2/kube-state-metrics/kube-state-metrics/blobs/sha256:16797bc33772d7e098ba8b49ca7caf9bc2850095ce37074dc4fb8d938c0ea8d5\": context canceled" image="registry.k8s.io/kube-state-metrics/kube-state-metrics:v2.15.0" Dec 10 11:12:35 crc kubenswrapper[4780]: E1210 11:12:35.848749 4780 kuberuntime_image.go:55] "Failed to pull image" err="rpc error: code = Canceled desc = copying system image from manifest list: reading blob sha256:16797bc33772d7e098ba8b49ca7caf9bc2850095ce37074dc4fb8d938c0ea8d5: Get \"https://registry.k8s.io/v2/kube-state-metrics/kube-state-metrics/blobs/sha256:16797bc33772d7e098ba8b49ca7caf9bc2850095ce37074dc4fb8d938c0ea8d5\": context canceled" image="registry.k8s.io/kube-state-metrics/kube-state-metrics:v2.15.0" Dec 10 11:12:35 crc kubenswrapper[4780]: E1210 11:12:35.849083 4780 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:kube-state-metrics,Image:registry.k8s.io/kube-state-metrics/kube-state-metrics:v2.15.0,Command:[],Args:[--resources=pods --namespaces=openstack],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:http-metrics,HostPort:0,ContainerPort:8080,Protocol:TCP,HostIP:,},ContainerPort{Name:telemetry,HostPort:0,ContainerPort:8081,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-cx4fg,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/livez,Port:{0 8080 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:5,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:5,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:*true,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod kube-state-metrics-0_openstack(82752cbd-d657-4c6d-94f6-e9e75a4452c2): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: reading blob sha256:16797bc33772d7e098ba8b49ca7caf9bc2850095ce37074dc4fb8d938c0ea8d5: Get \"https://registry.k8s.io/v2/kube-state-metrics/kube-state-metrics/blobs/sha256:16797bc33772d7e098ba8b49ca7caf9bc2850095ce37074dc4fb8d938c0ea8d5\": context canceled" logger="UnhandledError" Dec 10 11:12:35 crc kubenswrapper[4780]: E1210 11:12:35.850559 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-state-metrics\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: reading blob sha256:16797bc33772d7e098ba8b49ca7caf9bc2850095ce37074dc4fb8d938c0ea8d5: Get \\\"https://registry.k8s.io/v2/kube-state-metrics/kube-state-metrics/blobs/sha256:16797bc33772d7e098ba8b49ca7caf9bc2850095ce37074dc4fb8d938c0ea8d5\\\": context canceled\"" pod="openstack/kube-state-metrics-0" podUID="82752cbd-d657-4c6d-94f6-e9e75a4452c2" Dec 10 11:12:35 crc kubenswrapper[4780]: I1210 11:12:35.943256 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-d8nw2"] Dec 10 11:12:35 crc kubenswrapper[4780]: I1210 11:12:35.979493 4780 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="295c3a80-ef4d-4d10-b21b-14387a75a1af" path="/var/lib/kubelet/pods/295c3a80-ef4d-4d10-b21b-14387a75a1af/volumes" Dec 10 11:12:35 crc kubenswrapper[4780]: I1210 11:12:35.980120 4780 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b9cf3b6c-c9be-44d8-ab60-bb9dae0ecc75" path="/var/lib/kubelet/pods/b9cf3b6c-c9be-44d8-ab60-bb9dae0ecc75/volumes" Dec 10 11:12:36 crc kubenswrapper[4780]: I1210 11:12:36.155126 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-metrics-jgcc9" event={"ID":"72e2c2ed-0530-4846-9244-b93076ed5640","Type":"ContainerStarted","Data":"55d2aa1d2710cd26e4a1c18d1164a708f4c80ee46e603221748165964b78511b"} Dec 10 11:12:36 crc kubenswrapper[4780]: I1210 11:12:36.170666 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-86db49b7ff-zl2c8" event={"ID":"9a36247f-4fb6-4ec7-9688-aafdb77c1243","Type":"ContainerStarted","Data":"319aabb3e08290ef6764d8a93d1588922d2f2f89834ee6b3353d41554eb58bef"} Dec 10 11:12:36 crc kubenswrapper[4780]: I1210 11:12:36.178361 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7fd796d7df-trtrt" event={"ID":"ea884c19-2c44-4387-937a-61cc5c9aa861","Type":"ContainerStarted","Data":"ac116eeb70d9da32c1688a89a0a2c6cbf44d146184f0e589384985257ce94f9e"} Dec 10 
11:12:36 crc kubenswrapper[4780]: I1210 11:12:36.188190 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"1b0a6811-e7b0-4c35-b54f-6b7a457b68d1","Type":"ContainerStarted","Data":"71134e9689116fd7d1cfeb2bf534e649c8f6c690051471176984d6d75bfa8c64"} Dec 10 11:12:36 crc kubenswrapper[4780]: I1210 11:12:36.200336 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-x98wp" event={"ID":"212dc98e-3ddc-4efb-8300-968ba8c38626","Type":"ContainerStarted","Data":"499d65c4d8b3edc76ff008319ed38533470be8088e64e1c3386f934ff98b9776"} Dec 10 11:12:36 crc kubenswrapper[4780]: I1210 11:12:36.210415 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-d8nw2" event={"ID":"200a4e5a-e0a1-4e36-8843-147fcf1de0b4","Type":"ContainerStarted","Data":"c9e0906bac7ef2e7f92e16979c48b39d3285cd13178ad4d7c16641abc15d666f"} Dec 10 11:12:36 crc kubenswrapper[4780]: E1210 11:12:36.243265 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-state-metrics\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.k8s.io/kube-state-metrics/kube-state-metrics:v2.15.0\\\"\"" pod="openstack/kube-state-metrics-0" podUID="82752cbd-d657-4c6d-94f6-e9e75a4452c2" Dec 10 11:12:37 crc kubenswrapper[4780]: I1210 11:12:37.227229 4780 generic.go:334] "Generic (PLEG): container finished" podID="ea884c19-2c44-4387-937a-61cc5c9aa861" containerID="5b494c12cdf05c7bbec2175886a8193f26bbe3f59065417e4086c55eb250cf30" exitCode=0 Dec 10 11:12:37 crc kubenswrapper[4780]: I1210 11:12:37.227655 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7fd796d7df-trtrt" event={"ID":"ea884c19-2c44-4387-937a-61cc5c9aa861","Type":"ContainerDied","Data":"5b494c12cdf05c7bbec2175886a8193f26bbe3f59065417e4086c55eb250cf30"} Dec 10 11:12:37 crc kubenswrapper[4780]: I1210 11:12:37.233841 4780 generic.go:334] "Generic (PLEG): container finished" podID="212dc98e-3ddc-4efb-8300-968ba8c38626" containerID="71eb4039d1ca08b1cc21985a283e384cd25949a02d25695aebea24cbd2ad46d8" exitCode=0 Dec 10 11:12:37 crc kubenswrapper[4780]: I1210 11:12:37.233937 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-x98wp" event={"ID":"212dc98e-3ddc-4efb-8300-968ba8c38626","Type":"ContainerDied","Data":"71eb4039d1ca08b1cc21985a283e384cd25949a02d25695aebea24cbd2ad46d8"} Dec 10 11:12:37 crc kubenswrapper[4780]: I1210 11:12:37.240838 4780 generic.go:334] "Generic (PLEG): container finished" podID="200a4e5a-e0a1-4e36-8843-147fcf1de0b4" containerID="a9bf80d57521f9130a8c906cadd4a2019e92ea86c6d3c66204eb8ff31a515645" exitCode=0 Dec 10 11:12:37 crc kubenswrapper[4780]: I1210 11:12:37.240979 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-d8nw2" event={"ID":"200a4e5a-e0a1-4e36-8843-147fcf1de0b4","Type":"ContainerDied","Data":"a9bf80d57521f9130a8c906cadd4a2019e92ea86c6d3c66204eb8ff31a515645"} Dec 10 11:12:37 crc kubenswrapper[4780]: I1210 11:12:37.252266 4780 generic.go:334] "Generic (PLEG): container finished" podID="9a36247f-4fb6-4ec7-9688-aafdb77c1243" containerID="83b317a9232272a9cdac91028a239c2a11ddcf7386bb31fc89962723d5d556b3" exitCode=0 Dec 10 11:12:37 crc kubenswrapper[4780]: I1210 11:12:37.252438 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-86db49b7ff-zl2c8" 
event={"ID":"9a36247f-4fb6-4ec7-9688-aafdb77c1243","Type":"ContainerDied","Data":"83b317a9232272a9cdac91028a239c2a11ddcf7386bb31fc89962723d5d556b3"} Dec 10 11:12:37 crc kubenswrapper[4780]: I1210 11:12:37.267758 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"34111627-23c0-44bc-8b84-8cecac15cea1","Type":"ContainerStarted","Data":"28ce4e77f786c60412ed60e7e138dfc49c0d3b790af61bf5d948b12c5e951d9e"} Dec 10 11:12:37 crc kubenswrapper[4780]: I1210 11:12:37.276395 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"c31145f5-6188-4934-8ceb-a86ac4a0e997","Type":"ContainerStarted","Data":"0e2fb17ef254b303f74829309a3e034b0054c810641a0806519ae454a4cc7cae"} Dec 10 11:12:38 crc kubenswrapper[4780]: I1210 11:12:38.702170 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-x98wp" event={"ID":"212dc98e-3ddc-4efb-8300-968ba8c38626","Type":"ContainerStarted","Data":"5c2fba807e54e05dfd2af3a2ed774932fd34e8f7d29a16542eac79f887cf9c67"} Dec 10 11:12:38 crc kubenswrapper[4780]: I1210 11:12:38.727936 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-d8nw2" event={"ID":"200a4e5a-e0a1-4e36-8843-147fcf1de0b4","Type":"ContainerStarted","Data":"4f96a19c7e961068fe26936d1c9f91d6ba0b50df2234ec005171e7fc37dd0040"} Dec 10 11:12:38 crc kubenswrapper[4780]: I1210 11:12:38.745519 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-86db49b7ff-zl2c8" event={"ID":"9a36247f-4fb6-4ec7-9688-aafdb77c1243","Type":"ContainerStarted","Data":"8a8467c9bf3fe827578e6033e1687ecd47450d4c35fa7663c421090f496acbfe"} Dec 10 11:12:38 crc kubenswrapper[4780]: I1210 11:12:38.746198 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-86db49b7ff-zl2c8" Dec 10 11:12:38 crc kubenswrapper[4780]: I1210 11:12:38.754382 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7fd796d7df-trtrt" event={"ID":"ea884c19-2c44-4387-937a-61cc5c9aa861","Type":"ContainerStarted","Data":"d60b55452dfdf6264000c7b3c47c989af6a55b39db9fa20b3fa3983ad7603f99"} Dec 10 11:12:38 crc kubenswrapper[4780]: I1210 11:12:38.755266 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-7fd796d7df-trtrt" Dec 10 11:12:38 crc kubenswrapper[4780]: I1210 11:12:38.819599 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-7fd796d7df-trtrt" podStartSLOduration=44.880319088 podStartE2EDuration="45.819562965s" podCreationTimestamp="2025-12-10 11:11:53 +0000 UTC" firstStartedPulling="2025-12-10 11:12:35.135259471 +0000 UTC m=+1659.988652914" lastFinishedPulling="2025-12-10 11:12:36.074503348 +0000 UTC m=+1660.927896791" observedRunningTime="2025-12-10 11:12:38.793623184 +0000 UTC m=+1663.647016627" watchObservedRunningTime="2025-12-10 11:12:38.819562965 +0000 UTC m=+1663.672956408" Dec 10 11:12:38 crc kubenswrapper[4780]: I1210 11:12:38.835874 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-86db49b7ff-zl2c8" podStartSLOduration=45.037541458 podStartE2EDuration="45.835845561s" podCreationTimestamp="2025-12-10 11:11:53 +0000 UTC" firstStartedPulling="2025-12-10 11:12:35.271865135 +0000 UTC m=+1660.125258578" lastFinishedPulling="2025-12-10 11:12:36.070169248 +0000 UTC m=+1660.923562681" observedRunningTime="2025-12-10 11:12:38.830456923 +0000 UTC 
m=+1663.683850386" watchObservedRunningTime="2025-12-10 11:12:38.835845561 +0000 UTC m=+1663.689239004" Dec 10 11:12:39 crc kubenswrapper[4780]: I1210 11:12:39.776456 4780 generic.go:334] "Generic (PLEG): container finished" podID="200a4e5a-e0a1-4e36-8843-147fcf1de0b4" containerID="4f96a19c7e961068fe26936d1c9f91d6ba0b50df2234ec005171e7fc37dd0040" exitCode=0 Dec 10 11:12:39 crc kubenswrapper[4780]: I1210 11:12:39.776553 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-d8nw2" event={"ID":"200a4e5a-e0a1-4e36-8843-147fcf1de0b4","Type":"ContainerDied","Data":"4f96a19c7e961068fe26936d1c9f91d6ba0b50df2234ec005171e7fc37dd0040"} Dec 10 11:12:39 crc kubenswrapper[4780]: I1210 11:12:39.782187 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"ee80edf3-0250-44c9-acfb-e2f9a3ce4f4b","Type":"ContainerStarted","Data":"7022a6b60831aa66636b0cac1b288d2f037264625e94f54dbf578d7a0ac89f86"} Dec 10 11:12:41 crc kubenswrapper[4780]: I1210 11:12:41.108235 4780 generic.go:334] "Generic (PLEG): container finished" podID="212dc98e-3ddc-4efb-8300-968ba8c38626" containerID="5c2fba807e54e05dfd2af3a2ed774932fd34e8f7d29a16542eac79f887cf9c67" exitCode=0 Dec 10 11:12:41 crc kubenswrapper[4780]: I1210 11:12:41.109403 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-x98wp" event={"ID":"212dc98e-3ddc-4efb-8300-968ba8c38626","Type":"ContainerDied","Data":"5c2fba807e54e05dfd2af3a2ed774932fd34e8f7d29a16542eac79f887cf9c67"} Dec 10 11:12:42 crc kubenswrapper[4780]: I1210 11:12:42.123016 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/memcached-0" event={"ID":"fe36479c-f1fb-4928-b399-e56c8df9205c","Type":"ContainerStarted","Data":"d95f7e9da5d1c20a25de39c347701c35887fbbea406f2d3f352fbe5e124b83c4"} Dec 10 11:12:42 crc kubenswrapper[4780]: I1210 11:12:42.123793 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/memcached-0" Dec 10 11:12:42 crc kubenswrapper[4780]: I1210 11:12:42.149200 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/memcached-0" podStartSLOduration=6.222344942 podStartE2EDuration="1m6.149176443s" podCreationTimestamp="2025-12-10 11:11:36 +0000 UTC" firstStartedPulling="2025-12-10 11:11:38.890020595 +0000 UTC m=+1603.743414048" lastFinishedPulling="2025-12-10 11:12:38.816852106 +0000 UTC m=+1663.670245549" observedRunningTime="2025-12-10 11:12:42.142167574 +0000 UTC m=+1666.995561017" watchObservedRunningTime="2025-12-10 11:12:42.149176443 +0000 UTC m=+1667.002569886" Dec 10 11:12:44 crc kubenswrapper[4780]: I1210 11:12:44.941247 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-7fd796d7df-trtrt" Dec 10 11:12:44 crc kubenswrapper[4780]: I1210 11:12:44.972899 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-86db49b7ff-zl2c8" Dec 10 11:12:45 crc kubenswrapper[4780]: I1210 11:12:45.220456 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-7fd796d7df-trtrt"] Dec 10 11:12:46 crc kubenswrapper[4780]: I1210 11:12:46.023583 4780 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-7fd796d7df-trtrt" podUID="ea884c19-2c44-4387-937a-61cc5c9aa861" containerName="dnsmasq-dns" containerID="cri-o://d60b55452dfdf6264000c7b3c47c989af6a55b39db9fa20b3fa3983ad7603f99" gracePeriod=10 Dec 10 11:12:47 crc 
kubenswrapper[4780]: I1210 11:12:47.039936 4780 generic.go:334] "Generic (PLEG): container finished" podID="ea884c19-2c44-4387-937a-61cc5c9aa861" containerID="d60b55452dfdf6264000c7b3c47c989af6a55b39db9fa20b3fa3983ad7603f99" exitCode=0 Dec 10 11:12:47 crc kubenswrapper[4780]: I1210 11:12:47.040520 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7fd796d7df-trtrt" event={"ID":"ea884c19-2c44-4387-937a-61cc5c9aa861","Type":"ContainerDied","Data":"d60b55452dfdf6264000c7b3c47c989af6a55b39db9fa20b3fa3983ad7603f99"} Dec 10 11:12:47 crc kubenswrapper[4780]: I1210 11:12:47.280234 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/memcached-0" Dec 10 11:12:47 crc kubenswrapper[4780]: I1210 11:12:47.867783 4780 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7fd796d7df-trtrt" Dec 10 11:12:47 crc kubenswrapper[4780]: I1210 11:12:47.981546 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4qh99\" (UniqueName: \"kubernetes.io/projected/ea884c19-2c44-4387-937a-61cc5c9aa861-kube-api-access-4qh99\") pod \"ea884c19-2c44-4387-937a-61cc5c9aa861\" (UID: \"ea884c19-2c44-4387-937a-61cc5c9aa861\") " Dec 10 11:12:47 crc kubenswrapper[4780]: I1210 11:12:47.981968 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ea884c19-2c44-4387-937a-61cc5c9aa861-config\") pod \"ea884c19-2c44-4387-937a-61cc5c9aa861\" (UID: \"ea884c19-2c44-4387-937a-61cc5c9aa861\") " Dec 10 11:12:47 crc kubenswrapper[4780]: I1210 11:12:47.982151 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ea884c19-2c44-4387-937a-61cc5c9aa861-dns-svc\") pod \"ea884c19-2c44-4387-937a-61cc5c9aa861\" (UID: \"ea884c19-2c44-4387-937a-61cc5c9aa861\") " Dec 10 11:12:47 crc kubenswrapper[4780]: I1210 11:12:47.982240 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/ea884c19-2c44-4387-937a-61cc5c9aa861-ovsdbserver-nb\") pod \"ea884c19-2c44-4387-937a-61cc5c9aa861\" (UID: \"ea884c19-2c44-4387-937a-61cc5c9aa861\") " Dec 10 11:12:47 crc kubenswrapper[4780]: I1210 11:12:47.994672 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ea884c19-2c44-4387-937a-61cc5c9aa861-kube-api-access-4qh99" (OuterVolumeSpecName: "kube-api-access-4qh99") pod "ea884c19-2c44-4387-937a-61cc5c9aa861" (UID: "ea884c19-2c44-4387-937a-61cc5c9aa861"). InnerVolumeSpecName "kube-api-access-4qh99". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:12:48 crc kubenswrapper[4780]: I1210 11:12:48.064544 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ea884c19-2c44-4387-937a-61cc5c9aa861-config" (OuterVolumeSpecName: "config") pod "ea884c19-2c44-4387-937a-61cc5c9aa861" (UID: "ea884c19-2c44-4387-937a-61cc5c9aa861"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 11:12:48 crc kubenswrapper[4780]: I1210 11:12:48.069996 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ea884c19-2c44-4387-937a-61cc5c9aa861-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "ea884c19-2c44-4387-937a-61cc5c9aa861" (UID: "ea884c19-2c44-4387-937a-61cc5c9aa861"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 11:12:48 crc kubenswrapper[4780]: I1210 11:12:48.070203 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7fd796d7df-trtrt" event={"ID":"ea884c19-2c44-4387-937a-61cc5c9aa861","Type":"ContainerDied","Data":"ac116eeb70d9da32c1688a89a0a2c6cbf44d146184f0e589384985257ce94f9e"} Dec 10 11:12:48 crc kubenswrapper[4780]: I1210 11:12:48.070283 4780 scope.go:117] "RemoveContainer" containerID="d60b55452dfdf6264000c7b3c47c989af6a55b39db9fa20b3fa3983ad7603f99" Dec 10 11:12:48 crc kubenswrapper[4780]: I1210 11:12:48.070574 4780 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7fd796d7df-trtrt" Dec 10 11:12:48 crc kubenswrapper[4780]: I1210 11:12:48.078567 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ea884c19-2c44-4387-937a-61cc5c9aa861-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "ea884c19-2c44-4387-937a-61cc5c9aa861" (UID: "ea884c19-2c44-4387-937a-61cc5c9aa861"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 11:12:48 crc kubenswrapper[4780]: I1210 11:12:48.090775 4780 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/ea884c19-2c44-4387-937a-61cc5c9aa861-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Dec 10 11:12:48 crc kubenswrapper[4780]: I1210 11:12:48.093183 4780 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4qh99\" (UniqueName: \"kubernetes.io/projected/ea884c19-2c44-4387-937a-61cc5c9aa861-kube-api-access-4qh99\") on node \"crc\" DevicePath \"\"" Dec 10 11:12:48 crc kubenswrapper[4780]: I1210 11:12:48.093299 4780 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ea884c19-2c44-4387-937a-61cc5c9aa861-config\") on node \"crc\" DevicePath \"\"" Dec 10 11:12:48 crc kubenswrapper[4780]: I1210 11:12:48.093319 4780 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ea884c19-2c44-4387-937a-61cc5c9aa861-dns-svc\") on node \"crc\" DevicePath \"\"" Dec 10 11:12:48 crc kubenswrapper[4780]: I1210 11:12:48.314363 4780 scope.go:117] "RemoveContainer" containerID="5b494c12cdf05c7bbec2175886a8193f26bbe3f59065417e4086c55eb250cf30" Dec 10 11:12:48 crc kubenswrapper[4780]: I1210 11:12:48.791886 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-7fd796d7df-trtrt"] Dec 10 11:12:48 crc kubenswrapper[4780]: I1210 11:12:48.810432 4780 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-7fd796d7df-trtrt"] Dec 10 11:12:49 crc kubenswrapper[4780]: I1210 11:12:49.091898 4780 generic.go:334] "Generic (PLEG): container finished" podID="c31145f5-6188-4934-8ceb-a86ac4a0e997" containerID="0e2fb17ef254b303f74829309a3e034b0054c810641a0806519ae454a4cc7cae" exitCode=0 Dec 10 11:12:49 crc kubenswrapper[4780]: I1210 11:12:49.092463 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"c31145f5-6188-4934-8ceb-a86ac4a0e997","Type":"ContainerDied","Data":"0e2fb17ef254b303f74829309a3e034b0054c810641a0806519ae454a4cc7cae"} Dec 10 11:12:49 crc kubenswrapper[4780]: I1210 11:12:49.982786 4780 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ea884c19-2c44-4387-937a-61cc5c9aa861" path="/var/lib/kubelet/pods/ea884c19-2c44-4387-937a-61cc5c9aa861/volumes" Dec 
10 11:12:50 crc kubenswrapper[4780]: I1210 11:12:50.127429 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-d8nw2" event={"ID":"200a4e5a-e0a1-4e36-8843-147fcf1de0b4","Type":"ContainerStarted","Data":"19fd7d2dee1bc4e5995f821bcaaa82497fca8f4ce333d741a50ee82d98bbf683"} Dec 10 11:12:50 crc kubenswrapper[4780]: I1210 11:12:50.129322 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-hzgvf" event={"ID":"1c086cc9-263e-4d8e-b3fb-a64fea7f179c","Type":"ContainerStarted","Data":"5170f30541367b32144ad9f3f93966e4f95459adc815afbbb4c9569ce5828d08"} Dec 10 11:12:50 crc kubenswrapper[4780]: I1210 11:12:50.131475 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"34111627-23c0-44bc-8b84-8cecac15cea1","Type":"ContainerStarted","Data":"48ba261593f0035cfa459361f414966744c7ab4d0a5f3a1ab76002b5670223de"} Dec 10 11:12:50 crc kubenswrapper[4780]: I1210 11:12:50.133221 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"1b0a6811-e7b0-4c35-b54f-6b7a457b68d1","Type":"ContainerStarted","Data":"be765e33a493e859ec179a9526874dbee230864d1c28d720d3e5733771539527"} Dec 10 11:12:50 crc kubenswrapper[4780]: I1210 11:12:50.138407 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-x98wp" event={"ID":"212dc98e-3ddc-4efb-8300-968ba8c38626","Type":"ContainerStarted","Data":"d75ad5c0ee78653752504b1129a63b9c964506969dabbe920ad3b01a94cdc506"} Dec 10 11:12:50 crc kubenswrapper[4780]: I1210 11:12:50.154970 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-d8nw2" podStartSLOduration=11.310343804 podStartE2EDuration="22.154941794s" podCreationTimestamp="2025-12-10 11:12:28 +0000 UTC" firstStartedPulling="2025-12-10 11:12:37.243812684 +0000 UTC m=+1662.097206127" lastFinishedPulling="2025-12-10 11:12:48.088410674 +0000 UTC m=+1672.941804117" observedRunningTime="2025-12-10 11:12:50.152987124 +0000 UTC m=+1675.006380587" watchObservedRunningTime="2025-12-10 11:12:50.154941794 +0000 UTC m=+1675.008335257" Dec 10 11:12:50 crc kubenswrapper[4780]: I1210 11:12:50.210767 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovsdbserver-sb-0" podStartSLOduration=21.616131343 podStartE2EDuration="1m3.210731267s" podCreationTimestamp="2025-12-10 11:11:47 +0000 UTC" firstStartedPulling="2025-12-10 11:12:06.71922669 +0000 UTC m=+1631.572620133" lastFinishedPulling="2025-12-10 11:12:48.313826614 +0000 UTC m=+1673.167220057" observedRunningTime="2025-12-10 11:12:50.200573908 +0000 UTC m=+1675.053967351" watchObservedRunningTime="2025-12-10 11:12:50.210731267 +0000 UTC m=+1675.064124700" Dec 10 11:12:51 crc kubenswrapper[4780]: I1210 11:12:51.404659 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-698758b865-86zlx"] Dec 10 11:12:51 crc kubenswrapper[4780]: E1210 11:12:51.406142 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ea884c19-2c44-4387-937a-61cc5c9aa861" containerName="init" Dec 10 11:12:51 crc kubenswrapper[4780]: I1210 11:12:51.406179 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="ea884c19-2c44-4387-937a-61cc5c9aa861" containerName="init" Dec 10 11:12:51 crc kubenswrapper[4780]: E1210 11:12:51.406212 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ea884c19-2c44-4387-937a-61cc5c9aa861" containerName="dnsmasq-dns" Dec 
10 11:12:51 crc kubenswrapper[4780]: I1210 11:12:51.406223 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="ea884c19-2c44-4387-937a-61cc5c9aa861" containerName="dnsmasq-dns" Dec 10 11:12:51 crc kubenswrapper[4780]: E1210 11:12:51.406259 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="21a7de0d-bac7-4258-8062-414b665097c9" containerName="console" Dec 10 11:12:51 crc kubenswrapper[4780]: I1210 11:12:51.406269 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="21a7de0d-bac7-4258-8062-414b665097c9" containerName="console" Dec 10 11:12:51 crc kubenswrapper[4780]: I1210 11:12:51.406634 4780 memory_manager.go:354] "RemoveStaleState removing state" podUID="21a7de0d-bac7-4258-8062-414b665097c9" containerName="console" Dec 10 11:12:51 crc kubenswrapper[4780]: I1210 11:12:51.406670 4780 memory_manager.go:354] "RemoveStaleState removing state" podUID="ea884c19-2c44-4387-937a-61cc5c9aa861" containerName="dnsmasq-dns" Dec 10 11:12:51 crc kubenswrapper[4780]: I1210 11:12:51.408525 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-698758b865-86zlx" Dec 10 11:12:51 crc kubenswrapper[4780]: I1210 11:12:51.438883 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-698758b865-86zlx"] Dec 10 11:12:51 crc kubenswrapper[4780]: I1210 11:12:51.516059 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/0d1d0d57-d4fe-4b96-b59b-0dad3be263d7-ovsdbserver-nb\") pod \"dnsmasq-dns-698758b865-86zlx\" (UID: \"0d1d0d57-d4fe-4b96-b59b-0dad3be263d7\") " pod="openstack/dnsmasq-dns-698758b865-86zlx" Dec 10 11:12:51 crc kubenswrapper[4780]: I1210 11:12:51.516134 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/0d1d0d57-d4fe-4b96-b59b-0dad3be263d7-dns-svc\") pod \"dnsmasq-dns-698758b865-86zlx\" (UID: \"0d1d0d57-d4fe-4b96-b59b-0dad3be263d7\") " pod="openstack/dnsmasq-dns-698758b865-86zlx" Dec 10 11:12:51 crc kubenswrapper[4780]: I1210 11:12:51.516177 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4zgn4\" (UniqueName: \"kubernetes.io/projected/0d1d0d57-d4fe-4b96-b59b-0dad3be263d7-kube-api-access-4zgn4\") pod \"dnsmasq-dns-698758b865-86zlx\" (UID: \"0d1d0d57-d4fe-4b96-b59b-0dad3be263d7\") " pod="openstack/dnsmasq-dns-698758b865-86zlx" Dec 10 11:12:51 crc kubenswrapper[4780]: I1210 11:12:51.516307 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0d1d0d57-d4fe-4b96-b59b-0dad3be263d7-config\") pod \"dnsmasq-dns-698758b865-86zlx\" (UID: \"0d1d0d57-d4fe-4b96-b59b-0dad3be263d7\") " pod="openstack/dnsmasq-dns-698758b865-86zlx" Dec 10 11:12:51 crc kubenswrapper[4780]: I1210 11:12:51.516347 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/0d1d0d57-d4fe-4b96-b59b-0dad3be263d7-ovsdbserver-sb\") pod \"dnsmasq-dns-698758b865-86zlx\" (UID: \"0d1d0d57-d4fe-4b96-b59b-0dad3be263d7\") " pod="openstack/dnsmasq-dns-698758b865-86zlx" Dec 10 11:12:51 crc kubenswrapper[4780]: I1210 11:12:51.619386 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: 
\"kubernetes.io/configmap/0d1d0d57-d4fe-4b96-b59b-0dad3be263d7-ovsdbserver-nb\") pod \"dnsmasq-dns-698758b865-86zlx\" (UID: \"0d1d0d57-d4fe-4b96-b59b-0dad3be263d7\") " pod="openstack/dnsmasq-dns-698758b865-86zlx" Dec 10 11:12:51 crc kubenswrapper[4780]: I1210 11:12:51.619478 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/0d1d0d57-d4fe-4b96-b59b-0dad3be263d7-dns-svc\") pod \"dnsmasq-dns-698758b865-86zlx\" (UID: \"0d1d0d57-d4fe-4b96-b59b-0dad3be263d7\") " pod="openstack/dnsmasq-dns-698758b865-86zlx" Dec 10 11:12:51 crc kubenswrapper[4780]: I1210 11:12:51.619534 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4zgn4\" (UniqueName: \"kubernetes.io/projected/0d1d0d57-d4fe-4b96-b59b-0dad3be263d7-kube-api-access-4zgn4\") pod \"dnsmasq-dns-698758b865-86zlx\" (UID: \"0d1d0d57-d4fe-4b96-b59b-0dad3be263d7\") " pod="openstack/dnsmasq-dns-698758b865-86zlx" Dec 10 11:12:51 crc kubenswrapper[4780]: I1210 11:12:51.619652 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0d1d0d57-d4fe-4b96-b59b-0dad3be263d7-config\") pod \"dnsmasq-dns-698758b865-86zlx\" (UID: \"0d1d0d57-d4fe-4b96-b59b-0dad3be263d7\") " pod="openstack/dnsmasq-dns-698758b865-86zlx" Dec 10 11:12:51 crc kubenswrapper[4780]: I1210 11:12:51.619692 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/0d1d0d57-d4fe-4b96-b59b-0dad3be263d7-ovsdbserver-sb\") pod \"dnsmasq-dns-698758b865-86zlx\" (UID: \"0d1d0d57-d4fe-4b96-b59b-0dad3be263d7\") " pod="openstack/dnsmasq-dns-698758b865-86zlx" Dec 10 11:12:51 crc kubenswrapper[4780]: I1210 11:12:51.620871 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/0d1d0d57-d4fe-4b96-b59b-0dad3be263d7-ovsdbserver-sb\") pod \"dnsmasq-dns-698758b865-86zlx\" (UID: \"0d1d0d57-d4fe-4b96-b59b-0dad3be263d7\") " pod="openstack/dnsmasq-dns-698758b865-86zlx" Dec 10 11:12:51 crc kubenswrapper[4780]: I1210 11:12:51.621578 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/0d1d0d57-d4fe-4b96-b59b-0dad3be263d7-ovsdbserver-nb\") pod \"dnsmasq-dns-698758b865-86zlx\" (UID: \"0d1d0d57-d4fe-4b96-b59b-0dad3be263d7\") " pod="openstack/dnsmasq-dns-698758b865-86zlx" Dec 10 11:12:51 crc kubenswrapper[4780]: I1210 11:12:51.622226 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/0d1d0d57-d4fe-4b96-b59b-0dad3be263d7-dns-svc\") pod \"dnsmasq-dns-698758b865-86zlx\" (UID: \"0d1d0d57-d4fe-4b96-b59b-0dad3be263d7\") " pod="openstack/dnsmasq-dns-698758b865-86zlx" Dec 10 11:12:51 crc kubenswrapper[4780]: I1210 11:12:51.623298 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0d1d0d57-d4fe-4b96-b59b-0dad3be263d7-config\") pod \"dnsmasq-dns-698758b865-86zlx\" (UID: \"0d1d0d57-d4fe-4b96-b59b-0dad3be263d7\") " pod="openstack/dnsmasq-dns-698758b865-86zlx" Dec 10 11:12:52 crc kubenswrapper[4780]: I1210 11:12:52.320233 4780 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/ovsdbserver-sb-0" Dec 10 11:12:53 crc kubenswrapper[4780]: I1210 11:12:53.202701 4780 kubelet.go:2421] "SyncLoop ADD" source="api" 
pods=["openstack/swift-storage-0"] Dec 10 11:12:53 crc kubenswrapper[4780]: I1210 11:12:53.213166 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-storage-0" Dec 10 11:12:53 crc kubenswrapper[4780]: I1210 11:12:53.223948 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-storage-config-data" Dec 10 11:12:53 crc kubenswrapper[4780]: I1210 11:12:53.224367 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-swift-dockercfg-zbxrn" Dec 10 11:12:53 crc kubenswrapper[4780]: I1210 11:12:53.224500 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-conf" Dec 10 11:12:53 crc kubenswrapper[4780]: I1210 11:12:53.224706 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-ring-files" Dec 10 11:12:53 crc kubenswrapper[4780]: I1210 11:12:53.766877 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/3e1a1225-bdae-4dcb-b10a-02504fe590cd-etc-swift\") pod \"swift-storage-0\" (UID: \"3e1a1225-bdae-4dcb-b10a-02504fe590cd\") " pod="openstack/swift-storage-0" Dec 10 11:12:53 crc kubenswrapper[4780]: I1210 11:12:53.767172 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/3e1a1225-bdae-4dcb-b10a-02504fe590cd-lock\") pod \"swift-storage-0\" (UID: \"3e1a1225-bdae-4dcb-b10a-02504fe590cd\") " pod="openstack/swift-storage-0" Dec 10 11:12:53 crc kubenswrapper[4780]: I1210 11:12:53.780107 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"swift-storage-0\" (UID: \"3e1a1225-bdae-4dcb-b10a-02504fe590cd\") " pod="openstack/swift-storage-0" Dec 10 11:12:53 crc kubenswrapper[4780]: I1210 11:12:53.780305 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wptnd\" (UniqueName: \"kubernetes.io/projected/3e1a1225-bdae-4dcb-b10a-02504fe590cd-kube-api-access-wptnd\") pod \"swift-storage-0\" (UID: \"3e1a1225-bdae-4dcb-b10a-02504fe590cd\") " pod="openstack/swift-storage-0" Dec 10 11:12:53 crc kubenswrapper[4780]: I1210 11:12:53.780394 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/3e1a1225-bdae-4dcb-b10a-02504fe590cd-cache\") pod \"swift-storage-0\" (UID: \"3e1a1225-bdae-4dcb-b10a-02504fe590cd\") " pod="openstack/swift-storage-0" Dec 10 11:12:53 crc kubenswrapper[4780]: I1210 11:12:53.799012 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-storage-0"] Dec 10 11:12:53 crc kubenswrapper[4780]: I1210 11:12:53.825364 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/swift-ring-rebalance-dtfn8"] Dec 10 11:12:53 crc kubenswrapper[4780]: I1210 11:12:53.827205 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-ring-rebalance-dtfn8" Dec 10 11:12:53 crc kubenswrapper[4780]: I1210 11:12:53.831995 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-ring-scripts" Dec 10 11:12:53 crc kubenswrapper[4780]: I1210 11:12:53.832947 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-proxy-config-data" Dec 10 11:12:53 crc kubenswrapper[4780]: I1210 11:12:53.833315 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-ring-config-data" Dec 10 11:12:53 crc kubenswrapper[4780]: I1210 11:12:53.849859 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-ring-rebalance-dtfn8"] Dec 10 11:12:53 crc kubenswrapper[4780]: I1210 11:12:53.889705 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/3f1563d1-a52d-47ac-916b-b9fe05f59b28-etc-swift\") pod \"swift-ring-rebalance-dtfn8\" (UID: \"3f1563d1-a52d-47ac-916b-b9fe05f59b28\") " pod="openstack/swift-ring-rebalance-dtfn8" Dec 10 11:12:53 crc kubenswrapper[4780]: I1210 11:12:53.889825 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"swift-storage-0\" (UID: \"3e1a1225-bdae-4dcb-b10a-02504fe590cd\") " pod="openstack/swift-storage-0" Dec 10 11:12:53 crc kubenswrapper[4780]: I1210 11:12:53.889886 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3f1563d1-a52d-47ac-916b-b9fe05f59b28-combined-ca-bundle\") pod \"swift-ring-rebalance-dtfn8\" (UID: \"3f1563d1-a52d-47ac-916b-b9fe05f59b28\") " pod="openstack/swift-ring-rebalance-dtfn8" Dec 10 11:12:53 crc kubenswrapper[4780]: I1210 11:12:53.890009 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wptnd\" (UniqueName: \"kubernetes.io/projected/3e1a1225-bdae-4dcb-b10a-02504fe590cd-kube-api-access-wptnd\") pod \"swift-storage-0\" (UID: \"3e1a1225-bdae-4dcb-b10a-02504fe590cd\") " pod="openstack/swift-storage-0" Dec 10 11:12:53 crc kubenswrapper[4780]: I1210 11:12:53.890070 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/3f1563d1-a52d-47ac-916b-b9fe05f59b28-scripts\") pod \"swift-ring-rebalance-dtfn8\" (UID: \"3f1563d1-a52d-47ac-916b-b9fe05f59b28\") " pod="openstack/swift-ring-rebalance-dtfn8" Dec 10 11:12:53 crc kubenswrapper[4780]: I1210 11:12:53.890110 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/3e1a1225-bdae-4dcb-b10a-02504fe590cd-cache\") pod \"swift-storage-0\" (UID: \"3e1a1225-bdae-4dcb-b10a-02504fe590cd\") " pod="openstack/swift-storage-0" Dec 10 11:12:53 crc kubenswrapper[4780]: I1210 11:12:53.890187 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/3f1563d1-a52d-47ac-916b-b9fe05f59b28-ring-data-devices\") pod \"swift-ring-rebalance-dtfn8\" (UID: \"3f1563d1-a52d-47ac-916b-b9fe05f59b28\") " pod="openstack/swift-ring-rebalance-dtfn8" Dec 10 11:12:53 crc kubenswrapper[4780]: I1210 11:12:53.890228 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for 
volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/3f1563d1-a52d-47ac-916b-b9fe05f59b28-swiftconf\") pod \"swift-ring-rebalance-dtfn8\" (UID: \"3f1563d1-a52d-47ac-916b-b9fe05f59b28\") " pod="openstack/swift-ring-rebalance-dtfn8" Dec 10 11:12:53 crc kubenswrapper[4780]: I1210 11:12:53.890270 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hq7cj\" (UniqueName: \"kubernetes.io/projected/3f1563d1-a52d-47ac-916b-b9fe05f59b28-kube-api-access-hq7cj\") pod \"swift-ring-rebalance-dtfn8\" (UID: \"3f1563d1-a52d-47ac-916b-b9fe05f59b28\") " pod="openstack/swift-ring-rebalance-dtfn8" Dec 10 11:12:53 crc kubenswrapper[4780]: I1210 11:12:53.890909 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/3e1a1225-bdae-4dcb-b10a-02504fe590cd-cache\") pod \"swift-storage-0\" (UID: \"3e1a1225-bdae-4dcb-b10a-02504fe590cd\") " pod="openstack/swift-storage-0" Dec 10 11:12:53 crc kubenswrapper[4780]: I1210 11:12:53.891800 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/3e1a1225-bdae-4dcb-b10a-02504fe590cd-etc-swift\") pod \"swift-storage-0\" (UID: \"3e1a1225-bdae-4dcb-b10a-02504fe590cd\") " pod="openstack/swift-storage-0" Dec 10 11:12:53 crc kubenswrapper[4780]: I1210 11:12:53.891843 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/3f1563d1-a52d-47ac-916b-b9fe05f59b28-dispersionconf\") pod \"swift-ring-rebalance-dtfn8\" (UID: \"3f1563d1-a52d-47ac-916b-b9fe05f59b28\") " pod="openstack/swift-ring-rebalance-dtfn8" Dec 10 11:12:53 crc kubenswrapper[4780]: I1210 11:12:53.891986 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/3e1a1225-bdae-4dcb-b10a-02504fe590cd-lock\") pod \"swift-storage-0\" (UID: \"3e1a1225-bdae-4dcb-b10a-02504fe590cd\") " pod="openstack/swift-storage-0" Dec 10 11:12:53 crc kubenswrapper[4780]: I1210 11:12:53.892587 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/3e1a1225-bdae-4dcb-b10a-02504fe590cd-lock\") pod \"swift-storage-0\" (UID: \"3e1a1225-bdae-4dcb-b10a-02504fe590cd\") " pod="openstack/swift-storage-0" Dec 10 11:12:53 crc kubenswrapper[4780]: E1210 11:12:53.892725 4780 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Dec 10 11:12:53 crc kubenswrapper[4780]: E1210 11:12:53.892740 4780 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Dec 10 11:12:53 crc kubenswrapper[4780]: E1210 11:12:53.892822 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3e1a1225-bdae-4dcb-b10a-02504fe590cd-etc-swift podName:3e1a1225-bdae-4dcb-b10a-02504fe590cd nodeName:}" failed. No retries permitted until 2025-12-10 11:12:54.392792825 +0000 UTC m=+1679.246186268 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/3e1a1225-bdae-4dcb-b10a-02504fe590cd-etc-swift") pod "swift-storage-0" (UID: "3e1a1225-bdae-4dcb-b10a-02504fe590cd") : configmap "swift-ring-files" not found Dec 10 11:12:53 crc kubenswrapper[4780]: I1210 11:12:53.896495 4780 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"swift-storage-0\" (UID: \"3e1a1225-bdae-4dcb-b10a-02504fe590cd\") device mount path \"/mnt/openstack/pv06\"" pod="openstack/swift-storage-0" Dec 10 11:12:53 crc kubenswrapper[4780]: I1210 11:12:53.945523 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/swift-ring-rebalance-5zqjq"] Dec 10 11:12:53 crc kubenswrapper[4780]: I1210 11:12:53.950857 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-ring-rebalance-5zqjq" Dec 10 11:12:53 crc kubenswrapper[4780]: I1210 11:12:53.982703 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/swift-ring-rebalance-dtfn8"] Dec 10 11:12:53 crc kubenswrapper[4780]: E1210 11:12:53.984895 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="unmounted volumes=[combined-ca-bundle dispersionconf etc-swift kube-api-access-hq7cj ring-data-devices scripts swiftconf], unattached volumes=[], failed to process volumes=[]: context canceled" pod="openstack/swift-ring-rebalance-dtfn8" podUID="3f1563d1-a52d-47ac-916b-b9fe05f59b28" Dec 10 11:12:53 crc kubenswrapper[4780]: I1210 11:12:53.996936 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-ring-rebalance-5zqjq"] Dec 10 11:12:53 crc kubenswrapper[4780]: I1210 11:12:53.998904 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3f1563d1-a52d-47ac-916b-b9fe05f59b28-combined-ca-bundle\") pod \"swift-ring-rebalance-dtfn8\" (UID: \"3f1563d1-a52d-47ac-916b-b9fe05f59b28\") " pod="openstack/swift-ring-rebalance-dtfn8" Dec 10 11:12:53 crc kubenswrapper[4780]: I1210 11:12:53.999088 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/3f1563d1-a52d-47ac-916b-b9fe05f59b28-scripts\") pod \"swift-ring-rebalance-dtfn8\" (UID: \"3f1563d1-a52d-47ac-916b-b9fe05f59b28\") " pod="openstack/swift-ring-rebalance-dtfn8" Dec 10 11:12:53 crc kubenswrapper[4780]: I1210 11:12:53.999144 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/72627815-752d-44a8-96cc-428f0239411d-scripts\") pod \"swift-ring-rebalance-5zqjq\" (UID: \"72627815-752d-44a8-96cc-428f0239411d\") " pod="openstack/swift-ring-rebalance-5zqjq" Dec 10 11:12:53 crc kubenswrapper[4780]: I1210 11:12:53.999705 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/72627815-752d-44a8-96cc-428f0239411d-dispersionconf\") pod \"swift-ring-rebalance-5zqjq\" (UID: \"72627815-752d-44a8-96cc-428f0239411d\") " pod="openstack/swift-ring-rebalance-5zqjq" Dec 10 11:12:53 crc kubenswrapper[4780]: I1210 11:12:53.999787 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/3f1563d1-a52d-47ac-916b-b9fe05f59b28-ring-data-devices\") pod \"swift-ring-rebalance-dtfn8\" 
(UID: \"3f1563d1-a52d-47ac-916b-b9fe05f59b28\") " pod="openstack/swift-ring-rebalance-dtfn8" Dec 10 11:12:54 crc kubenswrapper[4780]: I1210 11:12:54.000060 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hq7cj\" (UniqueName: \"kubernetes.io/projected/3f1563d1-a52d-47ac-916b-b9fe05f59b28-kube-api-access-hq7cj\") pod \"swift-ring-rebalance-dtfn8\" (UID: \"3f1563d1-a52d-47ac-916b-b9fe05f59b28\") " pod="openstack/swift-ring-rebalance-dtfn8" Dec 10 11:12:54 crc kubenswrapper[4780]: I1210 11:12:54.000130 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/3f1563d1-a52d-47ac-916b-b9fe05f59b28-swiftconf\") pod \"swift-ring-rebalance-dtfn8\" (UID: \"3f1563d1-a52d-47ac-916b-b9fe05f59b28\") " pod="openstack/swift-ring-rebalance-dtfn8" Dec 10 11:12:54 crc kubenswrapper[4780]: I1210 11:12:54.000274 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/3f1563d1-a52d-47ac-916b-b9fe05f59b28-dispersionconf\") pod \"swift-ring-rebalance-dtfn8\" (UID: \"3f1563d1-a52d-47ac-916b-b9fe05f59b28\") " pod="openstack/swift-ring-rebalance-dtfn8" Dec 10 11:12:54 crc kubenswrapper[4780]: I1210 11:12:54.000348 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/72627815-752d-44a8-96cc-428f0239411d-ring-data-devices\") pod \"swift-ring-rebalance-5zqjq\" (UID: \"72627815-752d-44a8-96cc-428f0239411d\") " pod="openstack/swift-ring-rebalance-5zqjq" Dec 10 11:12:54 crc kubenswrapper[4780]: I1210 11:12:54.000400 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/72627815-752d-44a8-96cc-428f0239411d-combined-ca-bundle\") pod \"swift-ring-rebalance-5zqjq\" (UID: \"72627815-752d-44a8-96cc-428f0239411d\") " pod="openstack/swift-ring-rebalance-5zqjq" Dec 10 11:12:54 crc kubenswrapper[4780]: I1210 11:12:54.000531 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/72627815-752d-44a8-96cc-428f0239411d-etc-swift\") pod \"swift-ring-rebalance-5zqjq\" (UID: \"72627815-752d-44a8-96cc-428f0239411d\") " pod="openstack/swift-ring-rebalance-5zqjq" Dec 10 11:12:54 crc kubenswrapper[4780]: I1210 11:12:54.000601 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/72627815-752d-44a8-96cc-428f0239411d-swiftconf\") pod \"swift-ring-rebalance-5zqjq\" (UID: \"72627815-752d-44a8-96cc-428f0239411d\") " pod="openstack/swift-ring-rebalance-5zqjq" Dec 10 11:12:54 crc kubenswrapper[4780]: I1210 11:12:54.000651 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/3f1563d1-a52d-47ac-916b-b9fe05f59b28-etc-swift\") pod \"swift-ring-rebalance-dtfn8\" (UID: \"3f1563d1-a52d-47ac-916b-b9fe05f59b28\") " pod="openstack/swift-ring-rebalance-dtfn8" Dec 10 11:12:54 crc kubenswrapper[4780]: I1210 11:12:54.000788 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5l8ng\" (UniqueName: \"kubernetes.io/projected/72627815-752d-44a8-96cc-428f0239411d-kube-api-access-5l8ng\") pod \"swift-ring-rebalance-5zqjq\" 
(UID: \"72627815-752d-44a8-96cc-428f0239411d\") " pod="openstack/swift-ring-rebalance-5zqjq" Dec 10 11:12:54 crc kubenswrapper[4780]: I1210 11:12:54.001939 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/3f1563d1-a52d-47ac-916b-b9fe05f59b28-etc-swift\") pod \"swift-ring-rebalance-dtfn8\" (UID: \"3f1563d1-a52d-47ac-916b-b9fe05f59b28\") " pod="openstack/swift-ring-rebalance-dtfn8" Dec 10 11:12:54 crc kubenswrapper[4780]: I1210 11:12:54.002357 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/3f1563d1-a52d-47ac-916b-b9fe05f59b28-ring-data-devices\") pod \"swift-ring-rebalance-dtfn8\" (UID: \"3f1563d1-a52d-47ac-916b-b9fe05f59b28\") " pod="openstack/swift-ring-rebalance-dtfn8" Dec 10 11:12:54 crc kubenswrapper[4780]: I1210 11:12:54.003197 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/3f1563d1-a52d-47ac-916b-b9fe05f59b28-scripts\") pod \"swift-ring-rebalance-dtfn8\" (UID: \"3f1563d1-a52d-47ac-916b-b9fe05f59b28\") " pod="openstack/swift-ring-rebalance-dtfn8" Dec 10 11:12:54 crc kubenswrapper[4780]: I1210 11:12:54.003524 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-x98wp" podStartSLOduration=28.857765278 podStartE2EDuration="40.003507499s" podCreationTimestamp="2025-12-10 11:12:14 +0000 UTC" firstStartedPulling="2025-12-10 11:12:37.236408345 +0000 UTC m=+1662.089801788" lastFinishedPulling="2025-12-10 11:12:48.382150566 +0000 UTC m=+1673.235544009" observedRunningTime="2025-12-10 11:12:53.849829969 +0000 UTC m=+1678.703223402" watchObservedRunningTime="2025-12-10 11:12:54.003507499 +0000 UTC m=+1678.856900942" Dec 10 11:12:54 crc kubenswrapper[4780]: I1210 11:12:54.018107 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovsdbserver-nb-0" podStartSLOduration=14.624059283 podStartE2EDuration="1m12.01806612s" podCreationTimestamp="2025-12-10 11:11:42 +0000 UTC" firstStartedPulling="2025-12-10 11:11:50.987144154 +0000 UTC m=+1615.840537597" lastFinishedPulling="2025-12-10 11:12:48.381150991 +0000 UTC m=+1673.234544434" observedRunningTime="2025-12-10 11:12:53.892701772 +0000 UTC m=+1678.746095215" watchObservedRunningTime="2025-12-10 11:12:54.01806612 +0000 UTC m=+1678.871459563" Dec 10 11:12:54 crc kubenswrapper[4780]: I1210 11:12:54.105629 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/72627815-752d-44a8-96cc-428f0239411d-ring-data-devices\") pod \"swift-ring-rebalance-5zqjq\" (UID: \"72627815-752d-44a8-96cc-428f0239411d\") " pod="openstack/swift-ring-rebalance-5zqjq" Dec 10 11:12:54 crc kubenswrapper[4780]: I1210 11:12:54.105700 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/72627815-752d-44a8-96cc-428f0239411d-combined-ca-bundle\") pod \"swift-ring-rebalance-5zqjq\" (UID: \"72627815-752d-44a8-96cc-428f0239411d\") " pod="openstack/swift-ring-rebalance-5zqjq" Dec 10 11:12:54 crc kubenswrapper[4780]: I1210 11:12:54.105758 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/72627815-752d-44a8-96cc-428f0239411d-etc-swift\") pod \"swift-ring-rebalance-5zqjq\" (UID: 
\"72627815-752d-44a8-96cc-428f0239411d\") " pod="openstack/swift-ring-rebalance-5zqjq" Dec 10 11:12:54 crc kubenswrapper[4780]: I1210 11:12:54.105790 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/72627815-752d-44a8-96cc-428f0239411d-swiftconf\") pod \"swift-ring-rebalance-5zqjq\" (UID: \"72627815-752d-44a8-96cc-428f0239411d\") " pod="openstack/swift-ring-rebalance-5zqjq" Dec 10 11:12:54 crc kubenswrapper[4780]: I1210 11:12:54.105853 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5l8ng\" (UniqueName: \"kubernetes.io/projected/72627815-752d-44a8-96cc-428f0239411d-kube-api-access-5l8ng\") pod \"swift-ring-rebalance-5zqjq\" (UID: \"72627815-752d-44a8-96cc-428f0239411d\") " pod="openstack/swift-ring-rebalance-5zqjq" Dec 10 11:12:54 crc kubenswrapper[4780]: I1210 11:12:54.105934 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/72627815-752d-44a8-96cc-428f0239411d-scripts\") pod \"swift-ring-rebalance-5zqjq\" (UID: \"72627815-752d-44a8-96cc-428f0239411d\") " pod="openstack/swift-ring-rebalance-5zqjq" Dec 10 11:12:54 crc kubenswrapper[4780]: I1210 11:12:54.105976 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/72627815-752d-44a8-96cc-428f0239411d-dispersionconf\") pod \"swift-ring-rebalance-5zqjq\" (UID: \"72627815-752d-44a8-96cc-428f0239411d\") " pod="openstack/swift-ring-rebalance-5zqjq" Dec 10 11:12:54 crc kubenswrapper[4780]: I1210 11:12:54.107263 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/72627815-752d-44a8-96cc-428f0239411d-ring-data-devices\") pod \"swift-ring-rebalance-5zqjq\" (UID: \"72627815-752d-44a8-96cc-428f0239411d\") " pod="openstack/swift-ring-rebalance-5zqjq" Dec 10 11:12:54 crc kubenswrapper[4780]: I1210 11:12:54.107797 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/72627815-752d-44a8-96cc-428f0239411d-scripts\") pod \"swift-ring-rebalance-5zqjq\" (UID: \"72627815-752d-44a8-96cc-428f0239411d\") " pod="openstack/swift-ring-rebalance-5zqjq" Dec 10 11:12:54 crc kubenswrapper[4780]: I1210 11:12:54.111278 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/72627815-752d-44a8-96cc-428f0239411d-etc-swift\") pod \"swift-ring-rebalance-5zqjq\" (UID: \"72627815-752d-44a8-96cc-428f0239411d\") " pod="openstack/swift-ring-rebalance-5zqjq" Dec 10 11:12:54 crc kubenswrapper[4780]: I1210 11:12:54.246632 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-ring-rebalance-dtfn8" Dec 10 11:12:54 crc kubenswrapper[4780]: I1210 11:12:54.268342 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-ring-rebalance-dtfn8" Dec 10 11:12:54 crc kubenswrapper[4780]: I1210 11:12:54.320470 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovsdbserver-sb-0" Dec 10 11:12:54 crc kubenswrapper[4780]: I1210 11:12:54.326237 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/3f1563d1-a52d-47ac-916b-b9fe05f59b28-ring-data-devices\") pod \"3f1563d1-a52d-47ac-916b-b9fe05f59b28\" (UID: \"3f1563d1-a52d-47ac-916b-b9fe05f59b28\") " Dec 10 11:12:54 crc kubenswrapper[4780]: I1210 11:12:54.326539 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/3f1563d1-a52d-47ac-916b-b9fe05f59b28-scripts\") pod \"3f1563d1-a52d-47ac-916b-b9fe05f59b28\" (UID: \"3f1563d1-a52d-47ac-916b-b9fe05f59b28\") " Dec 10 11:12:54 crc kubenswrapper[4780]: I1210 11:12:54.326629 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/3f1563d1-a52d-47ac-916b-b9fe05f59b28-etc-swift\") pod \"3f1563d1-a52d-47ac-916b-b9fe05f59b28\" (UID: \"3f1563d1-a52d-47ac-916b-b9fe05f59b28\") " Dec 10 11:12:54 crc kubenswrapper[4780]: I1210 11:12:54.327015 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3f1563d1-a52d-47ac-916b-b9fe05f59b28-etc-swift" (OuterVolumeSpecName: "etc-swift") pod "3f1563d1-a52d-47ac-916b-b9fe05f59b28" (UID: "3f1563d1-a52d-47ac-916b-b9fe05f59b28"). InnerVolumeSpecName "etc-swift". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 11:12:54 crc kubenswrapper[4780]: I1210 11:12:54.326903 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3f1563d1-a52d-47ac-916b-b9fe05f59b28-ring-data-devices" (OuterVolumeSpecName: "ring-data-devices") pod "3f1563d1-a52d-47ac-916b-b9fe05f59b28" (UID: "3f1563d1-a52d-47ac-916b-b9fe05f59b28"). InnerVolumeSpecName "ring-data-devices". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 11:12:54 crc kubenswrapper[4780]: I1210 11:12:54.327337 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3f1563d1-a52d-47ac-916b-b9fe05f59b28-scripts" (OuterVolumeSpecName: "scripts") pod "3f1563d1-a52d-47ac-916b-b9fe05f59b28" (UID: "3f1563d1-a52d-47ac-916b-b9fe05f59b28"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 11:12:54 crc kubenswrapper[4780]: I1210 11:12:54.328327 4780 reconciler_common.go:293] "Volume detached for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/3f1563d1-a52d-47ac-916b-b9fe05f59b28-etc-swift\") on node \"crc\" DevicePath \"\"" Dec 10 11:12:54 crc kubenswrapper[4780]: I1210 11:12:54.328354 4780 reconciler_common.go:293] "Volume detached for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/3f1563d1-a52d-47ac-916b-b9fe05f59b28-ring-data-devices\") on node \"crc\" DevicePath \"\"" Dec 10 11:12:54 crc kubenswrapper[4780]: I1210 11:12:54.328369 4780 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/3f1563d1-a52d-47ac-916b-b9fe05f59b28-scripts\") on node \"crc\" DevicePath \"\"" Dec 10 11:12:54 crc kubenswrapper[4780]: I1210 11:12:54.430797 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/3e1a1225-bdae-4dcb-b10a-02504fe590cd-etc-swift\") pod \"swift-storage-0\" (UID: \"3e1a1225-bdae-4dcb-b10a-02504fe590cd\") " pod="openstack/swift-storage-0" Dec 10 11:12:54 crc kubenswrapper[4780]: E1210 11:12:54.431030 4780 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Dec 10 11:12:54 crc kubenswrapper[4780]: E1210 11:12:54.431070 4780 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Dec 10 11:12:54 crc kubenswrapper[4780]: E1210 11:12:54.431146 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3e1a1225-bdae-4dcb-b10a-02504fe590cd-etc-swift podName:3e1a1225-bdae-4dcb-b10a-02504fe590cd nodeName:}" failed. No retries permitted until 2025-12-10 11:12:55.431118476 +0000 UTC m=+1680.284511909 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/3e1a1225-bdae-4dcb-b10a-02504fe590cd-etc-swift") pod "swift-storage-0" (UID: "3e1a1225-bdae-4dcb-b10a-02504fe590cd") : configmap "swift-ring-files" not found Dec 10 11:12:54 crc kubenswrapper[4780]: I1210 11:12:54.646828 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-x98wp" Dec 10 11:12:54 crc kubenswrapper[4780]: I1210 11:12:54.647273 4780 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-x98wp" Dec 10 11:12:55 crc kubenswrapper[4780]: I1210 11:12:55.178805 4780 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/ovsdbserver-nb-0" Dec 10 11:12:55 crc kubenswrapper[4780]: I1210 11:12:55.260113 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-ring-rebalance-dtfn8" Dec 10 11:12:55 crc kubenswrapper[4780]: I1210 11:12:55.333408 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/swift-ring-rebalance-dtfn8"] Dec 10 11:12:55 crc kubenswrapper[4780]: I1210 11:12:55.343395 4780 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/swift-ring-rebalance-dtfn8"] Dec 10 11:12:55 crc kubenswrapper[4780]: I1210 11:12:55.458179 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/3e1a1225-bdae-4dcb-b10a-02504fe590cd-etc-swift\") pod \"swift-storage-0\" (UID: \"3e1a1225-bdae-4dcb-b10a-02504fe590cd\") " pod="openstack/swift-storage-0" Dec 10 11:12:55 crc kubenswrapper[4780]: E1210 11:12:55.458468 4780 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Dec 10 11:12:55 crc kubenswrapper[4780]: E1210 11:12:55.458511 4780 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Dec 10 11:12:55 crc kubenswrapper[4780]: E1210 11:12:55.458610 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3e1a1225-bdae-4dcb-b10a-02504fe590cd-etc-swift podName:3e1a1225-bdae-4dcb-b10a-02504fe590cd nodeName:}" failed. No retries permitted until 2025-12-10 11:12:57.458578593 +0000 UTC m=+1682.311972176 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/3e1a1225-bdae-4dcb-b10a-02504fe590cd-etc-swift") pod "swift-storage-0" (UID: "3e1a1225-bdae-4dcb-b10a-02504fe590cd") : configmap "swift-ring-files" not found Dec 10 11:12:57 crc kubenswrapper[4780]: I1210 11:12:57.245090 4780 patch_prober.go:28] interesting pod/metrics-server-64c74dff4-7dr67 container/metrics-server namespace/openshift-monitoring: Liveness probe status=failure output="Get \"https://10.217.0.75:10250/livez\": dial tcp 10.217.0.75:10250: i/o timeout" start-of-body= Dec 10 11:12:57 crc kubenswrapper[4780]: I1210 11:12:57.245167 4780 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-monitoring/metrics-server-64c74dff4-7dr67" podUID="fe22a59d-5885-47fe-a089-d4ffdd1e94ba" containerName="metrics-server" probeResult="failure" output="Get \"https://10.217.0.75:10250/livez\": dial tcp 10.217.0.75:10250: i/o timeout" Dec 10 11:12:57 crc kubenswrapper[4780]: I1210 11:12:57.252146 4780 fsHandler.go:133] fs: disk usage and inodes count on following dirs took 1.470890357s: [/var/lib/containers/storage/overlay/071764779b254f986404ac9474686f6255e7b18625bfff10fb9847f5b9ca7642/diff /var/log/pods/openstack-operators_ovn-operator-controller-manager-b6456fdb6-jwjxx_b88956c1-b60b-4a6f-948a-de685134880f/manager/0.log]; will not log again for this container unless duration exceeds 2s Dec 10 11:12:57 crc kubenswrapper[4780]: I1210 11:12:57.475252 4780 patch_prober.go:28] interesting pod/machine-config-daemon-xhdr5 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 10 11:12:57 crc kubenswrapper[4780]: I1210 11:12:57.475826 4780 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" podUID="6bf1dca1-b191-4796-b326-baac53e84045" containerName="machine-config-daemon" 
probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 10 11:12:57 crc kubenswrapper[4780]: I1210 11:12:57.521583 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/3e1a1225-bdae-4dcb-b10a-02504fe590cd-etc-swift\") pod \"swift-storage-0\" (UID: \"3e1a1225-bdae-4dcb-b10a-02504fe590cd\") " pod="openstack/swift-storage-0" Dec 10 11:12:57 crc kubenswrapper[4780]: E1210 11:12:57.522069 4780 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Dec 10 11:12:57 crc kubenswrapper[4780]: E1210 11:12:57.522104 4780 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Dec 10 11:12:57 crc kubenswrapper[4780]: E1210 11:12:57.522208 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3e1a1225-bdae-4dcb-b10a-02504fe590cd-etc-swift podName:3e1a1225-bdae-4dcb-b10a-02504fe590cd nodeName:}" failed. No retries permitted until 2025-12-10 11:13:01.522183038 +0000 UTC m=+1686.375576481 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/3e1a1225-bdae-4dcb-b10a-02504fe590cd-etc-swift") pod "swift-storage-0" (UID: "3e1a1225-bdae-4dcb-b10a-02504fe590cd") : configmap "swift-ring-files" not found Dec 10 11:12:59 crc kubenswrapper[4780]: I1210 11:12:59.346446 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-metrics-jgcc9" podStartSLOduration=54.225704612 podStartE2EDuration="1m7.346421239s" podCreationTimestamp="2025-12-10 11:11:52 +0000 UTC" firstStartedPulling="2025-12-10 11:12:35.261542232 +0000 UTC m=+1660.114935675" lastFinishedPulling="2025-12-10 11:12:48.382258859 +0000 UTC m=+1673.235652302" observedRunningTime="2025-12-10 11:12:59.345458295 +0000 UTC m=+1684.198851748" watchObservedRunningTime="2025-12-10 11:12:59.346421239 +0000 UTC m=+1684.199814682" Dec 10 11:13:01 crc kubenswrapper[4780]: I1210 11:13:01.607095 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/3e1a1225-bdae-4dcb-b10a-02504fe590cd-etc-swift\") pod \"swift-storage-0\" (UID: \"3e1a1225-bdae-4dcb-b10a-02504fe590cd\") " pod="openstack/swift-storage-0" Dec 10 11:13:01 crc kubenswrapper[4780]: E1210 11:13:01.607612 4780 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Dec 10 11:13:01 crc kubenswrapper[4780]: E1210 11:13:01.608089 4780 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Dec 10 11:13:01 crc kubenswrapper[4780]: E1210 11:13:01.608184 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3e1a1225-bdae-4dcb-b10a-02504fe590cd-etc-swift podName:3e1a1225-bdae-4dcb-b10a-02504fe590cd nodeName:}" failed. No retries permitted until 2025-12-10 11:13:09.608155819 +0000 UTC m=+1694.461549262 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/3e1a1225-bdae-4dcb-b10a-02504fe590cd-etc-swift") pod "swift-storage-0" (UID: "3e1a1225-bdae-4dcb-b10a-02504fe590cd") : configmap "swift-ring-files" not found Dec 10 11:13:04 crc kubenswrapper[4780]: I1210 11:13:04.314727 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/3f1563d1-a52d-47ac-916b-b9fe05f59b28-swiftconf\") pod \"swift-ring-rebalance-dtfn8\" (UID: \"3f1563d1-a52d-47ac-916b-b9fe05f59b28\") " pod="openstack/swift-ring-rebalance-dtfn8" Dec 10 11:13:04 crc kubenswrapper[4780]: I1210 11:13:04.316166 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3f1563d1-a52d-47ac-916b-b9fe05f59b28-combined-ca-bundle\") pod \"swift-ring-rebalance-dtfn8\" (UID: \"3f1563d1-a52d-47ac-916b-b9fe05f59b28\") " pod="openstack/swift-ring-rebalance-dtfn8" Dec 10 11:13:04 crc kubenswrapper[4780]: I1210 11:13:04.315795 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hq7cj\" (UniqueName: \"kubernetes.io/projected/3f1563d1-a52d-47ac-916b-b9fe05f59b28-kube-api-access-hq7cj\") pod \"swift-ring-rebalance-dtfn8\" (UID: \"3f1563d1-a52d-47ac-916b-b9fe05f59b28\") " pod="openstack/swift-ring-rebalance-dtfn8" Dec 10 11:13:04 crc kubenswrapper[4780]: I1210 11:13:04.315847 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4zgn4\" (UniqueName: \"kubernetes.io/projected/0d1d0d57-d4fe-4b96-b59b-0dad3be263d7-kube-api-access-4zgn4\") pod \"dnsmasq-dns-698758b865-86zlx\" (UID: \"0d1d0d57-d4fe-4b96-b59b-0dad3be263d7\") " pod="openstack/dnsmasq-dns-698758b865-86zlx" Dec 10 11:13:04 crc kubenswrapper[4780]: I1210 11:13:04.315155 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wptnd\" (UniqueName: \"kubernetes.io/projected/3e1a1225-bdae-4dcb-b10a-02504fe590cd-kube-api-access-wptnd\") pod \"swift-storage-0\" (UID: \"3e1a1225-bdae-4dcb-b10a-02504fe590cd\") " pod="openstack/swift-storage-0" Dec 10 11:13:04 crc kubenswrapper[4780]: I1210 11:13:04.316817 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/72627815-752d-44a8-96cc-428f0239411d-dispersionconf\") pod \"swift-ring-rebalance-5zqjq\" (UID: \"72627815-752d-44a8-96cc-428f0239411d\") " pod="openstack/swift-ring-rebalance-5zqjq" Dec 10 11:13:04 crc kubenswrapper[4780]: I1210 11:13:04.316959 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/72627815-752d-44a8-96cc-428f0239411d-combined-ca-bundle\") pod \"swift-ring-rebalance-5zqjq\" (UID: \"72627815-752d-44a8-96cc-428f0239411d\") " pod="openstack/swift-ring-rebalance-5zqjq" Dec 10 11:13:04 crc kubenswrapper[4780]: I1210 11:13:04.317143 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/3f1563d1-a52d-47ac-916b-b9fe05f59b28-dispersionconf\") pod \"swift-ring-rebalance-dtfn8\" (UID: \"3f1563d1-a52d-47ac-916b-b9fe05f59b28\") " pod="openstack/swift-ring-rebalance-dtfn8" Dec 10 11:13:04 crc kubenswrapper[4780]: I1210 11:13:04.317499 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/72627815-752d-44a8-96cc-428f0239411d-swiftconf\") pod \"swift-ring-rebalance-5zqjq\" 
(UID: \"72627815-752d-44a8-96cc-428f0239411d\") " pod="openstack/swift-ring-rebalance-5zqjq" Dec 10 11:13:04 crc kubenswrapper[4780]: I1210 11:13:04.322186 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5l8ng\" (UniqueName: \"kubernetes.io/projected/72627815-752d-44a8-96cc-428f0239411d-kube-api-access-5l8ng\") pod \"swift-ring-rebalance-5zqjq\" (UID: \"72627815-752d-44a8-96cc-428f0239411d\") " pod="openstack/swift-ring-rebalance-5zqjq" Dec 10 11:13:04 crc kubenswrapper[4780]: I1210 11:13:04.329515 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3f1563d1-a52d-47ac-916b-b9fe05f59b28-combined-ca-bundle\") pod \"3f1563d1-a52d-47ac-916b-b9fe05f59b28\" (UID: \"3f1563d1-a52d-47ac-916b-b9fe05f59b28\") " Dec 10 11:13:04 crc kubenswrapper[4780]: I1210 11:13:04.329750 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/3f1563d1-a52d-47ac-916b-b9fe05f59b28-dispersionconf\") pod \"3f1563d1-a52d-47ac-916b-b9fe05f59b28\" (UID: \"3f1563d1-a52d-47ac-916b-b9fe05f59b28\") " Dec 10 11:13:04 crc kubenswrapper[4780]: I1210 11:13:04.329849 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hq7cj\" (UniqueName: \"kubernetes.io/projected/3f1563d1-a52d-47ac-916b-b9fe05f59b28-kube-api-access-hq7cj\") pod \"3f1563d1-a52d-47ac-916b-b9fe05f59b28\" (UID: \"3f1563d1-a52d-47ac-916b-b9fe05f59b28\") " Dec 10 11:13:04 crc kubenswrapper[4780]: I1210 11:13:04.330088 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/3f1563d1-a52d-47ac-916b-b9fe05f59b28-swiftconf\") pod \"3f1563d1-a52d-47ac-916b-b9fe05f59b28\" (UID: \"3f1563d1-a52d-47ac-916b-b9fe05f59b28\") " Dec 10 11:13:04 crc kubenswrapper[4780]: I1210 11:13:04.339786 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3f1563d1-a52d-47ac-916b-b9fe05f59b28-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "3f1563d1-a52d-47ac-916b-b9fe05f59b28" (UID: "3f1563d1-a52d-47ac-916b-b9fe05f59b28"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:13:04 crc kubenswrapper[4780]: I1210 11:13:04.340370 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3f1563d1-a52d-47ac-916b-b9fe05f59b28-swiftconf" (OuterVolumeSpecName: "swiftconf") pod "3f1563d1-a52d-47ac-916b-b9fe05f59b28" (UID: "3f1563d1-a52d-47ac-916b-b9fe05f59b28"). InnerVolumeSpecName "swiftconf". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:13:04 crc kubenswrapper[4780]: I1210 11:13:04.341403 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3f1563d1-a52d-47ac-916b-b9fe05f59b28-kube-api-access-hq7cj" (OuterVolumeSpecName: "kube-api-access-hq7cj") pod "3f1563d1-a52d-47ac-916b-b9fe05f59b28" (UID: "3f1563d1-a52d-47ac-916b-b9fe05f59b28"). InnerVolumeSpecName "kube-api-access-hq7cj". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:13:04 crc kubenswrapper[4780]: E1210 11:13:04.343508 4780 kubelet_volumes.go:263] "There were many similar errors. Turn up verbosity to see them." 
err="orphaned pod \"3f1563d1-a52d-47ac-916b-b9fe05f59b28\" found, but failed to rmdir() volume at path /var/lib/kubelet/pods/3f1563d1-a52d-47ac-916b-b9fe05f59b28/volumes/kubernetes.io~projected/kube-api-access-hq7cj: device or resource busy" numErrs=5 Dec 10 11:13:04 crc kubenswrapper[4780]: E1210 11:13:04.344183 4780 kubelet.go:2526] "Housekeeping took longer than expected" err="housekeeping took too long" expected="1s" actual="7.118s" Dec 10 11:13:04 crc kubenswrapper[4780]: I1210 11:13:04.344226 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-metrics-jgcc9" event={"ID":"72e2c2ed-0530-4846-9244-b93076ed5640","Type":"ContainerStarted","Data":"90ab00e1ee545fb07993b9cfd42239b6f756fed552273f74937c685f40b64533"} Dec 10 11:13:04 crc kubenswrapper[4780]: I1210 11:13:04.344274 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovsdbserver-nb-0" Dec 10 11:13:04 crc kubenswrapper[4780]: I1210 11:13:04.346054 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3f1563d1-a52d-47ac-916b-b9fe05f59b28-dispersionconf" (OuterVolumeSpecName: "dispersionconf") pod "3f1563d1-a52d-47ac-916b-b9fe05f59b28" (UID: "3f1563d1-a52d-47ac-916b-b9fe05f59b28"). InnerVolumeSpecName "dispersionconf". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:13:04 crc kubenswrapper[4780]: I1210 11:13:04.361047 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-698758b865-86zlx" Dec 10 11:13:04 crc kubenswrapper[4780]: I1210 11:13:04.364815 4780 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3f1563d1-a52d-47ac-916b-b9fe05f59b28" path="/var/lib/kubelet/pods/3f1563d1-a52d-47ac-916b-b9fe05f59b28/volumes" Dec 10 11:13:04 crc kubenswrapper[4780]: I1210 11:13:04.365502 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-d8nw2" Dec 10 11:13:04 crc kubenswrapper[4780]: I1210 11:13:04.366102 4780 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-d8nw2" Dec 10 11:13:04 crc kubenswrapper[4780]: I1210 11:13:04.366118 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/observability-ui-dashboards-7d5fb4cbfb-26rg4" event={"ID":"f1e3b9ec-1036-4bee-bbc8-336293208b48","Type":"ContainerStarted","Data":"90e152659b0479eaa3d7020144abd7a1eab7f334793c4760f33c06f75a08875b"} Dec 10 11:13:04 crc kubenswrapper[4780]: I1210 11:13:04.366212 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-wt5zb" event={"ID":"6bd77f46-f3d3-45a7-bc8e-f3de677e1583","Type":"ContainerStarted","Data":"7e3d8055ee567598845fd3ddde795c8fa68f5d8c191237be1ed59f4d2fcfcc59"} Dec 10 11:13:04 crc kubenswrapper[4780]: I1210 11:13:04.366229 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"c31145f5-6188-4934-8ceb-a86ac4a0e997","Type":"ContainerStarted","Data":"e80c39a58e516d93fed0174ae9b034d55fe851226c0997c0e1e395c46d849b98"} Dec 10 11:13:04 crc kubenswrapper[4780]: I1210 11:13:04.404495 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-wt5zb" podStartSLOduration=19.501510059 podStartE2EDuration="1m19.404468573s" podCreationTimestamp="2025-12-10 11:11:45 +0000 UTC" firstStartedPulling="2025-12-10 11:11:48.480472614 +0000 UTC m=+1613.333866058" lastFinishedPulling="2025-12-10 11:12:48.383431129 +0000 
UTC m=+1673.236824572" observedRunningTime="2025-12-10 11:13:04.398197013 +0000 UTC m=+1689.251590466" watchObservedRunningTime="2025-12-10 11:13:04.404468573 +0000 UTC m=+1689.257862016" Dec 10 11:13:04 crc kubenswrapper[4780]: I1210 11:13:04.442259 4780 reconciler_common.go:293] "Volume detached for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/3f1563d1-a52d-47ac-916b-b9fe05f59b28-dispersionconf\") on node \"crc\" DevicePath \"\"" Dec 10 11:13:04 crc kubenswrapper[4780]: I1210 11:13:04.442313 4780 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hq7cj\" (UniqueName: \"kubernetes.io/projected/3f1563d1-a52d-47ac-916b-b9fe05f59b28-kube-api-access-hq7cj\") on node \"crc\" DevicePath \"\"" Dec 10 11:13:04 crc kubenswrapper[4780]: I1210 11:13:04.442328 4780 reconciler_common.go:293] "Volume detached for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/3f1563d1-a52d-47ac-916b-b9fe05f59b28-swiftconf\") on node \"crc\" DevicePath \"\"" Dec 10 11:13:04 crc kubenswrapper[4780]: I1210 11:13:04.442341 4780 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3f1563d1-a52d-47ac-916b-b9fe05f59b28-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 10 11:13:04 crc kubenswrapper[4780]: I1210 11:13:04.445815 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operators/observability-ui-dashboards-7d5fb4cbfb-26rg4" podStartSLOduration=21.920855845 podStartE2EDuration="1m22.445795877s" podCreationTimestamp="2025-12-10 11:11:42 +0000 UTC" firstStartedPulling="2025-12-10 11:11:47.862783917 +0000 UTC m=+1612.716177360" lastFinishedPulling="2025-12-10 11:12:48.387723949 +0000 UTC m=+1673.241117392" observedRunningTime="2025-12-10 11:13:04.426601168 +0000 UTC m=+1689.279994611" watchObservedRunningTime="2025-12-10 11:13:04.445795877 +0000 UTC m=+1689.299189320" Dec 10 11:13:04 crc kubenswrapper[4780]: I1210 11:13:04.494425 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-swift-dockercfg-zbxrn" Dec 10 11:13:04 crc kubenswrapper[4780]: I1210 11:13:04.501101 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-ring-rebalance-5zqjq" Dec 10 11:13:04 crc kubenswrapper[4780]: I1210 11:13:04.562464 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstack-galera-0" podStartSLOduration=32.755127959 podStartE2EDuration="1m31.562420932s" podCreationTimestamp="2025-12-10 11:11:33 +0000 UTC" firstStartedPulling="2025-12-10 11:11:37.261772397 +0000 UTC m=+1602.115165840" lastFinishedPulling="2025-12-10 11:12:36.06906537 +0000 UTC m=+1660.922458813" observedRunningTime="2025-12-10 11:13:04.498256115 +0000 UTC m=+1689.351649558" watchObservedRunningTime="2025-12-10 11:13:04.562420932 +0000 UTC m=+1689.415814375" Dec 10 11:13:05 crc kubenswrapper[4780]: I1210 11:13:05.128946 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"swift-storage-0\" (UID: \"3e1a1225-bdae-4dcb-b10a-02504fe590cd\") " pod="openstack/swift-storage-0" Dec 10 11:13:05 crc kubenswrapper[4780]: I1210 11:13:05.372268 4780 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-x98wp" Dec 10 11:13:05 crc kubenswrapper[4780]: I1210 11:13:05.377671 4780 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/ovsdbserver-sb-0" Dec 10 11:13:05 crc kubenswrapper[4780]: I1210 11:13:05.386156 4780 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-d8nw2" Dec 10 11:13:05 crc kubenswrapper[4780]: I1210 11:13:05.386758 4780 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/ovsdbserver-nb-0" Dec 10 11:13:05 crc kubenswrapper[4780]: I1210 11:13:05.466202 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/openstack-galera-0" Dec 10 11:13:05 crc kubenswrapper[4780]: I1210 11:13:05.466770 4780 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/openstack-galera-0" Dec 10 11:13:05 crc kubenswrapper[4780]: I1210 11:13:05.646788 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-d8nw2" Dec 10 11:13:05 crc kubenswrapper[4780]: I1210 11:13:05.646997 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-x98wp" Dec 10 11:13:05 crc kubenswrapper[4780]: I1210 11:13:05.647210 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovsdbserver-nb-0" Dec 10 11:13:05 crc kubenswrapper[4780]: I1210 11:13:05.659874 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovsdbserver-sb-0" Dec 10 11:13:05 crc kubenswrapper[4780]: I1210 11:13:05.706029 4780 generic.go:334] "Generic (PLEG): container finished" podID="ee80edf3-0250-44c9-acfb-e2f9a3ce4f4b" containerID="7022a6b60831aa66636b0cac1b288d2f037264625e94f54dbf578d7a0ac89f86" exitCode=0 Dec 10 11:13:05 crc kubenswrapper[4780]: I1210 11:13:05.706164 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"ee80edf3-0250-44c9-acfb-e2f9a3ce4f4b","Type":"ContainerDied","Data":"7022a6b60831aa66636b0cac1b288d2f037264625e94f54dbf578d7a0ac89f86"} Dec 10 11:13:05 crc kubenswrapper[4780]: I1210 11:13:05.729036 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" 
event={"ID":"930a45eb-72d1-4060-92de-2e348073eb16","Type":"ContainerStarted","Data":"ed7f26679a7c0d61b6e2539f857a270f9cc2f930158baaf0029c54842591e814"} Dec 10 11:13:06 crc kubenswrapper[4780]: I1210 11:13:06.244700 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-northd-0"] Dec 10 11:13:06 crc kubenswrapper[4780]: I1210 11:13:06.248433 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-northd-0" Dec 10 11:13:06 crc kubenswrapper[4780]: I1210 11:13:06.256950 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovnnorthd-ovndbs" Dec 10 11:13:06 crc kubenswrapper[4780]: I1210 11:13:06.257276 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovnnorthd-ovnnorthd-dockercfg-7xcl8" Dec 10 11:13:06 crc kubenswrapper[4780]: I1210 11:13:06.257781 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovnnorthd-config" Dec 10 11:13:06 crc kubenswrapper[4780]: I1210 11:13:06.257942 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovnnorthd-scripts" Dec 10 11:13:06 crc kubenswrapper[4780]: I1210 11:13:06.282668 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-northd-0"] Dec 10 11:13:06 crc kubenswrapper[4780]: I1210 11:13:06.312643 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/7f260d05-cefc-4e35-a7f3-b1a656cdf9cd-ovn-rundir\") pod \"ovn-northd-0\" (UID: \"7f260d05-cefc-4e35-a7f3-b1a656cdf9cd\") " pod="openstack/ovn-northd-0" Dec 10 11:13:06 crc kubenswrapper[4780]: I1210 11:13:06.312712 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7f260d05-cefc-4e35-a7f3-b1a656cdf9cd-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: \"7f260d05-cefc-4e35-a7f3-b1a656cdf9cd\") " pod="openstack/ovn-northd-0" Dec 10 11:13:06 crc kubenswrapper[4780]: I1210 11:13:06.312804 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/7f260d05-cefc-4e35-a7f3-b1a656cdf9cd-scripts\") pod \"ovn-northd-0\" (UID: \"7f260d05-cefc-4e35-a7f3-b1a656cdf9cd\") " pod="openstack/ovn-northd-0" Dec 10 11:13:06 crc kubenswrapper[4780]: I1210 11:13:06.312835 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cfr4q\" (UniqueName: \"kubernetes.io/projected/7f260d05-cefc-4e35-a7f3-b1a656cdf9cd-kube-api-access-cfr4q\") pod \"ovn-northd-0\" (UID: \"7f260d05-cefc-4e35-a7f3-b1a656cdf9cd\") " pod="openstack/ovn-northd-0" Dec 10 11:13:06 crc kubenswrapper[4780]: I1210 11:13:06.312899 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7f260d05-cefc-4e35-a7f3-b1a656cdf9cd-config\") pod \"ovn-northd-0\" (UID: \"7f260d05-cefc-4e35-a7f3-b1a656cdf9cd\") " pod="openstack/ovn-northd-0" Dec 10 11:13:06 crc kubenswrapper[4780]: I1210 11:13:06.315101 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/7f260d05-cefc-4e35-a7f3-b1a656cdf9cd-metrics-certs-tls-certs\") pod \"ovn-northd-0\" (UID: \"7f260d05-cefc-4e35-a7f3-b1a656cdf9cd\") " pod="openstack/ovn-northd-0" Dec 10 
11:13:06 crc kubenswrapper[4780]: I1210 11:13:06.315192 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/7f260d05-cefc-4e35-a7f3-b1a656cdf9cd-ovn-northd-tls-certs\") pod \"ovn-northd-0\" (UID: \"7f260d05-cefc-4e35-a7f3-b1a656cdf9cd\") " pod="openstack/ovn-northd-0" Dec 10 11:13:06 crc kubenswrapper[4780]: I1210 11:13:06.422245 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/7f260d05-cefc-4e35-a7f3-b1a656cdf9cd-scripts\") pod \"ovn-northd-0\" (UID: \"7f260d05-cefc-4e35-a7f3-b1a656cdf9cd\") " pod="openstack/ovn-northd-0" Dec 10 11:13:06 crc kubenswrapper[4780]: I1210 11:13:06.422889 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cfr4q\" (UniqueName: \"kubernetes.io/projected/7f260d05-cefc-4e35-a7f3-b1a656cdf9cd-kube-api-access-cfr4q\") pod \"ovn-northd-0\" (UID: \"7f260d05-cefc-4e35-a7f3-b1a656cdf9cd\") " pod="openstack/ovn-northd-0" Dec 10 11:13:06 crc kubenswrapper[4780]: I1210 11:13:06.423023 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7f260d05-cefc-4e35-a7f3-b1a656cdf9cd-config\") pod \"ovn-northd-0\" (UID: \"7f260d05-cefc-4e35-a7f3-b1a656cdf9cd\") " pod="openstack/ovn-northd-0" Dec 10 11:13:06 crc kubenswrapper[4780]: I1210 11:13:06.423102 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/7f260d05-cefc-4e35-a7f3-b1a656cdf9cd-metrics-certs-tls-certs\") pod \"ovn-northd-0\" (UID: \"7f260d05-cefc-4e35-a7f3-b1a656cdf9cd\") " pod="openstack/ovn-northd-0" Dec 10 11:13:06 crc kubenswrapper[4780]: I1210 11:13:06.423211 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/7f260d05-cefc-4e35-a7f3-b1a656cdf9cd-ovn-northd-tls-certs\") pod \"ovn-northd-0\" (UID: \"7f260d05-cefc-4e35-a7f3-b1a656cdf9cd\") " pod="openstack/ovn-northd-0" Dec 10 11:13:06 crc kubenswrapper[4780]: I1210 11:13:06.423404 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/7f260d05-cefc-4e35-a7f3-b1a656cdf9cd-ovn-rundir\") pod \"ovn-northd-0\" (UID: \"7f260d05-cefc-4e35-a7f3-b1a656cdf9cd\") " pod="openstack/ovn-northd-0" Dec 10 11:13:06 crc kubenswrapper[4780]: I1210 11:13:06.423466 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7f260d05-cefc-4e35-a7f3-b1a656cdf9cd-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: \"7f260d05-cefc-4e35-a7f3-b1a656cdf9cd\") " pod="openstack/ovn-northd-0" Dec 10 11:13:06 crc kubenswrapper[4780]: I1210 11:13:06.440737 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/7f260d05-cefc-4e35-a7f3-b1a656cdf9cd-ovn-rundir\") pod \"ovn-northd-0\" (UID: \"7f260d05-cefc-4e35-a7f3-b1a656cdf9cd\") " pod="openstack/ovn-northd-0" Dec 10 11:13:06 crc kubenswrapper[4780]: I1210 11:13:06.457300 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-controller-wt5zb" Dec 10 11:13:06 crc kubenswrapper[4780]: I1210 11:13:06.469295 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: 
\"kubernetes.io/configmap/7f260d05-cefc-4e35-a7f3-b1a656cdf9cd-config\") pod \"ovn-northd-0\" (UID: \"7f260d05-cefc-4e35-a7f3-b1a656cdf9cd\") " pod="openstack/ovn-northd-0" Dec 10 11:13:06 crc kubenswrapper[4780]: I1210 11:13:06.470185 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/7f260d05-cefc-4e35-a7f3-b1a656cdf9cd-scripts\") pod \"ovn-northd-0\" (UID: \"7f260d05-cefc-4e35-a7f3-b1a656cdf9cd\") " pod="openstack/ovn-northd-0" Dec 10 11:13:06 crc kubenswrapper[4780]: I1210 11:13:06.484169 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-698758b865-86zlx"] Dec 10 11:13:06 crc kubenswrapper[4780]: I1210 11:13:06.521132 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-ring-rebalance-5zqjq"] Dec 10 11:13:06 crc kubenswrapper[4780]: I1210 11:13:06.615270 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cfr4q\" (UniqueName: \"kubernetes.io/projected/7f260d05-cefc-4e35-a7f3-b1a656cdf9cd-kube-api-access-cfr4q\") pod \"ovn-northd-0\" (UID: \"7f260d05-cefc-4e35-a7f3-b1a656cdf9cd\") " pod="openstack/ovn-northd-0" Dec 10 11:13:06 crc kubenswrapper[4780]: I1210 11:13:06.614416 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7f260d05-cefc-4e35-a7f3-b1a656cdf9cd-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: \"7f260d05-cefc-4e35-a7f3-b1a656cdf9cd\") " pod="openstack/ovn-northd-0" Dec 10 11:13:06 crc kubenswrapper[4780]: I1210 11:13:06.630091 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/7f260d05-cefc-4e35-a7f3-b1a656cdf9cd-metrics-certs-tls-certs\") pod \"ovn-northd-0\" (UID: \"7f260d05-cefc-4e35-a7f3-b1a656cdf9cd\") " pod="openstack/ovn-northd-0" Dec 10 11:13:06 crc kubenswrapper[4780]: I1210 11:13:06.631031 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/7f260d05-cefc-4e35-a7f3-b1a656cdf9cd-ovn-northd-tls-certs\") pod \"ovn-northd-0\" (UID: \"7f260d05-cefc-4e35-a7f3-b1a656cdf9cd\") " pod="openstack/ovn-northd-0" Dec 10 11:13:06 crc kubenswrapper[4780]: I1210 11:13:06.632767 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-northd-0" Dec 10 11:13:06 crc kubenswrapper[4780]: I1210 11:13:06.751470 4780 generic.go:334] "Generic (PLEG): container finished" podID="1c086cc9-263e-4d8e-b3fb-a64fea7f179c" containerID="5170f30541367b32144ad9f3f93966e4f95459adc815afbbb4c9569ce5828d08" exitCode=0 Dec 10 11:13:06 crc kubenswrapper[4780]: I1210 11:13:06.751539 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-hzgvf" event={"ID":"1c086cc9-263e-4d8e-b3fb-a64fea7f179c","Type":"ContainerDied","Data":"5170f30541367b32144ad9f3f93966e4f95459adc815afbbb4c9569ce5828d08"} Dec 10 11:13:06 crc kubenswrapper[4780]: I1210 11:13:06.756790 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"ee80edf3-0250-44c9-acfb-e2f9a3ce4f4b","Type":"ContainerStarted","Data":"1020e8292757eeb32a83562dc3eb17672cd2354dba8e716d4b259f41ab19a44e"} Dec 10 11:13:06 crc kubenswrapper[4780]: I1210 11:13:06.765564 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"9f6ef7c1-91bd-4109-af1b-9cf3960ec2ae","Type":"ContainerStarted","Data":"5b477ceaf617590d7e0355d0772b11a52ca0d161acfc933673d187471a1a5ca7"} Dec 10 11:13:06 crc kubenswrapper[4780]: I1210 11:13:06.917234 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstack-cell1-galera-0" podStartSLOduration=-9223371944.937565 podStartE2EDuration="1m31.917210336s" podCreationTimestamp="2025-12-10 11:11:35 +0000 UTC" firstStartedPulling="2025-12-10 11:11:38.658637693 +0000 UTC m=+1603.512031136" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 11:13:06.909899699 +0000 UTC m=+1691.763293142" watchObservedRunningTime="2025-12-10 11:13:06.917210336 +0000 UTC m=+1691.770603779" Dec 10 11:13:07 crc kubenswrapper[4780]: I1210 11:13:07.330433 4780 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/openstack-cell1-galera-0" Dec 10 11:13:07 crc kubenswrapper[4780]: I1210 11:13:07.331244 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/openstack-cell1-galera-0" Dec 10 11:13:07 crc kubenswrapper[4780]: I1210 11:13:07.733558 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-d8nw2"] Dec 10 11:13:07 crc kubenswrapper[4780]: I1210 11:13:07.734634 4780 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-d8nw2" podUID="200a4e5a-e0a1-4e36-8843-147fcf1de0b4" containerName="registry-server" containerID="cri-o://19fd7d2dee1bc4e5995f821bcaaa82497fca8f4ce333d741a50ee82d98bbf683" gracePeriod=2 Dec 10 11:13:07 crc kubenswrapper[4780]: I1210 11:13:07.795953 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"82752cbd-d657-4c6d-94f6-e9e75a4452c2","Type":"ContainerStarted","Data":"bef97abe36aa9940e552f320a20692d088e93eb61bfc56f928d12303d1749922"} Dec 10 11:13:07 crc kubenswrapper[4780]: I1210 11:13:07.796261 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/kube-state-metrics-0" Dec 10 11:13:07 crc kubenswrapper[4780]: I1210 11:13:07.806893 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-698758b865-86zlx" event={"ID":"0d1d0d57-d4fe-4b96-b59b-0dad3be263d7","Type":"ContainerStarted","Data":"40fa5c6569ed39e3e81dedab39ff65d972c47dc75f6ce8743187876723c0cd46"} Dec 10 11:13:07 crc 
kubenswrapper[4780]: I1210 11:13:07.826975 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-5zqjq" event={"ID":"72627815-752d-44a8-96cc-428f0239411d","Type":"ContainerStarted","Data":"0b5ec7aa841cfd6599ea90e44f0614da75732331ab7745877497eba87e12f3df"} Dec 10 11:13:07 crc kubenswrapper[4780]: I1210 11:13:07.840867 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-x98wp"] Dec 10 11:13:07 crc kubenswrapper[4780]: I1210 11:13:07.841447 4780 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-x98wp" podUID="212dc98e-3ddc-4efb-8300-968ba8c38626" containerName="registry-server" containerID="cri-o://d75ad5c0ee78653752504b1129a63b9c964506969dabbe920ad3b01a94cdc506" gracePeriod=2 Dec 10 11:13:07 crc kubenswrapper[4780]: I1210 11:13:07.843121 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/kube-state-metrics-0" podStartSLOduration=8.730043141 podStartE2EDuration="1m27.843106062s" podCreationTimestamp="2025-12-10 11:11:40 +0000 UTC" firstStartedPulling="2025-12-10 11:11:46.333204005 +0000 UTC m=+1611.186597448" lastFinishedPulling="2025-12-10 11:13:05.446266926 +0000 UTC m=+1690.299660369" observedRunningTime="2025-12-10 11:13:07.824583429 +0000 UTC m=+1692.677976862" watchObservedRunningTime="2025-12-10 11:13:07.843106062 +0000 UTC m=+1692.696499505" Dec 10 11:13:07 crc kubenswrapper[4780]: I1210 11:13:07.987594 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-northd-0"] Dec 10 11:13:08 crc kubenswrapper[4780]: E1210 11:13:08.099435 4780 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod200a4e5a_e0a1_4e36_8843_147fcf1de0b4.slice/crio-19fd7d2dee1bc4e5995f821bcaaa82497fca8f4ce333d741a50ee82d98bbf683.scope\": RecentStats: unable to find data in memory cache]" Dec 10 11:13:08 crc kubenswrapper[4780]: I1210 11:13:08.880667 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"7f260d05-cefc-4e35-a7f3-b1a656cdf9cd","Type":"ContainerStarted","Data":"e4d2674cb47824c41e7df016eb39eba33ce9bb05e75827bd1f15357b21a92fd8"} Dec 10 11:13:08 crc kubenswrapper[4780]: I1210 11:13:08.946492 4780 generic.go:334] "Generic (PLEG): container finished" podID="212dc98e-3ddc-4efb-8300-968ba8c38626" containerID="d75ad5c0ee78653752504b1129a63b9c964506969dabbe920ad3b01a94cdc506" exitCode=0 Dec 10 11:13:08 crc kubenswrapper[4780]: I1210 11:13:08.946676 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-x98wp" event={"ID":"212dc98e-3ddc-4efb-8300-968ba8c38626","Type":"ContainerDied","Data":"d75ad5c0ee78653752504b1129a63b9c964506969dabbe920ad3b01a94cdc506"} Dec 10 11:13:09 crc kubenswrapper[4780]: I1210 11:13:09.055599 4780 generic.go:334] "Generic (PLEG): container finished" podID="200a4e5a-e0a1-4e36-8843-147fcf1de0b4" containerID="19fd7d2dee1bc4e5995f821bcaaa82497fca8f4ce333d741a50ee82d98bbf683" exitCode=0 Dec 10 11:13:09 crc kubenswrapper[4780]: I1210 11:13:09.056490 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-d8nw2" event={"ID":"200a4e5a-e0a1-4e36-8843-147fcf1de0b4","Type":"ContainerDied","Data":"19fd7d2dee1bc4e5995f821bcaaa82497fca8f4ce333d741a50ee82d98bbf683"} Dec 10 11:13:09 crc kubenswrapper[4780]: I1210 11:13:09.085330 4780 generic.go:334] 
"Generic (PLEG): container finished" podID="0d1d0d57-d4fe-4b96-b59b-0dad3be263d7" containerID="78e41bb1a9c33cf73cab83ff2b6b11248eaa897021cc13de028de30c3b31c485" exitCode=0 Dec 10 11:13:09 crc kubenswrapper[4780]: I1210 11:13:09.085960 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-698758b865-86zlx" event={"ID":"0d1d0d57-d4fe-4b96-b59b-0dad3be263d7","Type":"ContainerDied","Data":"78e41bb1a9c33cf73cab83ff2b6b11248eaa897021cc13de028de30c3b31c485"} Dec 10 11:13:09 crc kubenswrapper[4780]: I1210 11:13:09.184248 4780 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-d8nw2" Dec 10 11:13:09 crc kubenswrapper[4780]: I1210 11:13:09.184647 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-hzgvf" event={"ID":"1c086cc9-263e-4d8e-b3fb-a64fea7f179c","Type":"ContainerStarted","Data":"71a93a778caa2514843d4ae8acef266200e5dcfe4168e60637a81cf3a020a0ba"} Dec 10 11:13:09 crc kubenswrapper[4780]: I1210 11:13:09.229327 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"bef902c7-4e5f-4af9-bda4-0c92b8521901","Type":"ContainerStarted","Data":"8eddc0fcf9942c109477abdc701fb351c6fa744d3e546767239affa74a86cdc4"} Dec 10 11:13:09 crc kubenswrapper[4780]: I1210 11:13:09.397878 4780 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-x98wp" Dec 10 11:13:09 crc kubenswrapper[4780]: I1210 11:13:09.441238 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/200a4e5a-e0a1-4e36-8843-147fcf1de0b4-utilities\") pod \"200a4e5a-e0a1-4e36-8843-147fcf1de0b4\" (UID: \"200a4e5a-e0a1-4e36-8843-147fcf1de0b4\") " Dec 10 11:13:09 crc kubenswrapper[4780]: I1210 11:13:09.441393 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mdqk5\" (UniqueName: \"kubernetes.io/projected/200a4e5a-e0a1-4e36-8843-147fcf1de0b4-kube-api-access-mdqk5\") pod \"200a4e5a-e0a1-4e36-8843-147fcf1de0b4\" (UID: \"200a4e5a-e0a1-4e36-8843-147fcf1de0b4\") " Dec 10 11:13:09 crc kubenswrapper[4780]: I1210 11:13:09.441635 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/200a4e5a-e0a1-4e36-8843-147fcf1de0b4-catalog-content\") pod \"200a4e5a-e0a1-4e36-8843-147fcf1de0b4\" (UID: \"200a4e5a-e0a1-4e36-8843-147fcf1de0b4\") " Dec 10 11:13:09 crc kubenswrapper[4780]: I1210 11:13:09.447264 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/200a4e5a-e0a1-4e36-8843-147fcf1de0b4-utilities" (OuterVolumeSpecName: "utilities") pod "200a4e5a-e0a1-4e36-8843-147fcf1de0b4" (UID: "200a4e5a-e0a1-4e36-8843-147fcf1de0b4"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 11:13:09 crc kubenswrapper[4780]: I1210 11:13:09.459331 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/200a4e5a-e0a1-4e36-8843-147fcf1de0b4-kube-api-access-mdqk5" (OuterVolumeSpecName: "kube-api-access-mdqk5") pod "200a4e5a-e0a1-4e36-8843-147fcf1de0b4" (UID: "200a4e5a-e0a1-4e36-8843-147fcf1de0b4"). InnerVolumeSpecName "kube-api-access-mdqk5". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:13:09 crc kubenswrapper[4780]: I1210 11:13:09.528009 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/200a4e5a-e0a1-4e36-8843-147fcf1de0b4-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "200a4e5a-e0a1-4e36-8843-147fcf1de0b4" (UID: "200a4e5a-e0a1-4e36-8843-147fcf1de0b4"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 11:13:09 crc kubenswrapper[4780]: I1210 11:13:09.546090 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/212dc98e-3ddc-4efb-8300-968ba8c38626-utilities\") pod \"212dc98e-3ddc-4efb-8300-968ba8c38626\" (UID: \"212dc98e-3ddc-4efb-8300-968ba8c38626\") " Dec 10 11:13:09 crc kubenswrapper[4780]: I1210 11:13:09.546351 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/212dc98e-3ddc-4efb-8300-968ba8c38626-catalog-content\") pod \"212dc98e-3ddc-4efb-8300-968ba8c38626\" (UID: \"212dc98e-3ddc-4efb-8300-968ba8c38626\") " Dec 10 11:13:09 crc kubenswrapper[4780]: I1210 11:13:09.546389 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gml4r\" (UniqueName: \"kubernetes.io/projected/212dc98e-3ddc-4efb-8300-968ba8c38626-kube-api-access-gml4r\") pod \"212dc98e-3ddc-4efb-8300-968ba8c38626\" (UID: \"212dc98e-3ddc-4efb-8300-968ba8c38626\") " Dec 10 11:13:09 crc kubenswrapper[4780]: I1210 11:13:09.548629 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/212dc98e-3ddc-4efb-8300-968ba8c38626-utilities" (OuterVolumeSpecName: "utilities") pod "212dc98e-3ddc-4efb-8300-968ba8c38626" (UID: "212dc98e-3ddc-4efb-8300-968ba8c38626"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 11:13:09 crc kubenswrapper[4780]: I1210 11:13:09.550230 4780 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/200a4e5a-e0a1-4e36-8843-147fcf1de0b4-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 10 11:13:09 crc kubenswrapper[4780]: I1210 11:13:09.550265 4780 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/212dc98e-3ddc-4efb-8300-968ba8c38626-utilities\") on node \"crc\" DevicePath \"\"" Dec 10 11:13:09 crc kubenswrapper[4780]: I1210 11:13:09.550276 4780 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/200a4e5a-e0a1-4e36-8843-147fcf1de0b4-utilities\") on node \"crc\" DevicePath \"\"" Dec 10 11:13:09 crc kubenswrapper[4780]: I1210 11:13:09.550286 4780 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mdqk5\" (UniqueName: \"kubernetes.io/projected/200a4e5a-e0a1-4e36-8843-147fcf1de0b4-kube-api-access-mdqk5\") on node \"crc\" DevicePath \"\"" Dec 10 11:13:09 crc kubenswrapper[4780]: I1210 11:13:09.562498 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/212dc98e-3ddc-4efb-8300-968ba8c38626-kube-api-access-gml4r" (OuterVolumeSpecName: "kube-api-access-gml4r") pod "212dc98e-3ddc-4efb-8300-968ba8c38626" (UID: "212dc98e-3ddc-4efb-8300-968ba8c38626"). InnerVolumeSpecName "kube-api-access-gml4r". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:13:09 crc kubenswrapper[4780]: I1210 11:13:09.653400 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/3e1a1225-bdae-4dcb-b10a-02504fe590cd-etc-swift\") pod \"swift-storage-0\" (UID: \"3e1a1225-bdae-4dcb-b10a-02504fe590cd\") " pod="openstack/swift-storage-0" Dec 10 11:13:09 crc kubenswrapper[4780]: E1210 11:13:09.653690 4780 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Dec 10 11:13:09 crc kubenswrapper[4780]: E1210 11:13:09.653766 4780 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Dec 10 11:13:09 crc kubenswrapper[4780]: E1210 11:13:09.653863 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3e1a1225-bdae-4dcb-b10a-02504fe590cd-etc-swift podName:3e1a1225-bdae-4dcb-b10a-02504fe590cd nodeName:}" failed. No retries permitted until 2025-12-10 11:13:25.653835778 +0000 UTC m=+1710.507229391 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/3e1a1225-bdae-4dcb-b10a-02504fe590cd-etc-swift") pod "swift-storage-0" (UID: "3e1a1225-bdae-4dcb-b10a-02504fe590cd") : configmap "swift-ring-files" not found Dec 10 11:13:09 crc kubenswrapper[4780]: I1210 11:13:09.654319 4780 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gml4r\" (UniqueName: \"kubernetes.io/projected/212dc98e-3ddc-4efb-8300-968ba8c38626-kube-api-access-gml4r\") on node \"crc\" DevicePath \"\"" Dec 10 11:13:09 crc kubenswrapper[4780]: I1210 11:13:09.677283 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/212dc98e-3ddc-4efb-8300-968ba8c38626-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "212dc98e-3ddc-4efb-8300-968ba8c38626" (UID: "212dc98e-3ddc-4efb-8300-968ba8c38626"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 11:13:09 crc kubenswrapper[4780]: I1210 11:13:09.757085 4780 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/212dc98e-3ddc-4efb-8300-968ba8c38626-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 10 11:13:10 crc kubenswrapper[4780]: I1210 11:13:10.251369 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-hzgvf" event={"ID":"1c086cc9-263e-4d8e-b3fb-a64fea7f179c","Type":"ContainerStarted","Data":"e10b1d055f6d38f1d9d0300c2b7164e5a12f3fc9101a0ffff243775423b1cb00"} Dec 10 11:13:10 crc kubenswrapper[4780]: I1210 11:13:10.252037 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-controller-ovs-hzgvf" Dec 10 11:13:10 crc kubenswrapper[4780]: I1210 11:13:10.252062 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-controller-ovs-hzgvf" Dec 10 11:13:10 crc kubenswrapper[4780]: I1210 11:13:10.260372 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-x98wp" event={"ID":"212dc98e-3ddc-4efb-8300-968ba8c38626","Type":"ContainerDied","Data":"499d65c4d8b3edc76ff008319ed38533470be8088e64e1c3386f934ff98b9776"} Dec 10 11:13:10 crc kubenswrapper[4780]: I1210 11:13:10.260406 4780 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-x98wp" Dec 10 11:13:10 crc kubenswrapper[4780]: I1210 11:13:10.260480 4780 scope.go:117] "RemoveContainer" containerID="d75ad5c0ee78653752504b1129a63b9c964506969dabbe920ad3b01a94cdc506" Dec 10 11:13:10 crc kubenswrapper[4780]: I1210 11:13:10.280322 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-d8nw2" event={"ID":"200a4e5a-e0a1-4e36-8843-147fcf1de0b4","Type":"ContainerDied","Data":"c9e0906bac7ef2e7f92e16979c48b39d3285cd13178ad4d7c16641abc15d666f"} Dec 10 11:13:10 crc kubenswrapper[4780]: I1210 11:13:10.280388 4780 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-d8nw2" Dec 10 11:13:10 crc kubenswrapper[4780]: I1210 11:13:10.294282 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-698758b865-86zlx" event={"ID":"0d1d0d57-d4fe-4b96-b59b-0dad3be263d7","Type":"ContainerStarted","Data":"8af88366bfa447c6c69e5fab61649911eb8e5f373bb2dae0e0d88b7fae52f781"} Dec 10 11:13:10 crc kubenswrapper[4780]: I1210 11:13:10.295146 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-698758b865-86zlx" Dec 10 11:13:10 crc kubenswrapper[4780]: I1210 11:13:10.370822 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-ovs-hzgvf" podStartSLOduration=26.810983705 podStartE2EDuration="1m25.370782365s" podCreationTimestamp="2025-12-10 11:11:45 +0000 UTC" firstStartedPulling="2025-12-10 11:11:49.755099691 +0000 UTC m=+1614.608493134" lastFinishedPulling="2025-12-10 11:12:48.314898351 +0000 UTC m=+1673.168291794" observedRunningTime="2025-12-10 11:13:10.278991544 +0000 UTC m=+1695.132384987" watchObservedRunningTime="2025-12-10 11:13:10.370782365 +0000 UTC m=+1695.224175798" Dec 10 11:13:10 crc kubenswrapper[4780]: I1210 11:13:10.372875 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-x98wp"] Dec 10 11:13:10 crc kubenswrapper[4780]: I1210 11:13:10.387847 4780 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-x98wp"] Dec 10 11:13:10 crc kubenswrapper[4780]: I1210 11:13:10.425304 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-d8nw2"] Dec 10 11:13:10 crc kubenswrapper[4780]: I1210 11:13:10.443795 4780 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-d8nw2"] Dec 10 11:13:10 crc kubenswrapper[4780]: I1210 11:13:10.445506 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-698758b865-86zlx" podStartSLOduration=19.44548079 podStartE2EDuration="19.44548079s" podCreationTimestamp="2025-12-10 11:12:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 11:13:10.422221287 +0000 UTC m=+1695.275614750" watchObservedRunningTime="2025-12-10 11:13:10.44548079 +0000 UTC m=+1695.298874233" Dec 10 11:13:11 crc kubenswrapper[4780]: I1210 11:13:11.099666 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/kube-state-metrics-0" Dec 10 11:13:12 crc kubenswrapper[4780]: I1210 11:13:12.009065 4780 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="200a4e5a-e0a1-4e36-8843-147fcf1de0b4" 
path="/var/lib/kubelet/pods/200a4e5a-e0a1-4e36-8843-147fcf1de0b4/volumes" Dec 10 11:13:12 crc kubenswrapper[4780]: I1210 11:13:12.010080 4780 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="212dc98e-3ddc-4efb-8300-968ba8c38626" path="/var/lib/kubelet/pods/212dc98e-3ddc-4efb-8300-968ba8c38626/volumes" Dec 10 11:13:13 crc kubenswrapper[4780]: I1210 11:13:13.348071 4780 scope.go:117] "RemoveContainer" containerID="5c2fba807e54e05dfd2af3a2ed774932fd34e8f7d29a16542eac79f887cf9c67" Dec 10 11:13:14 crc kubenswrapper[4780]: I1210 11:13:14.226779 4780 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/openstack-galera-0" Dec 10 11:13:14 crc kubenswrapper[4780]: I1210 11:13:14.359484 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/openstack-galera-0" Dec 10 11:13:14 crc kubenswrapper[4780]: I1210 11:13:14.362140 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-698758b865-86zlx" Dec 10 11:13:14 crc kubenswrapper[4780]: I1210 11:13:14.502652 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-86db49b7ff-zl2c8"] Dec 10 11:13:14 crc kubenswrapper[4780]: I1210 11:13:14.503023 4780 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-86db49b7ff-zl2c8" podUID="9a36247f-4fb6-4ec7-9688-aafdb77c1243" containerName="dnsmasq-dns" containerID="cri-o://8a8467c9bf3fe827578e6033e1687ecd47450d4c35fa7663c421090f496acbfe" gracePeriod=10 Dec 10 11:13:14 crc kubenswrapper[4780]: I1210 11:13:14.591025 4780 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-86db49b7ff-zl2c8" podUID="9a36247f-4fb6-4ec7-9688-aafdb77c1243" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.144:5353: connect: connection refused" Dec 10 11:13:15 crc kubenswrapper[4780]: I1210 11:13:15.582399 4780 generic.go:334] "Generic (PLEG): container finished" podID="9a36247f-4fb6-4ec7-9688-aafdb77c1243" containerID="8a8467c9bf3fe827578e6033e1687ecd47450d4c35fa7663c421090f496acbfe" exitCode=0 Dec 10 11:13:15 crc kubenswrapper[4780]: I1210 11:13:15.582508 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-86db49b7ff-zl2c8" event={"ID":"9a36247f-4fb6-4ec7-9688-aafdb77c1243","Type":"ContainerDied","Data":"8a8467c9bf3fe827578e6033e1687ecd47450d4c35fa7663c421090f496acbfe"} Dec 10 11:13:15 crc kubenswrapper[4780]: I1210 11:13:15.649784 4780 scope.go:117] "RemoveContainer" containerID="71eb4039d1ca08b1cc21985a283e384cd25949a02d25695aebea24cbd2ad46d8" Dec 10 11:13:15 crc kubenswrapper[4780]: I1210 11:13:15.736097 4780 scope.go:117] "RemoveContainer" containerID="19fd7d2dee1bc4e5995f821bcaaa82497fca8f4ce333d741a50ee82d98bbf683" Dec 10 11:13:15 crc kubenswrapper[4780]: I1210 11:13:15.921423 4780 scope.go:117] "RemoveContainer" containerID="4f96a19c7e961068fe26936d1c9f91d6ba0b50df2234ec005171e7fc37dd0040" Dec 10 11:13:15 crc kubenswrapper[4780]: I1210 11:13:15.989358 4780 scope.go:117] "RemoveContainer" containerID="a9bf80d57521f9130a8c906cadd4a2019e92ea86c6d3c66204eb8ff31a515645" Dec 10 11:13:16 crc kubenswrapper[4780]: I1210 11:13:16.153351 4780 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-86db49b7ff-zl2c8" Dec 10 11:13:16 crc kubenswrapper[4780]: I1210 11:13:16.320461 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dgr7d\" (UniqueName: \"kubernetes.io/projected/9a36247f-4fb6-4ec7-9688-aafdb77c1243-kube-api-access-dgr7d\") pod \"9a36247f-4fb6-4ec7-9688-aafdb77c1243\" (UID: \"9a36247f-4fb6-4ec7-9688-aafdb77c1243\") " Dec 10 11:13:16 crc kubenswrapper[4780]: I1210 11:13:16.320582 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/9a36247f-4fb6-4ec7-9688-aafdb77c1243-ovsdbserver-sb\") pod \"9a36247f-4fb6-4ec7-9688-aafdb77c1243\" (UID: \"9a36247f-4fb6-4ec7-9688-aafdb77c1243\") " Dec 10 11:13:16 crc kubenswrapper[4780]: I1210 11:13:16.320648 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9a36247f-4fb6-4ec7-9688-aafdb77c1243-config\") pod \"9a36247f-4fb6-4ec7-9688-aafdb77c1243\" (UID: \"9a36247f-4fb6-4ec7-9688-aafdb77c1243\") " Dec 10 11:13:16 crc kubenswrapper[4780]: I1210 11:13:16.320728 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/9a36247f-4fb6-4ec7-9688-aafdb77c1243-dns-svc\") pod \"9a36247f-4fb6-4ec7-9688-aafdb77c1243\" (UID: \"9a36247f-4fb6-4ec7-9688-aafdb77c1243\") " Dec 10 11:13:16 crc kubenswrapper[4780]: I1210 11:13:16.320859 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/9a36247f-4fb6-4ec7-9688-aafdb77c1243-ovsdbserver-nb\") pod \"9a36247f-4fb6-4ec7-9688-aafdb77c1243\" (UID: \"9a36247f-4fb6-4ec7-9688-aafdb77c1243\") " Dec 10 11:13:16 crc kubenswrapper[4780]: I1210 11:13:16.343333 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9a36247f-4fb6-4ec7-9688-aafdb77c1243-kube-api-access-dgr7d" (OuterVolumeSpecName: "kube-api-access-dgr7d") pod "9a36247f-4fb6-4ec7-9688-aafdb77c1243" (UID: "9a36247f-4fb6-4ec7-9688-aafdb77c1243"). InnerVolumeSpecName "kube-api-access-dgr7d". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:13:16 crc kubenswrapper[4780]: I1210 11:13:16.409770 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9a36247f-4fb6-4ec7-9688-aafdb77c1243-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "9a36247f-4fb6-4ec7-9688-aafdb77c1243" (UID: "9a36247f-4fb6-4ec7-9688-aafdb77c1243"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 11:13:16 crc kubenswrapper[4780]: I1210 11:13:16.414833 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9a36247f-4fb6-4ec7-9688-aafdb77c1243-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "9a36247f-4fb6-4ec7-9688-aafdb77c1243" (UID: "9a36247f-4fb6-4ec7-9688-aafdb77c1243"). InnerVolumeSpecName "ovsdbserver-sb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 11:13:16 crc kubenswrapper[4780]: I1210 11:13:16.424741 4780 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dgr7d\" (UniqueName: \"kubernetes.io/projected/9a36247f-4fb6-4ec7-9688-aafdb77c1243-kube-api-access-dgr7d\") on node \"crc\" DevicePath \"\"" Dec 10 11:13:16 crc kubenswrapper[4780]: I1210 11:13:16.424801 4780 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/9a36247f-4fb6-4ec7-9688-aafdb77c1243-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Dec 10 11:13:16 crc kubenswrapper[4780]: I1210 11:13:16.424815 4780 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/9a36247f-4fb6-4ec7-9688-aafdb77c1243-dns-svc\") on node \"crc\" DevicePath \"\"" Dec 10 11:13:16 crc kubenswrapper[4780]: I1210 11:13:16.427320 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9a36247f-4fb6-4ec7-9688-aafdb77c1243-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "9a36247f-4fb6-4ec7-9688-aafdb77c1243" (UID: "9a36247f-4fb6-4ec7-9688-aafdb77c1243"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 11:13:16 crc kubenswrapper[4780]: I1210 11:13:16.428648 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9a36247f-4fb6-4ec7-9688-aafdb77c1243-config" (OuterVolumeSpecName: "config") pod "9a36247f-4fb6-4ec7-9688-aafdb77c1243" (UID: "9a36247f-4fb6-4ec7-9688-aafdb77c1243"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 11:13:16 crc kubenswrapper[4780]: I1210 11:13:16.526780 4780 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9a36247f-4fb6-4ec7-9688-aafdb77c1243-config\") on node \"crc\" DevicePath \"\"" Dec 10 11:13:16 crc kubenswrapper[4780]: I1210 11:13:16.526840 4780 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/9a36247f-4fb6-4ec7-9688-aafdb77c1243-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Dec 10 11:13:16 crc kubenswrapper[4780]: I1210 11:13:16.540408 4780 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/openstack-cell1-galera-0" Dec 10 11:13:16 crc kubenswrapper[4780]: I1210 11:13:16.599186 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"7f260d05-cefc-4e35-a7f3-b1a656cdf9cd","Type":"ContainerStarted","Data":"61a65e94a51c9c0bec7b7d68be0ff170ce1b0b202894cc49e50f802adc6f32c8"} Dec 10 11:13:16 crc kubenswrapper[4780]: I1210 11:13:16.599273 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"7f260d05-cefc-4e35-a7f3-b1a656cdf9cd","Type":"ContainerStarted","Data":"cf4ee925e48ef4b38c4058b2b25f71dee8e5e7f2818e5ae5e9f171a8f55e0391"} Dec 10 11:13:16 crc kubenswrapper[4780]: I1210 11:13:16.599377 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-northd-0" Dec 10 11:13:16 crc kubenswrapper[4780]: I1210 11:13:16.607760 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-86db49b7ff-zl2c8" event={"ID":"9a36247f-4fb6-4ec7-9688-aafdb77c1243","Type":"ContainerDied","Data":"319aabb3e08290ef6764d8a93d1588922d2f2f89834ee6b3353d41554eb58bef"} Dec 10 11:13:16 crc kubenswrapper[4780]: I1210 11:13:16.607832 
4780 scope.go:117] "RemoveContainer" containerID="8a8467c9bf3fe827578e6033e1687ecd47450d4c35fa7663c421090f496acbfe" Dec 10 11:13:16 crc kubenswrapper[4780]: I1210 11:13:16.607995 4780 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-86db49b7ff-zl2c8" Dec 10 11:13:16 crc kubenswrapper[4780]: I1210 11:13:16.620638 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-5zqjq" event={"ID":"72627815-752d-44a8-96cc-428f0239411d","Type":"ContainerStarted","Data":"83081b3dd12b5034cbe7328cde95d8befaa77fdb55499b34324cb97369597a7b"} Dec 10 11:13:16 crc kubenswrapper[4780]: I1210 11:13:16.634582 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-northd-0" podStartSLOduration=3.092949021 podStartE2EDuration="10.634549663s" podCreationTimestamp="2025-12-10 11:13:06 +0000 UTC" firstStartedPulling="2025-12-10 11:13:08.195859009 +0000 UTC m=+1693.049252452" lastFinishedPulling="2025-12-10 11:13:15.737459651 +0000 UTC m=+1700.590853094" observedRunningTime="2025-12-10 11:13:16.627565495 +0000 UTC m=+1701.480958958" watchObservedRunningTime="2025-12-10 11:13:16.634549663 +0000 UTC m=+1701.487943126" Dec 10 11:13:16 crc kubenswrapper[4780]: I1210 11:13:16.661886 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/swift-ring-rebalance-5zqjq" podStartSLOduration=14.806961131 podStartE2EDuration="23.66185642s" podCreationTimestamp="2025-12-10 11:12:53 +0000 UTC" firstStartedPulling="2025-12-10 11:13:06.89894385 +0000 UTC m=+1691.752337293" lastFinishedPulling="2025-12-10 11:13:15.753839139 +0000 UTC m=+1700.607232582" observedRunningTime="2025-12-10 11:13:16.654185694 +0000 UTC m=+1701.507579157" watchObservedRunningTime="2025-12-10 11:13:16.66185642 +0000 UTC m=+1701.515249863" Dec 10 11:13:16 crc kubenswrapper[4780]: I1210 11:13:16.667830 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/openstack-cell1-galera-0" Dec 10 11:13:16 crc kubenswrapper[4780]: I1210 11:13:16.751310 4780 scope.go:117] "RemoveContainer" containerID="83b317a9232272a9cdac91028a239c2a11ddcf7386bb31fc89962723d5d556b3" Dec 10 11:13:16 crc kubenswrapper[4780]: I1210 11:13:16.757288 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-86db49b7ff-zl2c8"] Dec 10 11:13:16 crc kubenswrapper[4780]: I1210 11:13:16.772734 4780 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-86db49b7ff-zl2c8"] Dec 10 11:13:17 crc kubenswrapper[4780]: I1210 11:13:17.395745 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-db-create-sms69"] Dec 10 11:13:17 crc kubenswrapper[4780]: E1210 11:13:17.396612 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="200a4e5a-e0a1-4e36-8843-147fcf1de0b4" containerName="registry-server" Dec 10 11:13:17 crc kubenswrapper[4780]: I1210 11:13:17.396644 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="200a4e5a-e0a1-4e36-8843-147fcf1de0b4" containerName="registry-server" Dec 10 11:13:17 crc kubenswrapper[4780]: E1210 11:13:17.396670 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9a36247f-4fb6-4ec7-9688-aafdb77c1243" containerName="init" Dec 10 11:13:17 crc kubenswrapper[4780]: I1210 11:13:17.396677 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="9a36247f-4fb6-4ec7-9688-aafdb77c1243" containerName="init" Dec 10 11:13:17 crc kubenswrapper[4780]: E1210 11:13:17.396695 4780 cpu_manager.go:410] 
"RemoveStaleState: removing container" podUID="200a4e5a-e0a1-4e36-8843-147fcf1de0b4" containerName="extract-content" Dec 10 11:13:17 crc kubenswrapper[4780]: I1210 11:13:17.396706 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="200a4e5a-e0a1-4e36-8843-147fcf1de0b4" containerName="extract-content" Dec 10 11:13:17 crc kubenswrapper[4780]: E1210 11:13:17.396743 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="200a4e5a-e0a1-4e36-8843-147fcf1de0b4" containerName="extract-utilities" Dec 10 11:13:17 crc kubenswrapper[4780]: I1210 11:13:17.396749 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="200a4e5a-e0a1-4e36-8843-147fcf1de0b4" containerName="extract-utilities" Dec 10 11:13:17 crc kubenswrapper[4780]: E1210 11:13:17.396762 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9a36247f-4fb6-4ec7-9688-aafdb77c1243" containerName="dnsmasq-dns" Dec 10 11:13:17 crc kubenswrapper[4780]: I1210 11:13:17.396769 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="9a36247f-4fb6-4ec7-9688-aafdb77c1243" containerName="dnsmasq-dns" Dec 10 11:13:17 crc kubenswrapper[4780]: E1210 11:13:17.396777 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="212dc98e-3ddc-4efb-8300-968ba8c38626" containerName="extract-content" Dec 10 11:13:17 crc kubenswrapper[4780]: I1210 11:13:17.396783 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="212dc98e-3ddc-4efb-8300-968ba8c38626" containerName="extract-content" Dec 10 11:13:17 crc kubenswrapper[4780]: E1210 11:13:17.396806 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="212dc98e-3ddc-4efb-8300-968ba8c38626" containerName="extract-utilities" Dec 10 11:13:17 crc kubenswrapper[4780]: I1210 11:13:17.396813 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="212dc98e-3ddc-4efb-8300-968ba8c38626" containerName="extract-utilities" Dec 10 11:13:17 crc kubenswrapper[4780]: E1210 11:13:17.396823 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="212dc98e-3ddc-4efb-8300-968ba8c38626" containerName="registry-server" Dec 10 11:13:17 crc kubenswrapper[4780]: I1210 11:13:17.396829 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="212dc98e-3ddc-4efb-8300-968ba8c38626" containerName="registry-server" Dec 10 11:13:17 crc kubenswrapper[4780]: I1210 11:13:17.397113 4780 memory_manager.go:354] "RemoveStaleState removing state" podUID="9a36247f-4fb6-4ec7-9688-aafdb77c1243" containerName="dnsmasq-dns" Dec 10 11:13:17 crc kubenswrapper[4780]: I1210 11:13:17.397164 4780 memory_manager.go:354] "RemoveStaleState removing state" podUID="212dc98e-3ddc-4efb-8300-968ba8c38626" containerName="registry-server" Dec 10 11:13:17 crc kubenswrapper[4780]: I1210 11:13:17.397177 4780 memory_manager.go:354] "RemoveStaleState removing state" podUID="200a4e5a-e0a1-4e36-8843-147fcf1de0b4" containerName="registry-server" Dec 10 11:13:17 crc kubenswrapper[4780]: I1210 11:13:17.398412 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-create-sms69" Dec 10 11:13:17 crc kubenswrapper[4780]: I1210 11:13:17.410063 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-2e1e-account-create-update-xshf7"] Dec 10 11:13:17 crc kubenswrapper[4780]: I1210 11:13:17.411990 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-2e1e-account-create-update-xshf7" Dec 10 11:13:17 crc kubenswrapper[4780]: I1210 11:13:17.414458 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-db-secret" Dec 10 11:13:17 crc kubenswrapper[4780]: I1210 11:13:17.422642 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-create-sms69"] Dec 10 11:13:17 crc kubenswrapper[4780]: I1210 11:13:17.433484 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-2e1e-account-create-update-xshf7"] Dec 10 11:13:17 crc kubenswrapper[4780]: I1210 11:13:17.468341 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c6e99715-0d2d-4998-899e-58c68d7db78a-operator-scripts\") pod \"keystone-2e1e-account-create-update-xshf7\" (UID: \"c6e99715-0d2d-4998-899e-58c68d7db78a\") " pod="openstack/keystone-2e1e-account-create-update-xshf7" Dec 10 11:13:17 crc kubenswrapper[4780]: I1210 11:13:17.468684 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w4hdl\" (UniqueName: \"kubernetes.io/projected/c6e99715-0d2d-4998-899e-58c68d7db78a-kube-api-access-w4hdl\") pod \"keystone-2e1e-account-create-update-xshf7\" (UID: \"c6e99715-0d2d-4998-899e-58c68d7db78a\") " pod="openstack/keystone-2e1e-account-create-update-xshf7" Dec 10 11:13:17 crc kubenswrapper[4780]: I1210 11:13:17.468842 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/df258243-e086-4015-829c-01101a52b26e-operator-scripts\") pod \"keystone-db-create-sms69\" (UID: \"df258243-e086-4015-829c-01101a52b26e\") " pod="openstack/keystone-db-create-sms69" Dec 10 11:13:17 crc kubenswrapper[4780]: I1210 11:13:17.469290 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w8wp8\" (UniqueName: \"kubernetes.io/projected/df258243-e086-4015-829c-01101a52b26e-kube-api-access-w8wp8\") pod \"keystone-db-create-sms69\" (UID: \"df258243-e086-4015-829c-01101a52b26e\") " pod="openstack/keystone-db-create-sms69" Dec 10 11:13:17 crc kubenswrapper[4780]: I1210 11:13:17.572721 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w8wp8\" (UniqueName: \"kubernetes.io/projected/df258243-e086-4015-829c-01101a52b26e-kube-api-access-w8wp8\") pod \"keystone-db-create-sms69\" (UID: \"df258243-e086-4015-829c-01101a52b26e\") " pod="openstack/keystone-db-create-sms69" Dec 10 11:13:17 crc kubenswrapper[4780]: I1210 11:13:17.572854 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c6e99715-0d2d-4998-899e-58c68d7db78a-operator-scripts\") pod \"keystone-2e1e-account-create-update-xshf7\" (UID: \"c6e99715-0d2d-4998-899e-58c68d7db78a\") " pod="openstack/keystone-2e1e-account-create-update-xshf7" Dec 10 11:13:17 crc kubenswrapper[4780]: I1210 11:13:17.572975 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w4hdl\" (UniqueName: \"kubernetes.io/projected/c6e99715-0d2d-4998-899e-58c68d7db78a-kube-api-access-w4hdl\") pod \"keystone-2e1e-account-create-update-xshf7\" (UID: \"c6e99715-0d2d-4998-899e-58c68d7db78a\") " pod="openstack/keystone-2e1e-account-create-update-xshf7" Dec 10 11:13:17 crc 
kubenswrapper[4780]: I1210 11:13:17.573033 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/df258243-e086-4015-829c-01101a52b26e-operator-scripts\") pod \"keystone-db-create-sms69\" (UID: \"df258243-e086-4015-829c-01101a52b26e\") " pod="openstack/keystone-db-create-sms69" Dec 10 11:13:17 crc kubenswrapper[4780]: I1210 11:13:17.574675 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c6e99715-0d2d-4998-899e-58c68d7db78a-operator-scripts\") pod \"keystone-2e1e-account-create-update-xshf7\" (UID: \"c6e99715-0d2d-4998-899e-58c68d7db78a\") " pod="openstack/keystone-2e1e-account-create-update-xshf7" Dec 10 11:13:17 crc kubenswrapper[4780]: I1210 11:13:17.574864 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/df258243-e086-4015-829c-01101a52b26e-operator-scripts\") pod \"keystone-db-create-sms69\" (UID: \"df258243-e086-4015-829c-01101a52b26e\") " pod="openstack/keystone-db-create-sms69" Dec 10 11:13:17 crc kubenswrapper[4780]: I1210 11:13:17.603780 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-w8wp8\" (UniqueName: \"kubernetes.io/projected/df258243-e086-4015-829c-01101a52b26e-kube-api-access-w8wp8\") pod \"keystone-db-create-sms69\" (UID: \"df258243-e086-4015-829c-01101a52b26e\") " pod="openstack/keystone-db-create-sms69" Dec 10 11:13:17 crc kubenswrapper[4780]: I1210 11:13:17.616620 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-w4hdl\" (UniqueName: \"kubernetes.io/projected/c6e99715-0d2d-4998-899e-58c68d7db78a-kube-api-access-w4hdl\") pod \"keystone-2e1e-account-create-update-xshf7\" (UID: \"c6e99715-0d2d-4998-899e-58c68d7db78a\") " pod="openstack/keystone-2e1e-account-create-update-xshf7" Dec 10 11:13:17 crc kubenswrapper[4780]: I1210 11:13:17.750002 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-ab68-account-create-update-ss7hk"] Dec 10 11:13:17 crc kubenswrapper[4780]: I1210 11:13:17.752543 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-ab68-account-create-update-ss7hk" Dec 10 11:13:17 crc kubenswrapper[4780]: I1210 11:13:17.760047 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-db-secret" Dec 10 11:13:17 crc kubenswrapper[4780]: I1210 11:13:17.773134 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-ab68-account-create-update-ss7hk"] Dec 10 11:13:17 crc kubenswrapper[4780]: I1210 11:13:17.783705 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-create-sms69" Dec 10 11:13:17 crc kubenswrapper[4780]: I1210 11:13:17.792727 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sq7z9\" (UniqueName: \"kubernetes.io/projected/bd881333-f3fb-4c4d-b31e-f755f9c1271d-kube-api-access-sq7z9\") pod \"glance-ab68-account-create-update-ss7hk\" (UID: \"bd881333-f3fb-4c4d-b31e-f755f9c1271d\") " pod="openstack/glance-ab68-account-create-update-ss7hk" Dec 10 11:13:17 crc kubenswrapper[4780]: I1210 11:13:17.797294 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-2e1e-account-create-update-xshf7" Dec 10 11:13:17 crc kubenswrapper[4780]: I1210 11:13:17.792896 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/bd881333-f3fb-4c4d-b31e-f755f9c1271d-operator-scripts\") pod \"glance-ab68-account-create-update-ss7hk\" (UID: \"bd881333-f3fb-4c4d-b31e-f755f9c1271d\") " pod="openstack/glance-ab68-account-create-update-ss7hk" Dec 10 11:13:17 crc kubenswrapper[4780]: I1210 11:13:17.830532 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-db-create-vwf8h"] Dec 10 11:13:17 crc kubenswrapper[4780]: I1210 11:13:17.835103 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-create-vwf8h" Dec 10 11:13:17 crc kubenswrapper[4780]: I1210 11:13:17.883524 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-create-vwf8h"] Dec 10 11:13:17 crc kubenswrapper[4780]: I1210 11:13:17.931514 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g8qm6\" (UniqueName: \"kubernetes.io/projected/8bc9d7bc-3890-4523-b805-9cd4c167fd9b-kube-api-access-g8qm6\") pod \"glance-db-create-vwf8h\" (UID: \"8bc9d7bc-3890-4523-b805-9cd4c167fd9b\") " pod="openstack/glance-db-create-vwf8h" Dec 10 11:13:17 crc kubenswrapper[4780]: I1210 11:13:17.932032 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sq7z9\" (UniqueName: \"kubernetes.io/projected/bd881333-f3fb-4c4d-b31e-f755f9c1271d-kube-api-access-sq7z9\") pod \"glance-ab68-account-create-update-ss7hk\" (UID: \"bd881333-f3fb-4c4d-b31e-f755f9c1271d\") " pod="openstack/glance-ab68-account-create-update-ss7hk" Dec 10 11:13:17 crc kubenswrapper[4780]: I1210 11:13:17.932156 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8bc9d7bc-3890-4523-b805-9cd4c167fd9b-operator-scripts\") pod \"glance-db-create-vwf8h\" (UID: \"8bc9d7bc-3890-4523-b805-9cd4c167fd9b\") " pod="openstack/glance-db-create-vwf8h" Dec 10 11:13:17 crc kubenswrapper[4780]: I1210 11:13:17.932415 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/bd881333-f3fb-4c4d-b31e-f755f9c1271d-operator-scripts\") pod \"glance-ab68-account-create-update-ss7hk\" (UID: \"bd881333-f3fb-4c4d-b31e-f755f9c1271d\") " pod="openstack/glance-ab68-account-create-update-ss7hk" Dec 10 11:13:17 crc kubenswrapper[4780]: I1210 11:13:17.933573 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/bd881333-f3fb-4c4d-b31e-f755f9c1271d-operator-scripts\") pod \"glance-ab68-account-create-update-ss7hk\" (UID: \"bd881333-f3fb-4c4d-b31e-f755f9c1271d\") " pod="openstack/glance-ab68-account-create-update-ss7hk" Dec 10 11:13:18 crc kubenswrapper[4780]: I1210 11:13:18.069033 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sq7z9\" (UniqueName: \"kubernetes.io/projected/bd881333-f3fb-4c4d-b31e-f755f9c1271d-kube-api-access-sq7z9\") pod \"glance-ab68-account-create-update-ss7hk\" (UID: \"bd881333-f3fb-4c4d-b31e-f755f9c1271d\") " pod="openstack/glance-ab68-account-create-update-ss7hk" Dec 10 11:13:18 crc kubenswrapper[4780]: I1210 11:13:18.096670 4780 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g8qm6\" (UniqueName: \"kubernetes.io/projected/8bc9d7bc-3890-4523-b805-9cd4c167fd9b-kube-api-access-g8qm6\") pod \"glance-db-create-vwf8h\" (UID: \"8bc9d7bc-3890-4523-b805-9cd4c167fd9b\") " pod="openstack/glance-db-create-vwf8h" Dec 10 11:13:18 crc kubenswrapper[4780]: I1210 11:13:18.096956 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8bc9d7bc-3890-4523-b805-9cd4c167fd9b-operator-scripts\") pod \"glance-db-create-vwf8h\" (UID: \"8bc9d7bc-3890-4523-b805-9cd4c167fd9b\") " pod="openstack/glance-db-create-vwf8h" Dec 10 11:13:18 crc kubenswrapper[4780]: I1210 11:13:18.098764 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8bc9d7bc-3890-4523-b805-9cd4c167fd9b-operator-scripts\") pod \"glance-db-create-vwf8h\" (UID: \"8bc9d7bc-3890-4523-b805-9cd4c167fd9b\") " pod="openstack/glance-db-create-vwf8h" Dec 10 11:13:18 crc kubenswrapper[4780]: I1210 11:13:18.115111 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-ab68-account-create-update-ss7hk" Dec 10 11:13:18 crc kubenswrapper[4780]: I1210 11:13:18.255518 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g8qm6\" (UniqueName: \"kubernetes.io/projected/8bc9d7bc-3890-4523-b805-9cd4c167fd9b-kube-api-access-g8qm6\") pod \"glance-db-create-vwf8h\" (UID: \"8bc9d7bc-3890-4523-b805-9cd4c167fd9b\") " pod="openstack/glance-db-create-vwf8h" Dec 10 11:13:18 crc kubenswrapper[4780]: I1210 11:13:18.263018 4780 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9a36247f-4fb6-4ec7-9688-aafdb77c1243" path="/var/lib/kubelet/pods/9a36247f-4fb6-4ec7-9688-aafdb77c1243/volumes" Dec 10 11:13:18 crc kubenswrapper[4780]: I1210 11:13:18.270837 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-db-create-sws85"] Dec 10 11:13:18 crc kubenswrapper[4780]: I1210 11:13:18.273291 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-create-sws85"] Dec 10 11:13:18 crc kubenswrapper[4780]: I1210 11:13:18.273325 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-1476-account-create-update-wszkq"] Dec 10 11:13:18 crc kubenswrapper[4780]: I1210 11:13:18.274866 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-1476-account-create-update-wszkq" Dec 10 11:13:18 crc kubenswrapper[4780]: I1210 11:13:18.275641 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-db-create-sws85" Dec 10 11:13:18 crc kubenswrapper[4780]: I1210 11:13:18.296061 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-db-secret" Dec 10 11:13:18 crc kubenswrapper[4780]: I1210 11:13:18.326025 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-1476-account-create-update-wszkq"] Dec 10 11:13:18 crc kubenswrapper[4780]: I1210 11:13:18.352980 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4qw4v\" (UniqueName: \"kubernetes.io/projected/1d18f911-380d-4e2d-915b-18621871d0f5-kube-api-access-4qw4v\") pod \"placement-1476-account-create-update-wszkq\" (UID: \"1d18f911-380d-4e2d-915b-18621871d0f5\") " pod="openstack/placement-1476-account-create-update-wszkq" Dec 10 11:13:18 crc kubenswrapper[4780]: I1210 11:13:18.353101 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1d18f911-380d-4e2d-915b-18621871d0f5-operator-scripts\") pod \"placement-1476-account-create-update-wszkq\" (UID: \"1d18f911-380d-4e2d-915b-18621871d0f5\") " pod="openstack/placement-1476-account-create-update-wszkq" Dec 10 11:13:18 crc kubenswrapper[4780]: I1210 11:13:18.353215 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-59bbp\" (UniqueName: \"kubernetes.io/projected/002c5229-a237-4f6b-a323-f28d0eb09124-kube-api-access-59bbp\") pod \"placement-db-create-sws85\" (UID: \"002c5229-a237-4f6b-a323-f28d0eb09124\") " pod="openstack/placement-db-create-sws85" Dec 10 11:13:18 crc kubenswrapper[4780]: I1210 11:13:18.353272 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/002c5229-a237-4f6b-a323-f28d0eb09124-operator-scripts\") pod \"placement-db-create-sws85\" (UID: \"002c5229-a237-4f6b-a323-f28d0eb09124\") " pod="openstack/placement-db-create-sws85" Dec 10 11:13:18 crc kubenswrapper[4780]: I1210 11:13:18.354665 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-qqjpk"] Dec 10 11:13:18 crc kubenswrapper[4780]: I1210 11:13:18.360019 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-qqjpk" Dec 10 11:13:18 crc kubenswrapper[4780]: I1210 11:13:18.384142 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-qqjpk"] Dec 10 11:13:18 crc kubenswrapper[4780]: I1210 11:13:18.424301 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-db-create-vwf8h" Dec 10 11:13:18 crc kubenswrapper[4780]: I1210 11:13:18.457530 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4qw4v\" (UniqueName: \"kubernetes.io/projected/1d18f911-380d-4e2d-915b-18621871d0f5-kube-api-access-4qw4v\") pod \"placement-1476-account-create-update-wszkq\" (UID: \"1d18f911-380d-4e2d-915b-18621871d0f5\") " pod="openstack/placement-1476-account-create-update-wszkq" Dec 10 11:13:18 crc kubenswrapper[4780]: I1210 11:13:18.457636 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1d18f911-380d-4e2d-915b-18621871d0f5-operator-scripts\") pod \"placement-1476-account-create-update-wszkq\" (UID: \"1d18f911-380d-4e2d-915b-18621871d0f5\") " pod="openstack/placement-1476-account-create-update-wszkq" Dec 10 11:13:18 crc kubenswrapper[4780]: I1210 11:13:18.457687 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8bws8\" (UniqueName: \"kubernetes.io/projected/a96954e9-6eb0-41ea-851e-41b5f7ef3197-kube-api-access-8bws8\") pod \"community-operators-qqjpk\" (UID: \"a96954e9-6eb0-41ea-851e-41b5f7ef3197\") " pod="openshift-marketplace/community-operators-qqjpk" Dec 10 11:13:18 crc kubenswrapper[4780]: I1210 11:13:18.457724 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a96954e9-6eb0-41ea-851e-41b5f7ef3197-catalog-content\") pod \"community-operators-qqjpk\" (UID: \"a96954e9-6eb0-41ea-851e-41b5f7ef3197\") " pod="openshift-marketplace/community-operators-qqjpk" Dec 10 11:13:18 crc kubenswrapper[4780]: I1210 11:13:18.457817 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-59bbp\" (UniqueName: \"kubernetes.io/projected/002c5229-a237-4f6b-a323-f28d0eb09124-kube-api-access-59bbp\") pod \"placement-db-create-sws85\" (UID: \"002c5229-a237-4f6b-a323-f28d0eb09124\") " pod="openstack/placement-db-create-sws85" Dec 10 11:13:18 crc kubenswrapper[4780]: I1210 11:13:18.457871 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/002c5229-a237-4f6b-a323-f28d0eb09124-operator-scripts\") pod \"placement-db-create-sws85\" (UID: \"002c5229-a237-4f6b-a323-f28d0eb09124\") " pod="openstack/placement-db-create-sws85" Dec 10 11:13:18 crc kubenswrapper[4780]: I1210 11:13:18.458024 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a96954e9-6eb0-41ea-851e-41b5f7ef3197-utilities\") pod \"community-operators-qqjpk\" (UID: \"a96954e9-6eb0-41ea-851e-41b5f7ef3197\") " pod="openshift-marketplace/community-operators-qqjpk" Dec 10 11:13:18 crc kubenswrapper[4780]: I1210 11:13:18.459006 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1d18f911-380d-4e2d-915b-18621871d0f5-operator-scripts\") pod \"placement-1476-account-create-update-wszkq\" (UID: \"1d18f911-380d-4e2d-915b-18621871d0f5\") " pod="openstack/placement-1476-account-create-update-wszkq" Dec 10 11:13:18 crc kubenswrapper[4780]: I1210 11:13:18.459407 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: 
\"kubernetes.io/configmap/002c5229-a237-4f6b-a323-f28d0eb09124-operator-scripts\") pod \"placement-db-create-sws85\" (UID: \"002c5229-a237-4f6b-a323-f28d0eb09124\") " pod="openstack/placement-db-create-sws85" Dec 10 11:13:18 crc kubenswrapper[4780]: I1210 11:13:18.500783 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4qw4v\" (UniqueName: \"kubernetes.io/projected/1d18f911-380d-4e2d-915b-18621871d0f5-kube-api-access-4qw4v\") pod \"placement-1476-account-create-update-wszkq\" (UID: \"1d18f911-380d-4e2d-915b-18621871d0f5\") " pod="openstack/placement-1476-account-create-update-wszkq" Dec 10 11:13:18 crc kubenswrapper[4780]: I1210 11:13:18.503494 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-59bbp\" (UniqueName: \"kubernetes.io/projected/002c5229-a237-4f6b-a323-f28d0eb09124-kube-api-access-59bbp\") pod \"placement-db-create-sws85\" (UID: \"002c5229-a237-4f6b-a323-f28d0eb09124\") " pod="openstack/placement-db-create-sws85" Dec 10 11:13:18 crc kubenswrapper[4780]: I1210 11:13:18.561760 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a96954e9-6eb0-41ea-851e-41b5f7ef3197-utilities\") pod \"community-operators-qqjpk\" (UID: \"a96954e9-6eb0-41ea-851e-41b5f7ef3197\") " pod="openshift-marketplace/community-operators-qqjpk" Dec 10 11:13:18 crc kubenswrapper[4780]: I1210 11:13:18.562001 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8bws8\" (UniqueName: \"kubernetes.io/projected/a96954e9-6eb0-41ea-851e-41b5f7ef3197-kube-api-access-8bws8\") pod \"community-operators-qqjpk\" (UID: \"a96954e9-6eb0-41ea-851e-41b5f7ef3197\") " pod="openshift-marketplace/community-operators-qqjpk" Dec 10 11:13:18 crc kubenswrapper[4780]: I1210 11:13:18.562046 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a96954e9-6eb0-41ea-851e-41b5f7ef3197-catalog-content\") pod \"community-operators-qqjpk\" (UID: \"a96954e9-6eb0-41ea-851e-41b5f7ef3197\") " pod="openshift-marketplace/community-operators-qqjpk" Dec 10 11:13:18 crc kubenswrapper[4780]: I1210 11:13:18.562552 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a96954e9-6eb0-41ea-851e-41b5f7ef3197-utilities\") pod \"community-operators-qqjpk\" (UID: \"a96954e9-6eb0-41ea-851e-41b5f7ef3197\") " pod="openshift-marketplace/community-operators-qqjpk" Dec 10 11:13:18 crc kubenswrapper[4780]: I1210 11:13:18.562805 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a96954e9-6eb0-41ea-851e-41b5f7ef3197-catalog-content\") pod \"community-operators-qqjpk\" (UID: \"a96954e9-6eb0-41ea-851e-41b5f7ef3197\") " pod="openshift-marketplace/community-operators-qqjpk" Dec 10 11:13:18 crc kubenswrapper[4780]: I1210 11:13:18.589041 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8bws8\" (UniqueName: \"kubernetes.io/projected/a96954e9-6eb0-41ea-851e-41b5f7ef3197-kube-api-access-8bws8\") pod \"community-operators-qqjpk\" (UID: \"a96954e9-6eb0-41ea-851e-41b5f7ef3197\") " pod="openshift-marketplace/community-operators-qqjpk" Dec 10 11:13:18 crc kubenswrapper[4780]: I1210 11:13:18.655044 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-1476-account-create-update-wszkq" Dec 10 11:13:18 crc kubenswrapper[4780]: I1210 11:13:18.704217 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-create-sws85" Dec 10 11:13:18 crc kubenswrapper[4780]: I1210 11:13:18.724815 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-qqjpk" Dec 10 11:13:18 crc kubenswrapper[4780]: I1210 11:13:18.944526 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-create-sms69"] Dec 10 11:13:18 crc kubenswrapper[4780]: I1210 11:13:18.975743 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-2e1e-account-create-update-xshf7"] Dec 10 11:13:19 crc kubenswrapper[4780]: W1210 11:13:19.115095 4780 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poddf258243_e086_4015_829c_01101a52b26e.slice/crio-d2a2965e0c30942b021686c2654e5338df74f90c80bb1359fca55a7deff69593 WatchSource:0}: Error finding container d2a2965e0c30942b021686c2654e5338df74f90c80bb1359fca55a7deff69593: Status 404 returned error can't find the container with id d2a2965e0c30942b021686c2654e5338df74f90c80bb1359fca55a7deff69593 Dec 10 11:13:19 crc kubenswrapper[4780]: I1210 11:13:19.342009 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-ab68-account-create-update-ss7hk"] Dec 10 11:13:19 crc kubenswrapper[4780]: I1210 11:13:19.566284 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-create-vwf8h"] Dec 10 11:13:19 crc kubenswrapper[4780]: W1210 11:13:19.596008 4780 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod8bc9d7bc_3890_4523_b805_9cd4c167fd9b.slice/crio-bb97b5a162334ac93e7f401d2a03bd64b97cc892387c00213a8770e883d63c3f WatchSource:0}: Error finding container bb97b5a162334ac93e7f401d2a03bd64b97cc892387c00213a8770e883d63c3f: Status 404 returned error can't find the container with id bb97b5a162334ac93e7f401d2a03bd64b97cc892387c00213a8770e883d63c3f Dec 10 11:13:19 crc kubenswrapper[4780]: I1210 11:13:19.708495 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-ab68-account-create-update-ss7hk" event={"ID":"bd881333-f3fb-4c4d-b31e-f755f9c1271d","Type":"ContainerStarted","Data":"e6f03c09f927e21ae1c4b8b0b3f595d9cb0ee77a1dcc7259e85b064e3bb778ff"} Dec 10 11:13:19 crc kubenswrapper[4780]: I1210 11:13:19.711513 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-sms69" event={"ID":"df258243-e086-4015-829c-01101a52b26e","Type":"ContainerStarted","Data":"d2a2965e0c30942b021686c2654e5338df74f90c80bb1359fca55a7deff69593"} Dec 10 11:13:19 crc kubenswrapper[4780]: I1210 11:13:19.720979 4780 generic.go:334] "Generic (PLEG): container finished" podID="bef902c7-4e5f-4af9-bda4-0c92b8521901" containerID="8eddc0fcf9942c109477abdc701fb351c6fa744d3e546767239affa74a86cdc4" exitCode=0 Dec 10 11:13:19 crc kubenswrapper[4780]: I1210 11:13:19.721101 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"bef902c7-4e5f-4af9-bda4-0c92b8521901","Type":"ContainerDied","Data":"8eddc0fcf9942c109477abdc701fb351c6fa744d3e546767239affa74a86cdc4"} Dec 10 11:13:19 crc kubenswrapper[4780]: I1210 11:13:19.734788 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/glance-db-create-vwf8h" event={"ID":"8bc9d7bc-3890-4523-b805-9cd4c167fd9b","Type":"ContainerStarted","Data":"bb97b5a162334ac93e7f401d2a03bd64b97cc892387c00213a8770e883d63c3f"} Dec 10 11:13:19 crc kubenswrapper[4780]: I1210 11:13:19.754388 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-2e1e-account-create-update-xshf7" event={"ID":"c6e99715-0d2d-4998-899e-58c68d7db78a","Type":"ContainerStarted","Data":"54829e742b0c6a2da5c38d6edb8bbd877bf78ddc8592ee482a708aca201356a6"} Dec 10 11:13:19 crc kubenswrapper[4780]: I1210 11:13:19.866304 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-1476-account-create-update-wszkq"] Dec 10 11:13:19 crc kubenswrapper[4780]: I1210 11:13:19.896521 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-create-sws85"] Dec 10 11:13:20 crc kubenswrapper[4780]: I1210 11:13:20.157672 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-qqjpk"] Dec 10 11:13:20 crc kubenswrapper[4780]: I1210 11:13:20.769605 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-sws85" event={"ID":"002c5229-a237-4f6b-a323-f28d0eb09124","Type":"ContainerStarted","Data":"51a693b91c80acfc4bc1f22c1b56ff1da210858911109d247564097c91c2c639"} Dec 10 11:13:20 crc kubenswrapper[4780]: I1210 11:13:20.772275 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-1476-account-create-update-wszkq" event={"ID":"1d18f911-380d-4e2d-915b-18621871d0f5","Type":"ContainerStarted","Data":"7628759c642e52cbb9bac8223e106ca70f53a77f4f94664fef68607d75889717"} Dec 10 11:13:20 crc kubenswrapper[4780]: I1210 11:13:20.773777 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-qqjpk" event={"ID":"a96954e9-6eb0-41ea-851e-41b5f7ef3197","Type":"ContainerStarted","Data":"b06e8b06056033cf90936b727a8fc92d519418cae76feb6fe27367e02904debe"} Dec 10 11:13:21 crc kubenswrapper[4780]: I1210 11:13:21.048418 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/mysqld-exporter-openstack-db-create-bfhjg"] Dec 10 11:13:21 crc kubenswrapper[4780]: I1210 11:13:21.050573 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/mysqld-exporter-openstack-db-create-bfhjg" Dec 10 11:13:21 crc kubenswrapper[4780]: I1210 11:13:21.060012 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mysqld-exporter-openstack-db-create-bfhjg"] Dec 10 11:13:21 crc kubenswrapper[4780]: I1210 11:13:21.177206 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b65fc796-3b7d-44e5-98eb-898c371a7174-operator-scripts\") pod \"mysqld-exporter-openstack-db-create-bfhjg\" (UID: \"b65fc796-3b7d-44e5-98eb-898c371a7174\") " pod="openstack/mysqld-exporter-openstack-db-create-bfhjg" Dec 10 11:13:21 crc kubenswrapper[4780]: I1210 11:13:21.177265 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4pdwx\" (UniqueName: \"kubernetes.io/projected/b65fc796-3b7d-44e5-98eb-898c371a7174-kube-api-access-4pdwx\") pod \"mysqld-exporter-openstack-db-create-bfhjg\" (UID: \"b65fc796-3b7d-44e5-98eb-898c371a7174\") " pod="openstack/mysqld-exporter-openstack-db-create-bfhjg" Dec 10 11:13:21 crc kubenswrapper[4780]: I1210 11:13:21.280427 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b65fc796-3b7d-44e5-98eb-898c371a7174-operator-scripts\") pod \"mysqld-exporter-openstack-db-create-bfhjg\" (UID: \"b65fc796-3b7d-44e5-98eb-898c371a7174\") " pod="openstack/mysqld-exporter-openstack-db-create-bfhjg" Dec 10 11:13:21 crc kubenswrapper[4780]: I1210 11:13:21.280560 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4pdwx\" (UniqueName: \"kubernetes.io/projected/b65fc796-3b7d-44e5-98eb-898c371a7174-kube-api-access-4pdwx\") pod \"mysqld-exporter-openstack-db-create-bfhjg\" (UID: \"b65fc796-3b7d-44e5-98eb-898c371a7174\") " pod="openstack/mysqld-exporter-openstack-db-create-bfhjg" Dec 10 11:13:21 crc kubenswrapper[4780]: I1210 11:13:21.281412 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b65fc796-3b7d-44e5-98eb-898c371a7174-operator-scripts\") pod \"mysqld-exporter-openstack-db-create-bfhjg\" (UID: \"b65fc796-3b7d-44e5-98eb-898c371a7174\") " pod="openstack/mysqld-exporter-openstack-db-create-bfhjg" Dec 10 11:13:21 crc kubenswrapper[4780]: I1210 11:13:21.322872 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4pdwx\" (UniqueName: \"kubernetes.io/projected/b65fc796-3b7d-44e5-98eb-898c371a7174-kube-api-access-4pdwx\") pod \"mysqld-exporter-openstack-db-create-bfhjg\" (UID: \"b65fc796-3b7d-44e5-98eb-898c371a7174\") " pod="openstack/mysqld-exporter-openstack-db-create-bfhjg" Dec 10 11:13:21 crc kubenswrapper[4780]: I1210 11:13:21.421478 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/mysqld-exporter-openstack-db-create-bfhjg" Dec 10 11:13:21 crc kubenswrapper[4780]: I1210 11:13:21.585903 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/mysqld-exporter-3721-account-create-update-gbldb"] Dec 10 11:13:21 crc kubenswrapper[4780]: I1210 11:13:21.588172 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/mysqld-exporter-3721-account-create-update-gbldb" Dec 10 11:13:21 crc kubenswrapper[4780]: I1210 11:13:21.597361 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"mysqld-exporter-openstack-db-secret" Dec 10 11:13:21 crc kubenswrapper[4780]: I1210 11:13:21.618965 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mysqld-exporter-3721-account-create-update-gbldb"] Dec 10 11:13:21 crc kubenswrapper[4780]: I1210 11:13:21.694373 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jv9jw\" (UniqueName: \"kubernetes.io/projected/5a999d24-37c5-4026-9b4a-31e3642077e1-kube-api-access-jv9jw\") pod \"mysqld-exporter-3721-account-create-update-gbldb\" (UID: \"5a999d24-37c5-4026-9b4a-31e3642077e1\") " pod="openstack/mysqld-exporter-3721-account-create-update-gbldb" Dec 10 11:13:21 crc kubenswrapper[4780]: I1210 11:13:21.695176 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/5a999d24-37c5-4026-9b4a-31e3642077e1-operator-scripts\") pod \"mysqld-exporter-3721-account-create-update-gbldb\" (UID: \"5a999d24-37c5-4026-9b4a-31e3642077e1\") " pod="openstack/mysqld-exporter-3721-account-create-update-gbldb" Dec 10 11:13:21 crc kubenswrapper[4780]: I1210 11:13:21.798511 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jv9jw\" (UniqueName: \"kubernetes.io/projected/5a999d24-37c5-4026-9b4a-31e3642077e1-kube-api-access-jv9jw\") pod \"mysqld-exporter-3721-account-create-update-gbldb\" (UID: \"5a999d24-37c5-4026-9b4a-31e3642077e1\") " pod="openstack/mysqld-exporter-3721-account-create-update-gbldb" Dec 10 11:13:21 crc kubenswrapper[4780]: I1210 11:13:21.798796 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/5a999d24-37c5-4026-9b4a-31e3642077e1-operator-scripts\") pod \"mysqld-exporter-3721-account-create-update-gbldb\" (UID: \"5a999d24-37c5-4026-9b4a-31e3642077e1\") " pod="openstack/mysqld-exporter-3721-account-create-update-gbldb" Dec 10 11:13:21 crc kubenswrapper[4780]: I1210 11:13:21.800901 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/5a999d24-37c5-4026-9b4a-31e3642077e1-operator-scripts\") pod \"mysqld-exporter-3721-account-create-update-gbldb\" (UID: \"5a999d24-37c5-4026-9b4a-31e3642077e1\") " pod="openstack/mysqld-exporter-3721-account-create-update-gbldb" Dec 10 11:13:21 crc kubenswrapper[4780]: I1210 11:13:21.823296 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-sms69" event={"ID":"df258243-e086-4015-829c-01101a52b26e","Type":"ContainerStarted","Data":"a710a8ec6e7b61ce3c285a19fab368a1c68a89886775139086243d00f6cb3af4"} Dec 10 11:13:21 crc kubenswrapper[4780]: I1210 11:13:21.834864 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-1476-account-create-update-wszkq" event={"ID":"1d18f911-380d-4e2d-915b-18621871d0f5","Type":"ContainerStarted","Data":"07ea6848dda70e7980c76502a9b5d5b203e25ca1a5790393a548aae5e22f3c96"} Dec 10 11:13:21 crc kubenswrapper[4780]: I1210 11:13:21.834894 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jv9jw\" (UniqueName: 
\"kubernetes.io/projected/5a999d24-37c5-4026-9b4a-31e3642077e1-kube-api-access-jv9jw\") pod \"mysqld-exporter-3721-account-create-update-gbldb\" (UID: \"5a999d24-37c5-4026-9b4a-31e3642077e1\") " pod="openstack/mysqld-exporter-3721-account-create-update-gbldb" Dec 10 11:13:21 crc kubenswrapper[4780]: I1210 11:13:21.848078 4780 generic.go:334] "Generic (PLEG): container finished" podID="a96954e9-6eb0-41ea-851e-41b5f7ef3197" containerID="7312f37da7d16c4b5f8dbfe267cfa48e78cb55329e85fd93fba3f09a95903db4" exitCode=0 Dec 10 11:13:21 crc kubenswrapper[4780]: I1210 11:13:21.848229 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-qqjpk" event={"ID":"a96954e9-6eb0-41ea-851e-41b5f7ef3197","Type":"ContainerDied","Data":"7312f37da7d16c4b5f8dbfe267cfa48e78cb55329e85fd93fba3f09a95903db4"} Dec 10 11:13:21 crc kubenswrapper[4780]: I1210 11:13:21.850934 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-db-create-sms69" podStartSLOduration=4.850897295 podStartE2EDuration="4.850897295s" podCreationTimestamp="2025-12-10 11:13:17 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 11:13:21.842509791 +0000 UTC m=+1706.695903234" watchObservedRunningTime="2025-12-10 11:13:21.850897295 +0000 UTC m=+1706.704290738" Dec 10 11:13:21 crc kubenswrapper[4780]: I1210 11:13:21.861556 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-vwf8h" event={"ID":"8bc9d7bc-3890-4523-b805-9cd4c167fd9b","Type":"ContainerStarted","Data":"8e46cfb55ce036b037d8dcf5b5ee9af30c938e54396d6effc1e3c728949b5a25"} Dec 10 11:13:21 crc kubenswrapper[4780]: I1210 11:13:21.884027 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/placement-1476-account-create-update-wszkq" podStartSLOduration=4.883994069 podStartE2EDuration="4.883994069s" podCreationTimestamp="2025-12-10 11:13:17 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 11:13:21.870656949 +0000 UTC m=+1706.724050412" watchObservedRunningTime="2025-12-10 11:13:21.883994069 +0000 UTC m=+1706.737387512" Dec 10 11:13:21 crc kubenswrapper[4780]: I1210 11:13:21.886528 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-2e1e-account-create-update-xshf7" event={"ID":"c6e99715-0d2d-4998-899e-58c68d7db78a","Type":"ContainerStarted","Data":"53fa60f7a7b2ec54ab179f0eb28668d7df18f1b54aaf119b9c48ae469fc9867f"} Dec 10 11:13:21 crc kubenswrapper[4780]: I1210 11:13:21.902207 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-ab68-account-create-update-ss7hk" event={"ID":"bd881333-f3fb-4c4d-b31e-f755f9c1271d","Type":"ContainerStarted","Data":"3cca7f3649b3134d461ac278be985fe5a5bc49056f19b7c92815a3ec10122df8"} Dec 10 11:13:21 crc kubenswrapper[4780]: I1210 11:13:21.907025 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-sws85" event={"ID":"002c5229-a237-4f6b-a323-f28d0eb09124","Type":"ContainerStarted","Data":"0e95fdb4ab1d59be8fef5445bfa536b18870a7d7e7d938a8b087169317427afa"} Dec 10 11:13:21 crc kubenswrapper[4780]: I1210 11:13:21.910849 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-db-create-vwf8h" podStartSLOduration=4.9108172329999995 podStartE2EDuration="4.910817233s" podCreationTimestamp="2025-12-10 11:13:17 
+0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 11:13:21.899121605 +0000 UTC m=+1706.752515048" watchObservedRunningTime="2025-12-10 11:13:21.910817233 +0000 UTC m=+1706.764210676" Dec 10 11:13:21 crc kubenswrapper[4780]: I1210 11:13:21.948945 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/mysqld-exporter-3721-account-create-update-gbldb" Dec 10 11:13:21 crc kubenswrapper[4780]: I1210 11:13:21.968310 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-2e1e-account-create-update-xshf7" podStartSLOduration=4.968274899 podStartE2EDuration="4.968274899s" podCreationTimestamp="2025-12-10 11:13:17 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 11:13:21.94676742 +0000 UTC m=+1706.800160863" watchObservedRunningTime="2025-12-10 11:13:21.968274899 +0000 UTC m=+1706.821668342" Dec 10 11:13:22 crc kubenswrapper[4780]: I1210 11:13:22.013538 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/placement-db-create-sws85" podStartSLOduration=5.013505672 podStartE2EDuration="5.013505672s" podCreationTimestamp="2025-12-10 11:13:17 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 11:13:21.964424 +0000 UTC m=+1706.817817443" watchObservedRunningTime="2025-12-10 11:13:22.013505672 +0000 UTC m=+1706.866899115" Dec 10 11:13:22 crc kubenswrapper[4780]: I1210 11:13:22.027655 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-ab68-account-create-update-ss7hk" podStartSLOduration=5.027612682 podStartE2EDuration="5.027612682s" podCreationTimestamp="2025-12-10 11:13:17 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 11:13:21.987368366 +0000 UTC m=+1706.840761809" watchObservedRunningTime="2025-12-10 11:13:22.027612682 +0000 UTC m=+1706.881006125" Dec 10 11:13:22 crc kubenswrapper[4780]: I1210 11:13:22.114869 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mysqld-exporter-openstack-db-create-bfhjg"] Dec 10 11:13:22 crc kubenswrapper[4780]: I1210 11:13:22.651665 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mysqld-exporter-3721-account-create-update-gbldb"] Dec 10 11:13:22 crc kubenswrapper[4780]: W1210 11:13:22.679698 4780 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod5a999d24_37c5_4026_9b4a_31e3642077e1.slice/crio-7acfac095eb91ae1f0a9973192ab8f2810c491662ebed5dd317c5ef4cdf44985 WatchSource:0}: Error finding container 7acfac095eb91ae1f0a9973192ab8f2810c491662ebed5dd317c5ef4cdf44985: Status 404 returned error can't find the container with id 7acfac095eb91ae1f0a9973192ab8f2810c491662ebed5dd317c5ef4cdf44985 Dec 10 11:13:22 crc kubenswrapper[4780]: I1210 11:13:22.921987 4780 generic.go:334] "Generic (PLEG): container finished" podID="8bc9d7bc-3890-4523-b805-9cd4c167fd9b" containerID="8e46cfb55ce036b037d8dcf5b5ee9af30c938e54396d6effc1e3c728949b5a25" exitCode=0 Dec 10 11:13:22 crc kubenswrapper[4780]: I1210 11:13:22.922268 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-vwf8h" 
event={"ID":"8bc9d7bc-3890-4523-b805-9cd4c167fd9b","Type":"ContainerDied","Data":"8e46cfb55ce036b037d8dcf5b5ee9af30c938e54396d6effc1e3c728949b5a25"} Dec 10 11:13:22 crc kubenswrapper[4780]: I1210 11:13:22.926244 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-qqjpk" event={"ID":"a96954e9-6eb0-41ea-851e-41b5f7ef3197","Type":"ContainerStarted","Data":"1a87d5c149e56f4052b305d79bca3a7a6f880ea06a04fd0b678d6b1298003367"} Dec 10 11:13:22 crc kubenswrapper[4780]: I1210 11:13:22.931662 4780 generic.go:334] "Generic (PLEG): container finished" podID="c6e99715-0d2d-4998-899e-58c68d7db78a" containerID="53fa60f7a7b2ec54ab179f0eb28668d7df18f1b54aaf119b9c48ae469fc9867f" exitCode=0 Dec 10 11:13:22 crc kubenswrapper[4780]: I1210 11:13:22.931779 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-2e1e-account-create-update-xshf7" event={"ID":"c6e99715-0d2d-4998-899e-58c68d7db78a","Type":"ContainerDied","Data":"53fa60f7a7b2ec54ab179f0eb28668d7df18f1b54aaf119b9c48ae469fc9867f"} Dec 10 11:13:22 crc kubenswrapper[4780]: I1210 11:13:22.934668 4780 generic.go:334] "Generic (PLEG): container finished" podID="bd881333-f3fb-4c4d-b31e-f755f9c1271d" containerID="3cca7f3649b3134d461ac278be985fe5a5bc49056f19b7c92815a3ec10122df8" exitCode=0 Dec 10 11:13:22 crc kubenswrapper[4780]: I1210 11:13:22.934768 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-ab68-account-create-update-ss7hk" event={"ID":"bd881333-f3fb-4c4d-b31e-f755f9c1271d","Type":"ContainerDied","Data":"3cca7f3649b3134d461ac278be985fe5a5bc49056f19b7c92815a3ec10122df8"} Dec 10 11:13:22 crc kubenswrapper[4780]: I1210 11:13:22.954209 4780 generic.go:334] "Generic (PLEG): container finished" podID="002c5229-a237-4f6b-a323-f28d0eb09124" containerID="0e95fdb4ab1d59be8fef5445bfa536b18870a7d7e7d938a8b087169317427afa" exitCode=0 Dec 10 11:13:22 crc kubenswrapper[4780]: I1210 11:13:22.954464 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-sws85" event={"ID":"002c5229-a237-4f6b-a323-f28d0eb09124","Type":"ContainerDied","Data":"0e95fdb4ab1d59be8fef5445bfa536b18870a7d7e7d938a8b087169317427afa"} Dec 10 11:13:22 crc kubenswrapper[4780]: I1210 11:13:22.965031 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mysqld-exporter-openstack-db-create-bfhjg" event={"ID":"b65fc796-3b7d-44e5-98eb-898c371a7174","Type":"ContainerStarted","Data":"626c0c093995ce580fcfbc3504e137478edbffdc5bd41f95d51d4b9c88916e78"} Dec 10 11:13:22 crc kubenswrapper[4780]: I1210 11:13:22.965113 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mysqld-exporter-openstack-db-create-bfhjg" event={"ID":"b65fc796-3b7d-44e5-98eb-898c371a7174","Type":"ContainerStarted","Data":"e7e89e7bde68dc373a730402ea4558a0d7a3bdc02772de28f294475bfa38034a"} Dec 10 11:13:22 crc kubenswrapper[4780]: I1210 11:13:22.968718 4780 generic.go:334] "Generic (PLEG): container finished" podID="1d18f911-380d-4e2d-915b-18621871d0f5" containerID="07ea6848dda70e7980c76502a9b5d5b203e25ca1a5790393a548aae5e22f3c96" exitCode=0 Dec 10 11:13:22 crc kubenswrapper[4780]: I1210 11:13:22.968844 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-1476-account-create-update-wszkq" event={"ID":"1d18f911-380d-4e2d-915b-18621871d0f5","Type":"ContainerDied","Data":"07ea6848dda70e7980c76502a9b5d5b203e25ca1a5790393a548aae5e22f3c96"} Dec 10 11:13:22 crc kubenswrapper[4780]: I1210 11:13:22.977028 4780 generic.go:334] "Generic (PLEG): container 
finished" podID="df258243-e086-4015-829c-01101a52b26e" containerID="a710a8ec6e7b61ce3c285a19fab368a1c68a89886775139086243d00f6cb3af4" exitCode=0 Dec 10 11:13:22 crc kubenswrapper[4780]: I1210 11:13:22.977158 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-sms69" event={"ID":"df258243-e086-4015-829c-01101a52b26e","Type":"ContainerDied","Data":"a710a8ec6e7b61ce3c285a19fab368a1c68a89886775139086243d00f6cb3af4"} Dec 10 11:13:22 crc kubenswrapper[4780]: I1210 11:13:22.987497 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mysqld-exporter-3721-account-create-update-gbldb" event={"ID":"5a999d24-37c5-4026-9b4a-31e3642077e1","Type":"ContainerStarted","Data":"7acfac095eb91ae1f0a9973192ab8f2810c491662ebed5dd317c5ef4cdf44985"} Dec 10 11:13:23 crc kubenswrapper[4780]: I1210 11:13:23.069040 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/mysqld-exporter-openstack-db-create-bfhjg" podStartSLOduration=2.068913473 podStartE2EDuration="2.068913473s" podCreationTimestamp="2025-12-10 11:13:21 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 11:13:23.063539906 +0000 UTC m=+1707.916933349" watchObservedRunningTime="2025-12-10 11:13:23.068913473 +0000 UTC m=+1707.922306916" Dec 10 11:13:24 crc kubenswrapper[4780]: I1210 11:13:24.002339 4780 generic.go:334] "Generic (PLEG): container finished" podID="b65fc796-3b7d-44e5-98eb-898c371a7174" containerID="626c0c093995ce580fcfbc3504e137478edbffdc5bd41f95d51d4b9c88916e78" exitCode=0 Dec 10 11:13:24 crc kubenswrapper[4780]: I1210 11:13:24.002476 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mysqld-exporter-openstack-db-create-bfhjg" event={"ID":"b65fc796-3b7d-44e5-98eb-898c371a7174","Type":"ContainerDied","Data":"626c0c093995ce580fcfbc3504e137478edbffdc5bd41f95d51d4b9c88916e78"} Dec 10 11:13:24 crc kubenswrapper[4780]: I1210 11:13:24.009559 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mysqld-exporter-3721-account-create-update-gbldb" event={"ID":"5a999d24-37c5-4026-9b4a-31e3642077e1","Type":"ContainerStarted","Data":"90a35e60ec99a8c5e69f1eb51dc947804ced21f35ecd00644bcd940c803e0ebc"} Dec 10 11:13:24 crc kubenswrapper[4780]: I1210 11:13:24.018130 4780 generic.go:334] "Generic (PLEG): container finished" podID="a96954e9-6eb0-41ea-851e-41b5f7ef3197" containerID="1a87d5c149e56f4052b305d79bca3a7a6f880ea06a04fd0b678d6b1298003367" exitCode=0 Dec 10 11:13:24 crc kubenswrapper[4780]: I1210 11:13:24.018421 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-qqjpk" event={"ID":"a96954e9-6eb0-41ea-851e-41b5f7ef3197","Type":"ContainerDied","Data":"1a87d5c149e56f4052b305d79bca3a7a6f880ea06a04fd0b678d6b1298003367"} Dec 10 11:13:24 crc kubenswrapper[4780]: I1210 11:13:24.069625 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/mysqld-exporter-3721-account-create-update-gbldb" podStartSLOduration=3.069587167 podStartE2EDuration="3.069587167s" podCreationTimestamp="2025-12-10 11:13:21 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 11:13:24.049778082 +0000 UTC m=+1708.903171525" watchObservedRunningTime="2025-12-10 11:13:24.069587167 +0000 UTC m=+1708.922980610" Dec 10 11:13:24 crc kubenswrapper[4780]: I1210 11:13:24.661677 4780 util.go:48] "No ready sandbox 
for pod can be found. Need to start a new one" pod="openstack/glance-ab68-account-create-update-ss7hk" Dec 10 11:13:24 crc kubenswrapper[4780]: I1210 11:13:24.826648 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sq7z9\" (UniqueName: \"kubernetes.io/projected/bd881333-f3fb-4c4d-b31e-f755f9c1271d-kube-api-access-sq7z9\") pod \"bd881333-f3fb-4c4d-b31e-f755f9c1271d\" (UID: \"bd881333-f3fb-4c4d-b31e-f755f9c1271d\") " Dec 10 11:13:24 crc kubenswrapper[4780]: I1210 11:13:24.827439 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/bd881333-f3fb-4c4d-b31e-f755f9c1271d-operator-scripts\") pod \"bd881333-f3fb-4c4d-b31e-f755f9c1271d\" (UID: \"bd881333-f3fb-4c4d-b31e-f755f9c1271d\") " Dec 10 11:13:24 crc kubenswrapper[4780]: I1210 11:13:24.828527 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bd881333-f3fb-4c4d-b31e-f755f9c1271d-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "bd881333-f3fb-4c4d-b31e-f755f9c1271d" (UID: "bd881333-f3fb-4c4d-b31e-f755f9c1271d"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 11:13:24 crc kubenswrapper[4780]: I1210 11:13:24.838348 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bd881333-f3fb-4c4d-b31e-f755f9c1271d-kube-api-access-sq7z9" (OuterVolumeSpecName: "kube-api-access-sq7z9") pod "bd881333-f3fb-4c4d-b31e-f755f9c1271d" (UID: "bd881333-f3fb-4c4d-b31e-f755f9c1271d"). InnerVolumeSpecName "kube-api-access-sq7z9". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:13:24 crc kubenswrapper[4780]: I1210 11:13:24.936037 4780 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/bd881333-f3fb-4c4d-b31e-f755f9c1271d-operator-scripts\") on node \"crc\" DevicePath \"\"" Dec 10 11:13:24 crc kubenswrapper[4780]: I1210 11:13:24.936094 4780 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sq7z9\" (UniqueName: \"kubernetes.io/projected/bd881333-f3fb-4c4d-b31e-f755f9c1271d-kube-api-access-sq7z9\") on node \"crc\" DevicePath \"\"" Dec 10 11:13:24 crc kubenswrapper[4780]: I1210 11:13:24.960419 4780 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-1476-account-create-update-wszkq" Dec 10 11:13:24 crc kubenswrapper[4780]: I1210 11:13:24.987010 4780 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-create-vwf8h" Dec 10 11:13:24 crc kubenswrapper[4780]: I1210 11:13:24.996942 4780 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-2e1e-account-create-update-xshf7" Dec 10 11:13:24 crc kubenswrapper[4780]: I1210 11:13:24.998936 4780 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-create-sws85" Dec 10 11:13:25 crc kubenswrapper[4780]: I1210 11:13:25.034644 4780 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-create-sms69" Dec 10 11:13:25 crc kubenswrapper[4780]: I1210 11:13:25.080825 4780 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-ab68-account-create-update-ss7hk" Dec 10 11:13:25 crc kubenswrapper[4780]: I1210 11:13:25.086227 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-ab68-account-create-update-ss7hk" event={"ID":"bd881333-f3fb-4c4d-b31e-f755f9c1271d","Type":"ContainerDied","Data":"e6f03c09f927e21ae1c4b8b0b3f595d9cb0ee77a1dcc7259e85b064e3bb778ff"} Dec 10 11:13:25 crc kubenswrapper[4780]: I1210 11:13:25.086345 4780 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e6f03c09f927e21ae1c4b8b0b3f595d9cb0ee77a1dcc7259e85b064e3bb778ff" Dec 10 11:13:25 crc kubenswrapper[4780]: I1210 11:13:25.120465 4780 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-create-sws85" Dec 10 11:13:25 crc kubenswrapper[4780]: I1210 11:13:25.120370 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-sws85" event={"ID":"002c5229-a237-4f6b-a323-f28d0eb09124","Type":"ContainerDied","Data":"51a693b91c80acfc4bc1f22c1b56ff1da210858911109d247564097c91c2c639"} Dec 10 11:13:25 crc kubenswrapper[4780]: I1210 11:13:25.120870 4780 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="51a693b91c80acfc4bc1f22c1b56ff1da210858911109d247564097c91c2c639" Dec 10 11:13:25 crc kubenswrapper[4780]: I1210 11:13:25.126280 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-sms69" event={"ID":"df258243-e086-4015-829c-01101a52b26e","Type":"ContainerDied","Data":"d2a2965e0c30942b021686c2654e5338df74f90c80bb1359fca55a7deff69593"} Dec 10 11:13:25 crc kubenswrapper[4780]: I1210 11:13:25.126383 4780 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="d2a2965e0c30942b021686c2654e5338df74f90c80bb1359fca55a7deff69593" Dec 10 11:13:25 crc kubenswrapper[4780]: I1210 11:13:25.126513 4780 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-create-sms69" Dec 10 11:13:25 crc kubenswrapper[4780]: I1210 11:13:25.151210 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-1476-account-create-update-wszkq" event={"ID":"1d18f911-380d-4e2d-915b-18621871d0f5","Type":"ContainerDied","Data":"7628759c642e52cbb9bac8223e106ca70f53a77f4f94664fef68607d75889717"} Dec 10 11:13:25 crc kubenswrapper[4780]: I1210 11:13:25.151287 4780 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="7628759c642e52cbb9bac8223e106ca70f53a77f4f94664fef68607d75889717" Dec 10 11:13:25 crc kubenswrapper[4780]: I1210 11:13:25.151431 4780 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-1476-account-create-update-wszkq" Dec 10 11:13:25 crc kubenswrapper[4780]: I1210 11:13:25.157369 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mysqld-exporter-3721-account-create-update-gbldb" event={"ID":"5a999d24-37c5-4026-9b4a-31e3642077e1","Type":"ContainerDied","Data":"90a35e60ec99a8c5e69f1eb51dc947804ced21f35ecd00644bcd940c803e0ebc"} Dec 10 11:13:25 crc kubenswrapper[4780]: I1210 11:13:25.157819 4780 generic.go:334] "Generic (PLEG): container finished" podID="5a999d24-37c5-4026-9b4a-31e3642077e1" containerID="90a35e60ec99a8c5e69f1eb51dc947804ced21f35ecd00644bcd940c803e0ebc" exitCode=0 Dec 10 11:13:25 crc kubenswrapper[4780]: I1210 11:13:25.158581 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w8wp8\" (UniqueName: \"kubernetes.io/projected/df258243-e086-4015-829c-01101a52b26e-kube-api-access-w8wp8\") pod \"df258243-e086-4015-829c-01101a52b26e\" (UID: \"df258243-e086-4015-829c-01101a52b26e\") " Dec 10 11:13:25 crc kubenswrapper[4780]: I1210 11:13:25.158623 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4qw4v\" (UniqueName: \"kubernetes.io/projected/1d18f911-380d-4e2d-915b-18621871d0f5-kube-api-access-4qw4v\") pod \"1d18f911-380d-4e2d-915b-18621871d0f5\" (UID: \"1d18f911-380d-4e2d-915b-18621871d0f5\") " Dec 10 11:13:25 crc kubenswrapper[4780]: I1210 11:13:25.158737 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c6e99715-0d2d-4998-899e-58c68d7db78a-operator-scripts\") pod \"c6e99715-0d2d-4998-899e-58c68d7db78a\" (UID: \"c6e99715-0d2d-4998-899e-58c68d7db78a\") " Dec 10 11:13:25 crc kubenswrapper[4780]: I1210 11:13:25.158786 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1d18f911-380d-4e2d-915b-18621871d0f5-operator-scripts\") pod \"1d18f911-380d-4e2d-915b-18621871d0f5\" (UID: \"1d18f911-380d-4e2d-915b-18621871d0f5\") " Dec 10 11:13:25 crc kubenswrapper[4780]: I1210 11:13:25.159113 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/df258243-e086-4015-829c-01101a52b26e-operator-scripts\") pod \"df258243-e086-4015-829c-01101a52b26e\" (UID: \"df258243-e086-4015-829c-01101a52b26e\") " Dec 10 11:13:25 crc kubenswrapper[4780]: I1210 11:13:25.159148 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w4hdl\" (UniqueName: \"kubernetes.io/projected/c6e99715-0d2d-4998-899e-58c68d7db78a-kube-api-access-w4hdl\") pod \"c6e99715-0d2d-4998-899e-58c68d7db78a\" (UID: \"c6e99715-0d2d-4998-899e-58c68d7db78a\") " Dec 10 11:13:25 crc kubenswrapper[4780]: I1210 11:13:25.159179 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8bc9d7bc-3890-4523-b805-9cd4c167fd9b-operator-scripts\") pod \"8bc9d7bc-3890-4523-b805-9cd4c167fd9b\" (UID: \"8bc9d7bc-3890-4523-b805-9cd4c167fd9b\") " Dec 10 11:13:25 crc kubenswrapper[4780]: I1210 11:13:25.159291 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-g8qm6\" (UniqueName: \"kubernetes.io/projected/8bc9d7bc-3890-4523-b805-9cd4c167fd9b-kube-api-access-g8qm6\") pod \"8bc9d7bc-3890-4523-b805-9cd4c167fd9b\" (UID: 
\"8bc9d7bc-3890-4523-b805-9cd4c167fd9b\") " Dec 10 11:13:25 crc kubenswrapper[4780]: I1210 11:13:25.159394 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-59bbp\" (UniqueName: \"kubernetes.io/projected/002c5229-a237-4f6b-a323-f28d0eb09124-kube-api-access-59bbp\") pod \"002c5229-a237-4f6b-a323-f28d0eb09124\" (UID: \"002c5229-a237-4f6b-a323-f28d0eb09124\") " Dec 10 11:13:25 crc kubenswrapper[4780]: I1210 11:13:25.159438 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/002c5229-a237-4f6b-a323-f28d0eb09124-operator-scripts\") pod \"002c5229-a237-4f6b-a323-f28d0eb09124\" (UID: \"002c5229-a237-4f6b-a323-f28d0eb09124\") " Dec 10 11:13:25 crc kubenswrapper[4780]: I1210 11:13:25.160951 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c6e99715-0d2d-4998-899e-58c68d7db78a-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "c6e99715-0d2d-4998-899e-58c68d7db78a" (UID: "c6e99715-0d2d-4998-899e-58c68d7db78a"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 11:13:25 crc kubenswrapper[4780]: I1210 11:13:25.162047 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1d18f911-380d-4e2d-915b-18621871d0f5-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "1d18f911-380d-4e2d-915b-18621871d0f5" (UID: "1d18f911-380d-4e2d-915b-18621871d0f5"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 11:13:25 crc kubenswrapper[4780]: I1210 11:13:25.162429 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/002c5229-a237-4f6b-a323-f28d0eb09124-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "002c5229-a237-4f6b-a323-f28d0eb09124" (UID: "002c5229-a237-4f6b-a323-f28d0eb09124"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 11:13:25 crc kubenswrapper[4780]: I1210 11:13:25.162613 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/df258243-e086-4015-829c-01101a52b26e-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "df258243-e086-4015-829c-01101a52b26e" (UID: "df258243-e086-4015-829c-01101a52b26e"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 11:13:25 crc kubenswrapper[4780]: I1210 11:13:25.163711 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8bc9d7bc-3890-4523-b805-9cd4c167fd9b-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "8bc9d7bc-3890-4523-b805-9cd4c167fd9b" (UID: "8bc9d7bc-3890-4523-b805-9cd4c167fd9b"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 11:13:25 crc kubenswrapper[4780]: I1210 11:13:25.168226 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8bc9d7bc-3890-4523-b805-9cd4c167fd9b-kube-api-access-g8qm6" (OuterVolumeSpecName: "kube-api-access-g8qm6") pod "8bc9d7bc-3890-4523-b805-9cd4c167fd9b" (UID: "8bc9d7bc-3890-4523-b805-9cd4c167fd9b"). InnerVolumeSpecName "kube-api-access-g8qm6". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:13:25 crc kubenswrapper[4780]: I1210 11:13:25.168799 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1d18f911-380d-4e2d-915b-18621871d0f5-kube-api-access-4qw4v" (OuterVolumeSpecName: "kube-api-access-4qw4v") pod "1d18f911-380d-4e2d-915b-18621871d0f5" (UID: "1d18f911-380d-4e2d-915b-18621871d0f5"). InnerVolumeSpecName "kube-api-access-4qw4v". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:13:25 crc kubenswrapper[4780]: I1210 11:13:25.172115 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-vwf8h" event={"ID":"8bc9d7bc-3890-4523-b805-9cd4c167fd9b","Type":"ContainerDied","Data":"bb97b5a162334ac93e7f401d2a03bd64b97cc892387c00213a8770e883d63c3f"} Dec 10 11:13:25 crc kubenswrapper[4780]: I1210 11:13:25.172716 4780 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="bb97b5a162334ac93e7f401d2a03bd64b97cc892387c00213a8770e883d63c3f" Dec 10 11:13:25 crc kubenswrapper[4780]: I1210 11:13:25.172503 4780 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-create-vwf8h" Dec 10 11:13:25 crc kubenswrapper[4780]: I1210 11:13:25.175978 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c6e99715-0d2d-4998-899e-58c68d7db78a-kube-api-access-w4hdl" (OuterVolumeSpecName: "kube-api-access-w4hdl") pod "c6e99715-0d2d-4998-899e-58c68d7db78a" (UID: "c6e99715-0d2d-4998-899e-58c68d7db78a"). InnerVolumeSpecName "kube-api-access-w4hdl". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:13:25 crc kubenswrapper[4780]: I1210 11:13:25.176132 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/002c5229-a237-4f6b-a323-f28d0eb09124-kube-api-access-59bbp" (OuterVolumeSpecName: "kube-api-access-59bbp") pod "002c5229-a237-4f6b-a323-f28d0eb09124" (UID: "002c5229-a237-4f6b-a323-f28d0eb09124"). InnerVolumeSpecName "kube-api-access-59bbp". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:13:25 crc kubenswrapper[4780]: I1210 11:13:25.182140 4780 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-2e1e-account-create-update-xshf7" Dec 10 11:13:25 crc kubenswrapper[4780]: I1210 11:13:25.183123 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-2e1e-account-create-update-xshf7" event={"ID":"c6e99715-0d2d-4998-899e-58c68d7db78a","Type":"ContainerDied","Data":"54829e742b0c6a2da5c38d6edb8bbd877bf78ddc8592ee482a708aca201356a6"} Dec 10 11:13:25 crc kubenswrapper[4780]: I1210 11:13:25.183175 4780 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="54829e742b0c6a2da5c38d6edb8bbd877bf78ddc8592ee482a708aca201356a6" Dec 10 11:13:25 crc kubenswrapper[4780]: I1210 11:13:25.189216 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/df258243-e086-4015-829c-01101a52b26e-kube-api-access-w8wp8" (OuterVolumeSpecName: "kube-api-access-w8wp8") pod "df258243-e086-4015-829c-01101a52b26e" (UID: "df258243-e086-4015-829c-01101a52b26e"). InnerVolumeSpecName "kube-api-access-w8wp8". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:13:25 crc kubenswrapper[4780]: I1210 11:13:25.262294 4780 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/df258243-e086-4015-829c-01101a52b26e-operator-scripts\") on node \"crc\" DevicePath \"\"" Dec 10 11:13:25 crc kubenswrapper[4780]: I1210 11:13:25.262689 4780 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w4hdl\" (UniqueName: \"kubernetes.io/projected/c6e99715-0d2d-4998-899e-58c68d7db78a-kube-api-access-w4hdl\") on node \"crc\" DevicePath \"\"" Dec 10 11:13:25 crc kubenswrapper[4780]: I1210 11:13:25.262763 4780 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8bc9d7bc-3890-4523-b805-9cd4c167fd9b-operator-scripts\") on node \"crc\" DevicePath \"\"" Dec 10 11:13:25 crc kubenswrapper[4780]: I1210 11:13:25.262822 4780 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-g8qm6\" (UniqueName: \"kubernetes.io/projected/8bc9d7bc-3890-4523-b805-9cd4c167fd9b-kube-api-access-g8qm6\") on node \"crc\" DevicePath \"\"" Dec 10 11:13:25 crc kubenswrapper[4780]: I1210 11:13:25.262976 4780 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-59bbp\" (UniqueName: \"kubernetes.io/projected/002c5229-a237-4f6b-a323-f28d0eb09124-kube-api-access-59bbp\") on node \"crc\" DevicePath \"\"" Dec 10 11:13:25 crc kubenswrapper[4780]: I1210 11:13:25.263068 4780 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/002c5229-a237-4f6b-a323-f28d0eb09124-operator-scripts\") on node \"crc\" DevicePath \"\"" Dec 10 11:13:25 crc kubenswrapper[4780]: I1210 11:13:25.263262 4780 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w8wp8\" (UniqueName: \"kubernetes.io/projected/df258243-e086-4015-829c-01101a52b26e-kube-api-access-w8wp8\") on node \"crc\" DevicePath \"\"" Dec 10 11:13:25 crc kubenswrapper[4780]: I1210 11:13:25.263371 4780 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4qw4v\" (UniqueName: \"kubernetes.io/projected/1d18f911-380d-4e2d-915b-18621871d0f5-kube-api-access-4qw4v\") on node \"crc\" DevicePath \"\"" Dec 10 11:13:25 crc kubenswrapper[4780]: I1210 11:13:25.263464 4780 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c6e99715-0d2d-4998-899e-58c68d7db78a-operator-scripts\") on node \"crc\" DevicePath \"\"" Dec 10 11:13:25 crc kubenswrapper[4780]: I1210 11:13:25.263545 4780 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1d18f911-380d-4e2d-915b-18621871d0f5-operator-scripts\") on node \"crc\" DevicePath \"\"" Dec 10 11:13:25 crc kubenswrapper[4780]: I1210 11:13:25.675538 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/3e1a1225-bdae-4dcb-b10a-02504fe590cd-etc-swift\") pod \"swift-storage-0\" (UID: \"3e1a1225-bdae-4dcb-b10a-02504fe590cd\") " pod="openstack/swift-storage-0" Dec 10 11:13:25 crc kubenswrapper[4780]: E1210 11:13:25.675864 4780 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Dec 10 11:13:25 crc kubenswrapper[4780]: E1210 11:13:25.676164 4780 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap 
"swift-ring-files" not found Dec 10 11:13:25 crc kubenswrapper[4780]: E1210 11:13:25.676263 4780 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3e1a1225-bdae-4dcb-b10a-02504fe590cd-etc-swift podName:3e1a1225-bdae-4dcb-b10a-02504fe590cd nodeName:}" failed. No retries permitted until 2025-12-10 11:13:57.676228397 +0000 UTC m=+1742.529621840 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/3e1a1225-bdae-4dcb-b10a-02504fe590cd-etc-swift") pod "swift-storage-0" (UID: "3e1a1225-bdae-4dcb-b10a-02504fe590cd") : configmap "swift-ring-files" not found Dec 10 11:13:25 crc kubenswrapper[4780]: I1210 11:13:25.714482 4780 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/mysqld-exporter-openstack-db-create-bfhjg" Dec 10 11:13:25 crc kubenswrapper[4780]: I1210 11:13:25.881299 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4pdwx\" (UniqueName: \"kubernetes.io/projected/b65fc796-3b7d-44e5-98eb-898c371a7174-kube-api-access-4pdwx\") pod \"b65fc796-3b7d-44e5-98eb-898c371a7174\" (UID: \"b65fc796-3b7d-44e5-98eb-898c371a7174\") " Dec 10 11:13:25 crc kubenswrapper[4780]: I1210 11:13:25.881360 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b65fc796-3b7d-44e5-98eb-898c371a7174-operator-scripts\") pod \"b65fc796-3b7d-44e5-98eb-898c371a7174\" (UID: \"b65fc796-3b7d-44e5-98eb-898c371a7174\") " Dec 10 11:13:25 crc kubenswrapper[4780]: I1210 11:13:25.882482 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b65fc796-3b7d-44e5-98eb-898c371a7174-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "b65fc796-3b7d-44e5-98eb-898c371a7174" (UID: "b65fc796-3b7d-44e5-98eb-898c371a7174"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 11:13:25 crc kubenswrapper[4780]: I1210 11:13:25.887116 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b65fc796-3b7d-44e5-98eb-898c371a7174-kube-api-access-4pdwx" (OuterVolumeSpecName: "kube-api-access-4pdwx") pod "b65fc796-3b7d-44e5-98eb-898c371a7174" (UID: "b65fc796-3b7d-44e5-98eb-898c371a7174"). InnerVolumeSpecName "kube-api-access-4pdwx". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:13:25 crc kubenswrapper[4780]: I1210 11:13:25.986299 4780 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4pdwx\" (UniqueName: \"kubernetes.io/projected/b65fc796-3b7d-44e5-98eb-898c371a7174-kube-api-access-4pdwx\") on node \"crc\" DevicePath \"\"" Dec 10 11:13:25 crc kubenswrapper[4780]: I1210 11:13:25.986360 4780 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b65fc796-3b7d-44e5-98eb-898c371a7174-operator-scripts\") on node \"crc\" DevicePath \"\"" Dec 10 11:13:26 crc kubenswrapper[4780]: I1210 11:13:26.201296 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-qqjpk" event={"ID":"a96954e9-6eb0-41ea-851e-41b5f7ef3197","Type":"ContainerStarted","Data":"b384815d870e7a63b510aa221c6edcb930acef418a74520e7c9e97038eaf1928"} Dec 10 11:13:26 crc kubenswrapper[4780]: I1210 11:13:26.209992 4780 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/mysqld-exporter-openstack-db-create-bfhjg" Dec 10 11:13:26 crc kubenswrapper[4780]: I1210 11:13:26.211176 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mysqld-exporter-openstack-db-create-bfhjg" event={"ID":"b65fc796-3b7d-44e5-98eb-898c371a7174","Type":"ContainerDied","Data":"e7e89e7bde68dc373a730402ea4558a0d7a3bdc02772de28f294475bfa38034a"} Dec 10 11:13:26 crc kubenswrapper[4780]: I1210 11:13:26.211218 4780 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e7e89e7bde68dc373a730402ea4558a0d7a3bdc02772de28f294475bfa38034a" Dec 10 11:13:26 crc kubenswrapper[4780]: I1210 11:13:26.229861 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-qqjpk" podStartSLOduration=4.298140223 podStartE2EDuration="8.229829388s" podCreationTimestamp="2025-12-10 11:13:18 +0000 UTC" firstStartedPulling="2025-12-10 11:13:21.854017944 +0000 UTC m=+1706.707411387" lastFinishedPulling="2025-12-10 11:13:25.785707109 +0000 UTC m=+1710.639100552" observedRunningTime="2025-12-10 11:13:26.226613685 +0000 UTC m=+1711.080007138" watchObservedRunningTime="2025-12-10 11:13:26.229829388 +0000 UTC m=+1711.083222831" Dec 10 11:13:26 crc kubenswrapper[4780]: I1210 11:13:26.722320 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-northd-0" Dec 10 11:13:26 crc kubenswrapper[4780]: I1210 11:13:26.775702 4780 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/mysqld-exporter-3721-account-create-update-gbldb" Dec 10 11:13:26 crc kubenswrapper[4780]: I1210 11:13:26.914346 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jv9jw\" (UniqueName: \"kubernetes.io/projected/5a999d24-37c5-4026-9b4a-31e3642077e1-kube-api-access-jv9jw\") pod \"5a999d24-37c5-4026-9b4a-31e3642077e1\" (UID: \"5a999d24-37c5-4026-9b4a-31e3642077e1\") " Dec 10 11:13:26 crc kubenswrapper[4780]: I1210 11:13:26.914802 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/5a999d24-37c5-4026-9b4a-31e3642077e1-operator-scripts\") pod \"5a999d24-37c5-4026-9b4a-31e3642077e1\" (UID: \"5a999d24-37c5-4026-9b4a-31e3642077e1\") " Dec 10 11:13:26 crc kubenswrapper[4780]: I1210 11:13:26.917778 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5a999d24-37c5-4026-9b4a-31e3642077e1-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "5a999d24-37c5-4026-9b4a-31e3642077e1" (UID: "5a999d24-37c5-4026-9b4a-31e3642077e1"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 11:13:26 crc kubenswrapper[4780]: I1210 11:13:26.929615 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5a999d24-37c5-4026-9b4a-31e3642077e1-kube-api-access-jv9jw" (OuterVolumeSpecName: "kube-api-access-jv9jw") pod "5a999d24-37c5-4026-9b4a-31e3642077e1" (UID: "5a999d24-37c5-4026-9b4a-31e3642077e1"). InnerVolumeSpecName "kube-api-access-jv9jw". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:13:27 crc kubenswrapper[4780]: I1210 11:13:27.018526 4780 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/5a999d24-37c5-4026-9b4a-31e3642077e1-operator-scripts\") on node \"crc\" DevicePath \"\"" Dec 10 11:13:27 crc kubenswrapper[4780]: I1210 11:13:27.018601 4780 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jv9jw\" (UniqueName: \"kubernetes.io/projected/5a999d24-37c5-4026-9b4a-31e3642077e1-kube-api-access-jv9jw\") on node \"crc\" DevicePath \"\"" Dec 10 11:13:27 crc kubenswrapper[4780]: I1210 11:13:27.271709 4780 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/mysqld-exporter-3721-account-create-update-gbldb" Dec 10 11:13:27 crc kubenswrapper[4780]: I1210 11:13:27.272055 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mysqld-exporter-3721-account-create-update-gbldb" event={"ID":"5a999d24-37c5-4026-9b4a-31e3642077e1","Type":"ContainerDied","Data":"7acfac095eb91ae1f0a9973192ab8f2810c491662ebed5dd317c5ef4cdf44985"} Dec 10 11:13:27 crc kubenswrapper[4780]: I1210 11:13:27.272145 4780 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="7acfac095eb91ae1f0a9973192ab8f2810c491662ebed5dd317c5ef4cdf44985" Dec 10 11:13:27 crc kubenswrapper[4780]: I1210 11:13:27.475561 4780 patch_prober.go:28] interesting pod/machine-config-daemon-xhdr5 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 10 11:13:27 crc kubenswrapper[4780]: I1210 11:13:27.475633 4780 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" podUID="6bf1dca1-b191-4796-b326-baac53e84045" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 10 11:13:28 crc kubenswrapper[4780]: I1210 11:13:28.217054 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-db-sync-m9l8h"] Dec 10 11:13:28 crc kubenswrapper[4780]: E1210 11:13:28.218107 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bd881333-f3fb-4c4d-b31e-f755f9c1271d" containerName="mariadb-account-create-update" Dec 10 11:13:28 crc kubenswrapper[4780]: I1210 11:13:28.218124 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="bd881333-f3fb-4c4d-b31e-f755f9c1271d" containerName="mariadb-account-create-update" Dec 10 11:13:28 crc kubenswrapper[4780]: E1210 11:13:28.218138 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c6e99715-0d2d-4998-899e-58c68d7db78a" containerName="mariadb-account-create-update" Dec 10 11:13:28 crc kubenswrapper[4780]: I1210 11:13:28.218145 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="c6e99715-0d2d-4998-899e-58c68d7db78a" containerName="mariadb-account-create-update" Dec 10 11:13:28 crc kubenswrapper[4780]: E1210 11:13:28.218176 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8bc9d7bc-3890-4523-b805-9cd4c167fd9b" containerName="mariadb-database-create" Dec 10 11:13:28 crc kubenswrapper[4780]: I1210 11:13:28.218182 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="8bc9d7bc-3890-4523-b805-9cd4c167fd9b" containerName="mariadb-database-create" Dec 10 11:13:28 crc 
kubenswrapper[4780]: E1210 11:13:28.218193 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1d18f911-380d-4e2d-915b-18621871d0f5" containerName="mariadb-account-create-update" Dec 10 11:13:28 crc kubenswrapper[4780]: I1210 11:13:28.218200 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="1d18f911-380d-4e2d-915b-18621871d0f5" containerName="mariadb-account-create-update" Dec 10 11:13:28 crc kubenswrapper[4780]: E1210 11:13:28.218217 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b65fc796-3b7d-44e5-98eb-898c371a7174" containerName="mariadb-database-create" Dec 10 11:13:28 crc kubenswrapper[4780]: I1210 11:13:28.218231 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="b65fc796-3b7d-44e5-98eb-898c371a7174" containerName="mariadb-database-create" Dec 10 11:13:28 crc kubenswrapper[4780]: E1210 11:13:28.218242 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="df258243-e086-4015-829c-01101a52b26e" containerName="mariadb-database-create" Dec 10 11:13:28 crc kubenswrapper[4780]: I1210 11:13:28.218248 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="df258243-e086-4015-829c-01101a52b26e" containerName="mariadb-database-create" Dec 10 11:13:28 crc kubenswrapper[4780]: E1210 11:13:28.218256 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="002c5229-a237-4f6b-a323-f28d0eb09124" containerName="mariadb-database-create" Dec 10 11:13:28 crc kubenswrapper[4780]: I1210 11:13:28.218262 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="002c5229-a237-4f6b-a323-f28d0eb09124" containerName="mariadb-database-create" Dec 10 11:13:28 crc kubenswrapper[4780]: E1210 11:13:28.218270 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5a999d24-37c5-4026-9b4a-31e3642077e1" containerName="mariadb-account-create-update" Dec 10 11:13:28 crc kubenswrapper[4780]: I1210 11:13:28.218277 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="5a999d24-37c5-4026-9b4a-31e3642077e1" containerName="mariadb-account-create-update" Dec 10 11:13:28 crc kubenswrapper[4780]: I1210 11:13:28.218564 4780 memory_manager.go:354] "RemoveStaleState removing state" podUID="df258243-e086-4015-829c-01101a52b26e" containerName="mariadb-database-create" Dec 10 11:13:28 crc kubenswrapper[4780]: I1210 11:13:28.218587 4780 memory_manager.go:354] "RemoveStaleState removing state" podUID="bd881333-f3fb-4c4d-b31e-f755f9c1271d" containerName="mariadb-account-create-update" Dec 10 11:13:28 crc kubenswrapper[4780]: I1210 11:13:28.218604 4780 memory_manager.go:354] "RemoveStaleState removing state" podUID="5a999d24-37c5-4026-9b4a-31e3642077e1" containerName="mariadb-account-create-update" Dec 10 11:13:28 crc kubenswrapper[4780]: I1210 11:13:28.218621 4780 memory_manager.go:354] "RemoveStaleState removing state" podUID="002c5229-a237-4f6b-a323-f28d0eb09124" containerName="mariadb-database-create" Dec 10 11:13:28 crc kubenswrapper[4780]: I1210 11:13:28.218633 4780 memory_manager.go:354] "RemoveStaleState removing state" podUID="b65fc796-3b7d-44e5-98eb-898c371a7174" containerName="mariadb-database-create" Dec 10 11:13:28 crc kubenswrapper[4780]: I1210 11:13:28.218648 4780 memory_manager.go:354] "RemoveStaleState removing state" podUID="1d18f911-380d-4e2d-915b-18621871d0f5" containerName="mariadb-account-create-update" Dec 10 11:13:28 crc kubenswrapper[4780]: I1210 11:13:28.218657 4780 memory_manager.go:354] "RemoveStaleState removing state" podUID="8bc9d7bc-3890-4523-b805-9cd4c167fd9b" 
containerName="mariadb-database-create" Dec 10 11:13:28 crc kubenswrapper[4780]: I1210 11:13:28.218671 4780 memory_manager.go:354] "RemoveStaleState removing state" podUID="c6e99715-0d2d-4998-899e-58c68d7db78a" containerName="mariadb-account-create-update" Dec 10 11:13:28 crc kubenswrapper[4780]: I1210 11:13:28.219902 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-sync-m9l8h" Dec 10 11:13:28 crc kubenswrapper[4780]: I1210 11:13:28.227382 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-config-data" Dec 10 11:13:28 crc kubenswrapper[4780]: I1210 11:13:28.227775 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-glance-dockercfg-4dhzr" Dec 10 11:13:28 crc kubenswrapper[4780]: I1210 11:13:28.236197 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-sync-m9l8h"] Dec 10 11:13:28 crc kubenswrapper[4780]: I1210 11:13:28.365715 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/626bc022-de20-4c32-ad5b-bd22a54340ce-db-sync-config-data\") pod \"glance-db-sync-m9l8h\" (UID: \"626bc022-de20-4c32-ad5b-bd22a54340ce\") " pod="openstack/glance-db-sync-m9l8h" Dec 10 11:13:28 crc kubenswrapper[4780]: I1210 11:13:28.365829 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kt92d\" (UniqueName: \"kubernetes.io/projected/626bc022-de20-4c32-ad5b-bd22a54340ce-kube-api-access-kt92d\") pod \"glance-db-sync-m9l8h\" (UID: \"626bc022-de20-4c32-ad5b-bd22a54340ce\") " pod="openstack/glance-db-sync-m9l8h" Dec 10 11:13:28 crc kubenswrapper[4780]: I1210 11:13:28.365853 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/626bc022-de20-4c32-ad5b-bd22a54340ce-combined-ca-bundle\") pod \"glance-db-sync-m9l8h\" (UID: \"626bc022-de20-4c32-ad5b-bd22a54340ce\") " pod="openstack/glance-db-sync-m9l8h" Dec 10 11:13:28 crc kubenswrapper[4780]: I1210 11:13:28.366210 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/626bc022-de20-4c32-ad5b-bd22a54340ce-config-data\") pod \"glance-db-sync-m9l8h\" (UID: \"626bc022-de20-4c32-ad5b-bd22a54340ce\") " pod="openstack/glance-db-sync-m9l8h" Dec 10 11:13:28 crc kubenswrapper[4780]: I1210 11:13:28.468996 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/626bc022-de20-4c32-ad5b-bd22a54340ce-config-data\") pod \"glance-db-sync-m9l8h\" (UID: \"626bc022-de20-4c32-ad5b-bd22a54340ce\") " pod="openstack/glance-db-sync-m9l8h" Dec 10 11:13:28 crc kubenswrapper[4780]: I1210 11:13:28.469384 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/626bc022-de20-4c32-ad5b-bd22a54340ce-db-sync-config-data\") pod \"glance-db-sync-m9l8h\" (UID: \"626bc022-de20-4c32-ad5b-bd22a54340ce\") " pod="openstack/glance-db-sync-m9l8h" Dec 10 11:13:28 crc kubenswrapper[4780]: I1210 11:13:28.469481 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kt92d\" (UniqueName: \"kubernetes.io/projected/626bc022-de20-4c32-ad5b-bd22a54340ce-kube-api-access-kt92d\") pod \"glance-db-sync-m9l8h\" 
(UID: \"626bc022-de20-4c32-ad5b-bd22a54340ce\") " pod="openstack/glance-db-sync-m9l8h" Dec 10 11:13:28 crc kubenswrapper[4780]: I1210 11:13:28.469519 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/626bc022-de20-4c32-ad5b-bd22a54340ce-combined-ca-bundle\") pod \"glance-db-sync-m9l8h\" (UID: \"626bc022-de20-4c32-ad5b-bd22a54340ce\") " pod="openstack/glance-db-sync-m9l8h" Dec 10 11:13:28 crc kubenswrapper[4780]: I1210 11:13:28.482284 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/626bc022-de20-4c32-ad5b-bd22a54340ce-db-sync-config-data\") pod \"glance-db-sync-m9l8h\" (UID: \"626bc022-de20-4c32-ad5b-bd22a54340ce\") " pod="openstack/glance-db-sync-m9l8h" Dec 10 11:13:28 crc kubenswrapper[4780]: I1210 11:13:28.483083 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/626bc022-de20-4c32-ad5b-bd22a54340ce-combined-ca-bundle\") pod \"glance-db-sync-m9l8h\" (UID: \"626bc022-de20-4c32-ad5b-bd22a54340ce\") " pod="openstack/glance-db-sync-m9l8h" Dec 10 11:13:28 crc kubenswrapper[4780]: I1210 11:13:28.489599 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/626bc022-de20-4c32-ad5b-bd22a54340ce-config-data\") pod \"glance-db-sync-m9l8h\" (UID: \"626bc022-de20-4c32-ad5b-bd22a54340ce\") " pod="openstack/glance-db-sync-m9l8h" Dec 10 11:13:28 crc kubenswrapper[4780]: I1210 11:13:28.503146 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kt92d\" (UniqueName: \"kubernetes.io/projected/626bc022-de20-4c32-ad5b-bd22a54340ce-kube-api-access-kt92d\") pod \"glance-db-sync-m9l8h\" (UID: \"626bc022-de20-4c32-ad5b-bd22a54340ce\") " pod="openstack/glance-db-sync-m9l8h" Dec 10 11:13:28 crc kubenswrapper[4780]: I1210 11:13:28.545817 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-db-sync-m9l8h" Dec 10 11:13:28 crc kubenswrapper[4780]: I1210 11:13:28.726141 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-qqjpk" Dec 10 11:13:28 crc kubenswrapper[4780]: I1210 11:13:28.726241 4780 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-qqjpk" Dec 10 11:13:29 crc kubenswrapper[4780]: I1210 11:13:29.796980 4780 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/community-operators-qqjpk" podUID="a96954e9-6eb0-41ea-851e-41b5f7ef3197" containerName="registry-server" probeResult="failure" output=< Dec 10 11:13:29 crc kubenswrapper[4780]: timeout: failed to connect service ":50051" within 1s Dec 10 11:13:29 crc kubenswrapper[4780]: > Dec 10 11:13:31 crc kubenswrapper[4780]: I1210 11:13:31.522969 4780 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ovn-controller-wt5zb" podUID="6bd77f46-f3d3-45a7-bc8e-f3de677e1583" containerName="ovn-controller" probeResult="failure" output=< Dec 10 11:13:31 crc kubenswrapper[4780]: ERROR - ovn-controller connection status is 'not connected', expecting 'connected' status Dec 10 11:13:31 crc kubenswrapper[4780]: > Dec 10 11:13:31 crc kubenswrapper[4780]: I1210 11:13:31.800237 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/mysqld-exporter-openstack-cell1-db-create-r5ntq"] Dec 10 11:13:31 crc kubenswrapper[4780]: I1210 11:13:31.803189 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/mysqld-exporter-openstack-cell1-db-create-r5ntq" Dec 10 11:13:31 crc kubenswrapper[4780]: I1210 11:13:31.810357 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mysqld-exporter-openstack-cell1-db-create-r5ntq"] Dec 10 11:13:31 crc kubenswrapper[4780]: I1210 11:13:31.978503 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3701bf21-11ac-4fea-8e61-f5b837dcf713-operator-scripts\") pod \"mysqld-exporter-openstack-cell1-db-create-r5ntq\" (UID: \"3701bf21-11ac-4fea-8e61-f5b837dcf713\") " pod="openstack/mysqld-exporter-openstack-cell1-db-create-r5ntq" Dec 10 11:13:31 crc kubenswrapper[4780]: I1210 11:13:31.978596 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f9j2n\" (UniqueName: \"kubernetes.io/projected/3701bf21-11ac-4fea-8e61-f5b837dcf713-kube-api-access-f9j2n\") pod \"mysqld-exporter-openstack-cell1-db-create-r5ntq\" (UID: \"3701bf21-11ac-4fea-8e61-f5b837dcf713\") " pod="openstack/mysqld-exporter-openstack-cell1-db-create-r5ntq" Dec 10 11:13:32 crc kubenswrapper[4780]: I1210 11:13:32.016149 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/mysqld-exporter-69b5-account-create-update-4q87k"] Dec 10 11:13:32 crc kubenswrapper[4780]: I1210 11:13:32.018949 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/mysqld-exporter-69b5-account-create-update-4q87k" Dec 10 11:13:32 crc kubenswrapper[4780]: I1210 11:13:32.027610 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"mysqld-exporter-openstack-cell1-db-secret" Dec 10 11:13:32 crc kubenswrapper[4780]: I1210 11:13:32.046783 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mysqld-exporter-69b5-account-create-update-4q87k"] Dec 10 11:13:32 crc kubenswrapper[4780]: I1210 11:13:32.083660 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3701bf21-11ac-4fea-8e61-f5b837dcf713-operator-scripts\") pod \"mysqld-exporter-openstack-cell1-db-create-r5ntq\" (UID: \"3701bf21-11ac-4fea-8e61-f5b837dcf713\") " pod="openstack/mysqld-exporter-openstack-cell1-db-create-r5ntq" Dec 10 11:13:32 crc kubenswrapper[4780]: I1210 11:13:32.083746 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-f9j2n\" (UniqueName: \"kubernetes.io/projected/3701bf21-11ac-4fea-8e61-f5b837dcf713-kube-api-access-f9j2n\") pod \"mysqld-exporter-openstack-cell1-db-create-r5ntq\" (UID: \"3701bf21-11ac-4fea-8e61-f5b837dcf713\") " pod="openstack/mysqld-exporter-openstack-cell1-db-create-r5ntq" Dec 10 11:13:32 crc kubenswrapper[4780]: I1210 11:13:32.083969 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/5e0f03ab-2e1d-406a-a4a0-c699b5a68e90-operator-scripts\") pod \"mysqld-exporter-69b5-account-create-update-4q87k\" (UID: \"5e0f03ab-2e1d-406a-a4a0-c699b5a68e90\") " pod="openstack/mysqld-exporter-69b5-account-create-update-4q87k" Dec 10 11:13:32 crc kubenswrapper[4780]: I1210 11:13:32.084028 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5cd8r\" (UniqueName: \"kubernetes.io/projected/5e0f03ab-2e1d-406a-a4a0-c699b5a68e90-kube-api-access-5cd8r\") pod \"mysqld-exporter-69b5-account-create-update-4q87k\" (UID: \"5e0f03ab-2e1d-406a-a4a0-c699b5a68e90\") " pod="openstack/mysqld-exporter-69b5-account-create-update-4q87k" Dec 10 11:13:32 crc kubenswrapper[4780]: I1210 11:13:32.085595 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3701bf21-11ac-4fea-8e61-f5b837dcf713-operator-scripts\") pod \"mysqld-exporter-openstack-cell1-db-create-r5ntq\" (UID: \"3701bf21-11ac-4fea-8e61-f5b837dcf713\") " pod="openstack/mysqld-exporter-openstack-cell1-db-create-r5ntq" Dec 10 11:13:32 crc kubenswrapper[4780]: I1210 11:13:32.113419 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-f9j2n\" (UniqueName: \"kubernetes.io/projected/3701bf21-11ac-4fea-8e61-f5b837dcf713-kube-api-access-f9j2n\") pod \"mysqld-exporter-openstack-cell1-db-create-r5ntq\" (UID: \"3701bf21-11ac-4fea-8e61-f5b837dcf713\") " pod="openstack/mysqld-exporter-openstack-cell1-db-create-r5ntq" Dec 10 11:13:32 crc kubenswrapper[4780]: I1210 11:13:32.144030 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/mysqld-exporter-openstack-cell1-db-create-r5ntq" Dec 10 11:13:32 crc kubenswrapper[4780]: I1210 11:13:32.186682 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/5e0f03ab-2e1d-406a-a4a0-c699b5a68e90-operator-scripts\") pod \"mysqld-exporter-69b5-account-create-update-4q87k\" (UID: \"5e0f03ab-2e1d-406a-a4a0-c699b5a68e90\") " pod="openstack/mysqld-exporter-69b5-account-create-update-4q87k" Dec 10 11:13:32 crc kubenswrapper[4780]: I1210 11:13:32.186789 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5cd8r\" (UniqueName: \"kubernetes.io/projected/5e0f03ab-2e1d-406a-a4a0-c699b5a68e90-kube-api-access-5cd8r\") pod \"mysqld-exporter-69b5-account-create-update-4q87k\" (UID: \"5e0f03ab-2e1d-406a-a4a0-c699b5a68e90\") " pod="openstack/mysqld-exporter-69b5-account-create-update-4q87k" Dec 10 11:13:32 crc kubenswrapper[4780]: I1210 11:13:32.187875 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/5e0f03ab-2e1d-406a-a4a0-c699b5a68e90-operator-scripts\") pod \"mysqld-exporter-69b5-account-create-update-4q87k\" (UID: \"5e0f03ab-2e1d-406a-a4a0-c699b5a68e90\") " pod="openstack/mysqld-exporter-69b5-account-create-update-4q87k" Dec 10 11:13:32 crc kubenswrapper[4780]: I1210 11:13:32.213597 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5cd8r\" (UniqueName: \"kubernetes.io/projected/5e0f03ab-2e1d-406a-a4a0-c699b5a68e90-kube-api-access-5cd8r\") pod \"mysqld-exporter-69b5-account-create-update-4q87k\" (UID: \"5e0f03ab-2e1d-406a-a4a0-c699b5a68e90\") " pod="openstack/mysqld-exporter-69b5-account-create-update-4q87k" Dec 10 11:13:32 crc kubenswrapper[4780]: I1210 11:13:32.350717 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/mysqld-exporter-69b5-account-create-update-4q87k" Dec 10 11:13:33 crc kubenswrapper[4780]: I1210 11:13:33.445776 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"bef902c7-4e5f-4af9-bda4-0c92b8521901","Type":"ContainerStarted","Data":"606ddda9fa20a44ab28de4c48bf44767eab3cd7716eef33f4f85fcc84bc046a0"} Dec 10 11:13:33 crc kubenswrapper[4780]: I1210 11:13:33.456529 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mysqld-exporter-69b5-account-create-update-4q87k"] Dec 10 11:13:33 crc kubenswrapper[4780]: I1210 11:13:33.462961 4780 generic.go:334] "Generic (PLEG): container finished" podID="72627815-752d-44a8-96cc-428f0239411d" containerID="83081b3dd12b5034cbe7328cde95d8befaa77fdb55499b34324cb97369597a7b" exitCode=0 Dec 10 11:13:33 crc kubenswrapper[4780]: I1210 11:13:33.463271 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-5zqjq" event={"ID":"72627815-752d-44a8-96cc-428f0239411d","Type":"ContainerDied","Data":"83081b3dd12b5034cbe7328cde95d8befaa77fdb55499b34324cb97369597a7b"} Dec 10 11:13:33 crc kubenswrapper[4780]: I1210 11:13:33.515856 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mysqld-exporter-openstack-cell1-db-create-r5ntq"] Dec 10 11:13:34 crc kubenswrapper[4780]: I1210 11:13:34.123643 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-sync-m9l8h"] Dec 10 11:13:34 crc kubenswrapper[4780]: I1210 11:13:34.497101 4780 generic.go:334] "Generic (PLEG): container finished" podID="3701bf21-11ac-4fea-8e61-f5b837dcf713" containerID="c24671d4d85b16e517f5138e655fffe93008cbcb9c8383aba20be513e9ec5e8c" exitCode=0 Dec 10 11:13:34 crc kubenswrapper[4780]: I1210 11:13:34.497483 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mysqld-exporter-openstack-cell1-db-create-r5ntq" event={"ID":"3701bf21-11ac-4fea-8e61-f5b837dcf713","Type":"ContainerDied","Data":"c24671d4d85b16e517f5138e655fffe93008cbcb9c8383aba20be513e9ec5e8c"} Dec 10 11:13:34 crc kubenswrapper[4780]: I1210 11:13:34.497571 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mysqld-exporter-openstack-cell1-db-create-r5ntq" event={"ID":"3701bf21-11ac-4fea-8e61-f5b837dcf713","Type":"ContainerStarted","Data":"536da7d406aaf8c49a30dd614ebf24e32b9825003e74a6f3fdc18652dd648fd9"} Dec 10 11:13:34 crc kubenswrapper[4780]: I1210 11:13:34.503622 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-m9l8h" event={"ID":"626bc022-de20-4c32-ad5b-bd22a54340ce","Type":"ContainerStarted","Data":"adb12627998d3460f06fcb9fa3a677ac62751cd8ce63b212d5d1e3e9105606aa"} Dec 10 11:13:34 crc kubenswrapper[4780]: I1210 11:13:34.523541 4780 generic.go:334] "Generic (PLEG): container finished" podID="5e0f03ab-2e1d-406a-a4a0-c699b5a68e90" containerID="f4770b342ae8db996830be5ee078da9ada45b1ed1159abcfa4aa9cbe2660bb95" exitCode=0 Dec 10 11:13:34 crc kubenswrapper[4780]: I1210 11:13:34.524738 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mysqld-exporter-69b5-account-create-update-4q87k" event={"ID":"5e0f03ab-2e1d-406a-a4a0-c699b5a68e90","Type":"ContainerDied","Data":"f4770b342ae8db996830be5ee078da9ada45b1ed1159abcfa4aa9cbe2660bb95"} Dec 10 11:13:34 crc kubenswrapper[4780]: I1210 11:13:34.525107 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mysqld-exporter-69b5-account-create-update-4q87k" 
event={"ID":"5e0f03ab-2e1d-406a-a4a0-c699b5a68e90","Type":"ContainerStarted","Data":"d65841337cbe1e3b276fa3dbea1c00817fdac179e68e646fcc55db940f8229da"} Dec 10 11:13:35 crc kubenswrapper[4780]: I1210 11:13:35.022244 4780 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/swift-ring-rebalance-5zqjq" Dec 10 11:13:35 crc kubenswrapper[4780]: I1210 11:13:35.168292 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/72627815-752d-44a8-96cc-428f0239411d-swiftconf\") pod \"72627815-752d-44a8-96cc-428f0239411d\" (UID: \"72627815-752d-44a8-96cc-428f0239411d\") " Dec 10 11:13:35 crc kubenswrapper[4780]: I1210 11:13:35.168400 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/72627815-752d-44a8-96cc-428f0239411d-combined-ca-bundle\") pod \"72627815-752d-44a8-96cc-428f0239411d\" (UID: \"72627815-752d-44a8-96cc-428f0239411d\") " Dec 10 11:13:35 crc kubenswrapper[4780]: I1210 11:13:35.168738 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5l8ng\" (UniqueName: \"kubernetes.io/projected/72627815-752d-44a8-96cc-428f0239411d-kube-api-access-5l8ng\") pod \"72627815-752d-44a8-96cc-428f0239411d\" (UID: \"72627815-752d-44a8-96cc-428f0239411d\") " Dec 10 11:13:35 crc kubenswrapper[4780]: I1210 11:13:35.169723 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/72627815-752d-44a8-96cc-428f0239411d-dispersionconf\") pod \"72627815-752d-44a8-96cc-428f0239411d\" (UID: \"72627815-752d-44a8-96cc-428f0239411d\") " Dec 10 11:13:35 crc kubenswrapper[4780]: I1210 11:13:35.169784 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/72627815-752d-44a8-96cc-428f0239411d-ring-data-devices\") pod \"72627815-752d-44a8-96cc-428f0239411d\" (UID: \"72627815-752d-44a8-96cc-428f0239411d\") " Dec 10 11:13:35 crc kubenswrapper[4780]: I1210 11:13:35.169845 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/72627815-752d-44a8-96cc-428f0239411d-scripts\") pod \"72627815-752d-44a8-96cc-428f0239411d\" (UID: \"72627815-752d-44a8-96cc-428f0239411d\") " Dec 10 11:13:35 crc kubenswrapper[4780]: I1210 11:13:35.170032 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/72627815-752d-44a8-96cc-428f0239411d-etc-swift\") pod \"72627815-752d-44a8-96cc-428f0239411d\" (UID: \"72627815-752d-44a8-96cc-428f0239411d\") " Dec 10 11:13:35 crc kubenswrapper[4780]: I1210 11:13:35.173524 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/72627815-752d-44a8-96cc-428f0239411d-etc-swift" (OuterVolumeSpecName: "etc-swift") pod "72627815-752d-44a8-96cc-428f0239411d" (UID: "72627815-752d-44a8-96cc-428f0239411d"). InnerVolumeSpecName "etc-swift". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 11:13:35 crc kubenswrapper[4780]: I1210 11:13:35.173703 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/72627815-752d-44a8-96cc-428f0239411d-ring-data-devices" (OuterVolumeSpecName: "ring-data-devices") pod "72627815-752d-44a8-96cc-428f0239411d" (UID: "72627815-752d-44a8-96cc-428f0239411d"). InnerVolumeSpecName "ring-data-devices". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 11:13:35 crc kubenswrapper[4780]: I1210 11:13:35.183518 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/72627815-752d-44a8-96cc-428f0239411d-kube-api-access-5l8ng" (OuterVolumeSpecName: "kube-api-access-5l8ng") pod "72627815-752d-44a8-96cc-428f0239411d" (UID: "72627815-752d-44a8-96cc-428f0239411d"). InnerVolumeSpecName "kube-api-access-5l8ng". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:13:35 crc kubenswrapper[4780]: I1210 11:13:35.193158 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/72627815-752d-44a8-96cc-428f0239411d-dispersionconf" (OuterVolumeSpecName: "dispersionconf") pod "72627815-752d-44a8-96cc-428f0239411d" (UID: "72627815-752d-44a8-96cc-428f0239411d"). InnerVolumeSpecName "dispersionconf". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:13:35 crc kubenswrapper[4780]: I1210 11:13:35.214762 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/72627815-752d-44a8-96cc-428f0239411d-scripts" (OuterVolumeSpecName: "scripts") pod "72627815-752d-44a8-96cc-428f0239411d" (UID: "72627815-752d-44a8-96cc-428f0239411d"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 11:13:35 crc kubenswrapper[4780]: I1210 11:13:35.216709 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/72627815-752d-44a8-96cc-428f0239411d-swiftconf" (OuterVolumeSpecName: "swiftconf") pod "72627815-752d-44a8-96cc-428f0239411d" (UID: "72627815-752d-44a8-96cc-428f0239411d"). InnerVolumeSpecName "swiftconf". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:13:35 crc kubenswrapper[4780]: I1210 11:13:35.223778 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/72627815-752d-44a8-96cc-428f0239411d-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "72627815-752d-44a8-96cc-428f0239411d" (UID: "72627815-752d-44a8-96cc-428f0239411d"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:13:35 crc kubenswrapper[4780]: I1210 11:13:35.274541 4780 reconciler_common.go:293] "Volume detached for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/72627815-752d-44a8-96cc-428f0239411d-etc-swift\") on node \"crc\" DevicePath \"\"" Dec 10 11:13:35 crc kubenswrapper[4780]: I1210 11:13:35.274591 4780 reconciler_common.go:293] "Volume detached for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/72627815-752d-44a8-96cc-428f0239411d-swiftconf\") on node \"crc\" DevicePath \"\"" Dec 10 11:13:35 crc kubenswrapper[4780]: I1210 11:13:35.274601 4780 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/72627815-752d-44a8-96cc-428f0239411d-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 10 11:13:35 crc kubenswrapper[4780]: I1210 11:13:35.274615 4780 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5l8ng\" (UniqueName: \"kubernetes.io/projected/72627815-752d-44a8-96cc-428f0239411d-kube-api-access-5l8ng\") on node \"crc\" DevicePath \"\"" Dec 10 11:13:35 crc kubenswrapper[4780]: I1210 11:13:35.274626 4780 reconciler_common.go:293] "Volume detached for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/72627815-752d-44a8-96cc-428f0239411d-dispersionconf\") on node \"crc\" DevicePath \"\"" Dec 10 11:13:35 crc kubenswrapper[4780]: I1210 11:13:35.274634 4780 reconciler_common.go:293] "Volume detached for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/72627815-752d-44a8-96cc-428f0239411d-ring-data-devices\") on node \"crc\" DevicePath \"\"" Dec 10 11:13:35 crc kubenswrapper[4780]: I1210 11:13:35.274644 4780 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/72627815-752d-44a8-96cc-428f0239411d-scripts\") on node \"crc\" DevicePath \"\"" Dec 10 11:13:35 crc kubenswrapper[4780]: I1210 11:13:35.540689 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-5zqjq" event={"ID":"72627815-752d-44a8-96cc-428f0239411d","Type":"ContainerDied","Data":"0b5ec7aa841cfd6599ea90e44f0614da75732331ab7745877497eba87e12f3df"} Dec 10 11:13:35 crc kubenswrapper[4780]: I1210 11:13:35.540775 4780 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="0b5ec7aa841cfd6599ea90e44f0614da75732331ab7745877497eba87e12f3df" Dec 10 11:13:35 crc kubenswrapper[4780]: I1210 11:13:35.540723 4780 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/swift-ring-rebalance-5zqjq" Dec 10 11:13:36 crc kubenswrapper[4780]: I1210 11:13:36.146562 4780 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/mysqld-exporter-69b5-account-create-update-4q87k" Dec 10 11:13:36 crc kubenswrapper[4780]: I1210 11:13:36.163431 4780 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/mysqld-exporter-openstack-cell1-db-create-r5ntq" Dec 10 11:13:36 crc kubenswrapper[4780]: I1210 11:13:36.332981 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5cd8r\" (UniqueName: \"kubernetes.io/projected/5e0f03ab-2e1d-406a-a4a0-c699b5a68e90-kube-api-access-5cd8r\") pod \"5e0f03ab-2e1d-406a-a4a0-c699b5a68e90\" (UID: \"5e0f03ab-2e1d-406a-a4a0-c699b5a68e90\") " Dec 10 11:13:36 crc kubenswrapper[4780]: I1210 11:13:36.333064 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/5e0f03ab-2e1d-406a-a4a0-c699b5a68e90-operator-scripts\") pod \"5e0f03ab-2e1d-406a-a4a0-c699b5a68e90\" (UID: \"5e0f03ab-2e1d-406a-a4a0-c699b5a68e90\") " Dec 10 11:13:36 crc kubenswrapper[4780]: I1210 11:13:36.333225 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-f9j2n\" (UniqueName: \"kubernetes.io/projected/3701bf21-11ac-4fea-8e61-f5b837dcf713-kube-api-access-f9j2n\") pod \"3701bf21-11ac-4fea-8e61-f5b837dcf713\" (UID: \"3701bf21-11ac-4fea-8e61-f5b837dcf713\") " Dec 10 11:13:36 crc kubenswrapper[4780]: I1210 11:13:36.333275 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3701bf21-11ac-4fea-8e61-f5b837dcf713-operator-scripts\") pod \"3701bf21-11ac-4fea-8e61-f5b837dcf713\" (UID: \"3701bf21-11ac-4fea-8e61-f5b837dcf713\") " Dec 10 11:13:36 crc kubenswrapper[4780]: I1210 11:13:36.334459 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5e0f03ab-2e1d-406a-a4a0-c699b5a68e90-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "5e0f03ab-2e1d-406a-a4a0-c699b5a68e90" (UID: "5e0f03ab-2e1d-406a-a4a0-c699b5a68e90"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 11:13:36 crc kubenswrapper[4780]: I1210 11:13:36.334633 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3701bf21-11ac-4fea-8e61-f5b837dcf713-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "3701bf21-11ac-4fea-8e61-f5b837dcf713" (UID: "3701bf21-11ac-4fea-8e61-f5b837dcf713"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 11:13:36 crc kubenswrapper[4780]: I1210 11:13:36.336264 4780 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/5e0f03ab-2e1d-406a-a4a0-c699b5a68e90-operator-scripts\") on node \"crc\" DevicePath \"\"" Dec 10 11:13:36 crc kubenswrapper[4780]: I1210 11:13:36.336295 4780 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3701bf21-11ac-4fea-8e61-f5b837dcf713-operator-scripts\") on node \"crc\" DevicePath \"\"" Dec 10 11:13:36 crc kubenswrapper[4780]: I1210 11:13:36.343189 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3701bf21-11ac-4fea-8e61-f5b837dcf713-kube-api-access-f9j2n" (OuterVolumeSpecName: "kube-api-access-f9j2n") pod "3701bf21-11ac-4fea-8e61-f5b837dcf713" (UID: "3701bf21-11ac-4fea-8e61-f5b837dcf713"). InnerVolumeSpecName "kube-api-access-f9j2n". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:13:36 crc kubenswrapper[4780]: I1210 11:13:36.363532 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5e0f03ab-2e1d-406a-a4a0-c699b5a68e90-kube-api-access-5cd8r" (OuterVolumeSpecName: "kube-api-access-5cd8r") pod "5e0f03ab-2e1d-406a-a4a0-c699b5a68e90" (UID: "5e0f03ab-2e1d-406a-a4a0-c699b5a68e90"). InnerVolumeSpecName "kube-api-access-5cd8r". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:13:36 crc kubenswrapper[4780]: I1210 11:13:36.439056 4780 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-f9j2n\" (UniqueName: \"kubernetes.io/projected/3701bf21-11ac-4fea-8e61-f5b837dcf713-kube-api-access-f9j2n\") on node \"crc\" DevicePath \"\"" Dec 10 11:13:36 crc kubenswrapper[4780]: I1210 11:13:36.439114 4780 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5cd8r\" (UniqueName: \"kubernetes.io/projected/5e0f03ab-2e1d-406a-a4a0-c699b5a68e90-kube-api-access-5cd8r\") on node \"crc\" DevicePath \"\"" Dec 10 11:13:36 crc kubenswrapper[4780]: I1210 11:13:36.503605 4780 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ovn-controller-wt5zb" podUID="6bd77f46-f3d3-45a7-bc8e-f3de677e1583" containerName="ovn-controller" probeResult="failure" output=< Dec 10 11:13:36 crc kubenswrapper[4780]: ERROR - ovn-controller connection status is 'not connected', expecting 'connected' status Dec 10 11:13:36 crc kubenswrapper[4780]: > Dec 10 11:13:36 crc kubenswrapper[4780]: I1210 11:13:36.557325 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mysqld-exporter-openstack-cell1-db-create-r5ntq" event={"ID":"3701bf21-11ac-4fea-8e61-f5b837dcf713","Type":"ContainerDied","Data":"536da7d406aaf8c49a30dd614ebf24e32b9825003e74a6f3fdc18652dd648fd9"} Dec 10 11:13:36 crc kubenswrapper[4780]: I1210 11:13:36.557385 4780 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="536da7d406aaf8c49a30dd614ebf24e32b9825003e74a6f3fdc18652dd648fd9" Dec 10 11:13:36 crc kubenswrapper[4780]: I1210 11:13:36.557486 4780 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/mysqld-exporter-openstack-cell1-db-create-r5ntq" Dec 10 11:13:36 crc kubenswrapper[4780]: I1210 11:13:36.561589 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mysqld-exporter-69b5-account-create-update-4q87k" event={"ID":"5e0f03ab-2e1d-406a-a4a0-c699b5a68e90","Type":"ContainerDied","Data":"d65841337cbe1e3b276fa3dbea1c00817fdac179e68e646fcc55db940f8229da"} Dec 10 11:13:36 crc kubenswrapper[4780]: I1210 11:13:36.561648 4780 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="d65841337cbe1e3b276fa3dbea1c00817fdac179e68e646fcc55db940f8229da" Dec 10 11:13:36 crc kubenswrapper[4780]: I1210 11:13:36.561694 4780 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/mysqld-exporter-69b5-account-create-update-4q87k" Dec 10 11:13:37 crc kubenswrapper[4780]: I1210 11:13:37.584833 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"bef902c7-4e5f-4af9-bda4-0c92b8521901","Type":"ContainerStarted","Data":"d8a5458beaac106b6242819cdeeeb432c7188934cbca0875f75b088a671a9868"} Dec 10 11:13:38 crc kubenswrapper[4780]: I1210 11:13:38.602008 4780 generic.go:334] "Generic (PLEG): container finished" podID="930a45eb-72d1-4060-92de-2e348073eb16" containerID="ed7f26679a7c0d61b6e2539f857a270f9cc2f930158baaf0029c54842591e814" exitCode=0 Dec 10 11:13:38 crc kubenswrapper[4780]: I1210 11:13:38.602070 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"930a45eb-72d1-4060-92de-2e348073eb16","Type":"ContainerDied","Data":"ed7f26679a7c0d61b6e2539f857a270f9cc2f930158baaf0029c54842591e814"} Dec 10 11:13:38 crc kubenswrapper[4780]: I1210 11:13:38.796492 4780 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-qqjpk" Dec 10 11:13:38 crc kubenswrapper[4780]: I1210 11:13:38.914558 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-qqjpk" Dec 10 11:13:39 crc kubenswrapper[4780]: I1210 11:13:39.051771 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-qqjpk"] Dec 10 11:13:39 crc kubenswrapper[4780]: I1210 11:13:39.623530 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"930a45eb-72d1-4060-92de-2e348073eb16","Type":"ContainerStarted","Data":"96434be8fedd20bc464a85ec30be23748e84f67ecee82cb8881db8f915ee8ee9"} Dec 10 11:13:39 crc kubenswrapper[4780]: I1210 11:13:39.624222 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-server-0" Dec 10 11:13:39 crc kubenswrapper[4780]: I1210 11:13:39.629795 4780 generic.go:334] "Generic (PLEG): container finished" podID="9f6ef7c1-91bd-4109-af1b-9cf3960ec2ae" containerID="5b477ceaf617590d7e0355d0772b11a52ca0d161acfc933673d187471a1a5ca7" exitCode=0 Dec 10 11:13:39 crc kubenswrapper[4780]: I1210 11:13:39.631246 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"9f6ef7c1-91bd-4109-af1b-9cf3960ec2ae","Type":"ContainerDied","Data":"5b477ceaf617590d7e0355d0772b11a52ca0d161acfc933673d187471a1a5ca7"} Dec 10 11:13:39 crc kubenswrapper[4780]: I1210 11:13:39.665097 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-server-0" podStartSLOduration=54.735540589 podStartE2EDuration="2m7.665065757s" podCreationTimestamp="2025-12-10 11:11:32 +0000 UTC" firstStartedPulling="2025-12-10 11:11:35.387170389 +0000 UTC m=+1600.240563832" lastFinishedPulling="2025-12-10 11:12:48.316695557 +0000 UTC m=+1673.170089000" observedRunningTime="2025-12-10 11:13:39.658678704 +0000 UTC m=+1724.512072167" watchObservedRunningTime="2025-12-10 11:13:39.665065757 +0000 UTC m=+1724.518459200" Dec 10 11:13:40 crc kubenswrapper[4780]: I1210 11:13:40.672341 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"9f6ef7c1-91bd-4109-af1b-9cf3960ec2ae","Type":"ContainerStarted","Data":"0d29a425c249ad598c9ad2f8907eb021b533375acc4ac5133b190c0761613d86"} Dec 10 11:13:40 crc kubenswrapper[4780]: I1210 11:13:40.672470 4780 
kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-qqjpk" podUID="a96954e9-6eb0-41ea-851e-41b5f7ef3197" containerName="registry-server" containerID="cri-o://b384815d870e7a63b510aa221c6edcb930acef418a74520e7c9e97038eaf1928" gracePeriod=2 Dec 10 11:13:40 crc kubenswrapper[4780]: I1210 11:13:40.674727 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-cell1-server-0" Dec 10 11:13:40 crc kubenswrapper[4780]: I1210 11:13:40.723929 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-cell1-server-0" podStartSLOduration=56.629196777 podStartE2EDuration="2m8.723893124s" podCreationTimestamp="2025-12-10 11:11:32 +0000 UTC" firstStartedPulling="2025-12-10 11:11:36.288833145 +0000 UTC m=+1601.142226588" lastFinishedPulling="2025-12-10 11:12:48.383529492 +0000 UTC m=+1673.236922935" observedRunningTime="2025-12-10 11:13:40.720826456 +0000 UTC m=+1725.574219919" watchObservedRunningTime="2025-12-10 11:13:40.723893124 +0000 UTC m=+1725.577286567" Dec 10 11:13:41 crc kubenswrapper[4780]: I1210 11:13:41.347103 4780 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-qqjpk" Dec 10 11:13:41 crc kubenswrapper[4780]: I1210 11:13:41.489892 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a96954e9-6eb0-41ea-851e-41b5f7ef3197-utilities\") pod \"a96954e9-6eb0-41ea-851e-41b5f7ef3197\" (UID: \"a96954e9-6eb0-41ea-851e-41b5f7ef3197\") " Dec 10 11:13:41 crc kubenswrapper[4780]: I1210 11:13:41.490000 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8bws8\" (UniqueName: \"kubernetes.io/projected/a96954e9-6eb0-41ea-851e-41b5f7ef3197-kube-api-access-8bws8\") pod \"a96954e9-6eb0-41ea-851e-41b5f7ef3197\" (UID: \"a96954e9-6eb0-41ea-851e-41b5f7ef3197\") " Dec 10 11:13:41 crc kubenswrapper[4780]: I1210 11:13:41.490103 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a96954e9-6eb0-41ea-851e-41b5f7ef3197-catalog-content\") pod \"a96954e9-6eb0-41ea-851e-41b5f7ef3197\" (UID: \"a96954e9-6eb0-41ea-851e-41b5f7ef3197\") " Dec 10 11:13:41 crc kubenswrapper[4780]: I1210 11:13:41.491285 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a96954e9-6eb0-41ea-851e-41b5f7ef3197-utilities" (OuterVolumeSpecName: "utilities") pod "a96954e9-6eb0-41ea-851e-41b5f7ef3197" (UID: "a96954e9-6eb0-41ea-851e-41b5f7ef3197"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 11:13:41 crc kubenswrapper[4780]: I1210 11:13:41.503067 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a96954e9-6eb0-41ea-851e-41b5f7ef3197-kube-api-access-8bws8" (OuterVolumeSpecName: "kube-api-access-8bws8") pod "a96954e9-6eb0-41ea-851e-41b5f7ef3197" (UID: "a96954e9-6eb0-41ea-851e-41b5f7ef3197"). InnerVolumeSpecName "kube-api-access-8bws8". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:13:41 crc kubenswrapper[4780]: I1210 11:13:41.508882 4780 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a96954e9-6eb0-41ea-851e-41b5f7ef3197-utilities\") on node \"crc\" DevicePath \"\"" Dec 10 11:13:41 crc kubenswrapper[4780]: I1210 11:13:41.514172 4780 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8bws8\" (UniqueName: \"kubernetes.io/projected/a96954e9-6eb0-41ea-851e-41b5f7ef3197-kube-api-access-8bws8\") on node \"crc\" DevicePath \"\"" Dec 10 11:13:41 crc kubenswrapper[4780]: I1210 11:13:41.552451 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a96954e9-6eb0-41ea-851e-41b5f7ef3197-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "a96954e9-6eb0-41ea-851e-41b5f7ef3197" (UID: "a96954e9-6eb0-41ea-851e-41b5f7ef3197"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 11:13:41 crc kubenswrapper[4780]: I1210 11:13:41.574698 4780 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ovn-controller-wt5zb" podUID="6bd77f46-f3d3-45a7-bc8e-f3de677e1583" containerName="ovn-controller" probeResult="failure" output=< Dec 10 11:13:41 crc kubenswrapper[4780]: ERROR - ovn-controller connection status is 'not connected', expecting 'connected' status Dec 10 11:13:41 crc kubenswrapper[4780]: > Dec 10 11:13:41 crc kubenswrapper[4780]: I1210 11:13:41.581744 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-controller-ovs-hzgvf" Dec 10 11:13:41 crc kubenswrapper[4780]: I1210 11:13:41.602346 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-controller-ovs-hzgvf" Dec 10 11:13:41 crc kubenswrapper[4780]: I1210 11:13:41.617687 4780 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a96954e9-6eb0-41ea-851e-41b5f7ef3197-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 10 11:13:41 crc kubenswrapper[4780]: I1210 11:13:41.697663 4780 generic.go:334] "Generic (PLEG): container finished" podID="a96954e9-6eb0-41ea-851e-41b5f7ef3197" containerID="b384815d870e7a63b510aa221c6edcb930acef418a74520e7c9e97038eaf1928" exitCode=0 Dec 10 11:13:41 crc kubenswrapper[4780]: I1210 11:13:41.697752 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-qqjpk" event={"ID":"a96954e9-6eb0-41ea-851e-41b5f7ef3197","Type":"ContainerDied","Data":"b384815d870e7a63b510aa221c6edcb930acef418a74520e7c9e97038eaf1928"} Dec 10 11:13:41 crc kubenswrapper[4780]: I1210 11:13:41.697832 4780 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-qqjpk" Dec 10 11:13:41 crc kubenswrapper[4780]: I1210 11:13:41.697860 4780 scope.go:117] "RemoveContainer" containerID="b384815d870e7a63b510aa221c6edcb930acef418a74520e7c9e97038eaf1928" Dec 10 11:13:41 crc kubenswrapper[4780]: I1210 11:13:41.697842 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-qqjpk" event={"ID":"a96954e9-6eb0-41ea-851e-41b5f7ef3197","Type":"ContainerDied","Data":"b06e8b06056033cf90936b727a8fc92d519418cae76feb6fe27367e02904debe"} Dec 10 11:13:41 crc kubenswrapper[4780]: I1210 11:13:41.762156 4780 scope.go:117] "RemoveContainer" containerID="1a87d5c149e56f4052b305d79bca3a7a6f880ea06a04fd0b678d6b1298003367" Dec 10 11:13:41 crc kubenswrapper[4780]: I1210 11:13:41.780305 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-qqjpk"] Dec 10 11:13:41 crc kubenswrapper[4780]: I1210 11:13:41.809369 4780 scope.go:117] "RemoveContainer" containerID="7312f37da7d16c4b5f8dbfe267cfa48e78cb55329e85fd93fba3f09a95903db4" Dec 10 11:13:41 crc kubenswrapper[4780]: I1210 11:13:41.818437 4780 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-qqjpk"] Dec 10 11:13:41 crc kubenswrapper[4780]: I1210 11:13:41.878933 4780 scope.go:117] "RemoveContainer" containerID="b384815d870e7a63b510aa221c6edcb930acef418a74520e7c9e97038eaf1928" Dec 10 11:13:41 crc kubenswrapper[4780]: E1210 11:13:41.879915 4780 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b384815d870e7a63b510aa221c6edcb930acef418a74520e7c9e97038eaf1928\": container with ID starting with b384815d870e7a63b510aa221c6edcb930acef418a74520e7c9e97038eaf1928 not found: ID does not exist" containerID="b384815d870e7a63b510aa221c6edcb930acef418a74520e7c9e97038eaf1928" Dec 10 11:13:41 crc kubenswrapper[4780]: I1210 11:13:41.879993 4780 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b384815d870e7a63b510aa221c6edcb930acef418a74520e7c9e97038eaf1928"} err="failed to get container status \"b384815d870e7a63b510aa221c6edcb930acef418a74520e7c9e97038eaf1928\": rpc error: code = NotFound desc = could not find container \"b384815d870e7a63b510aa221c6edcb930acef418a74520e7c9e97038eaf1928\": container with ID starting with b384815d870e7a63b510aa221c6edcb930acef418a74520e7c9e97038eaf1928 not found: ID does not exist" Dec 10 11:13:41 crc kubenswrapper[4780]: I1210 11:13:41.880032 4780 scope.go:117] "RemoveContainer" containerID="1a87d5c149e56f4052b305d79bca3a7a6f880ea06a04fd0b678d6b1298003367" Dec 10 11:13:41 crc kubenswrapper[4780]: E1210 11:13:41.880669 4780 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1a87d5c149e56f4052b305d79bca3a7a6f880ea06a04fd0b678d6b1298003367\": container with ID starting with 1a87d5c149e56f4052b305d79bca3a7a6f880ea06a04fd0b678d6b1298003367 not found: ID does not exist" containerID="1a87d5c149e56f4052b305d79bca3a7a6f880ea06a04fd0b678d6b1298003367" Dec 10 11:13:41 crc kubenswrapper[4780]: I1210 11:13:41.880694 4780 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1a87d5c149e56f4052b305d79bca3a7a6f880ea06a04fd0b678d6b1298003367"} err="failed to get container status \"1a87d5c149e56f4052b305d79bca3a7a6f880ea06a04fd0b678d6b1298003367\": rpc error: code = NotFound desc = could not find 
container \"1a87d5c149e56f4052b305d79bca3a7a6f880ea06a04fd0b678d6b1298003367\": container with ID starting with 1a87d5c149e56f4052b305d79bca3a7a6f880ea06a04fd0b678d6b1298003367 not found: ID does not exist" Dec 10 11:13:41 crc kubenswrapper[4780]: I1210 11:13:41.880708 4780 scope.go:117] "RemoveContainer" containerID="7312f37da7d16c4b5f8dbfe267cfa48e78cb55329e85fd93fba3f09a95903db4" Dec 10 11:13:41 crc kubenswrapper[4780]: E1210 11:13:41.881752 4780 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7312f37da7d16c4b5f8dbfe267cfa48e78cb55329e85fd93fba3f09a95903db4\": container with ID starting with 7312f37da7d16c4b5f8dbfe267cfa48e78cb55329e85fd93fba3f09a95903db4 not found: ID does not exist" containerID="7312f37da7d16c4b5f8dbfe267cfa48e78cb55329e85fd93fba3f09a95903db4" Dec 10 11:13:41 crc kubenswrapper[4780]: I1210 11:13:41.881775 4780 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7312f37da7d16c4b5f8dbfe267cfa48e78cb55329e85fd93fba3f09a95903db4"} err="failed to get container status \"7312f37da7d16c4b5f8dbfe267cfa48e78cb55329e85fd93fba3f09a95903db4\": rpc error: code = NotFound desc = could not find container \"7312f37da7d16c4b5f8dbfe267cfa48e78cb55329e85fd93fba3f09a95903db4\": container with ID starting with 7312f37da7d16c4b5f8dbfe267cfa48e78cb55329e85fd93fba3f09a95903db4 not found: ID does not exist" Dec 10 11:13:41 crc kubenswrapper[4780]: I1210 11:13:41.991722 4780 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a96954e9-6eb0-41ea-851e-41b5f7ef3197" path="/var/lib/kubelet/pods/a96954e9-6eb0-41ea-851e-41b5f7ef3197/volumes" Dec 10 11:13:42 crc kubenswrapper[4780]: I1210 11:13:42.059805 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-wt5zb-config-555n6"] Dec 10 11:13:42 crc kubenswrapper[4780]: E1210 11:13:42.060763 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a96954e9-6eb0-41ea-851e-41b5f7ef3197" containerName="registry-server" Dec 10 11:13:42 crc kubenswrapper[4780]: I1210 11:13:42.060794 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="a96954e9-6eb0-41ea-851e-41b5f7ef3197" containerName="registry-server" Dec 10 11:13:42 crc kubenswrapper[4780]: E1210 11:13:42.060820 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="72627815-752d-44a8-96cc-428f0239411d" containerName="swift-ring-rebalance" Dec 10 11:13:42 crc kubenswrapper[4780]: I1210 11:13:42.060830 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="72627815-752d-44a8-96cc-428f0239411d" containerName="swift-ring-rebalance" Dec 10 11:13:42 crc kubenswrapper[4780]: E1210 11:13:42.060856 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a96954e9-6eb0-41ea-851e-41b5f7ef3197" containerName="extract-utilities" Dec 10 11:13:42 crc kubenswrapper[4780]: I1210 11:13:42.060864 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="a96954e9-6eb0-41ea-851e-41b5f7ef3197" containerName="extract-utilities" Dec 10 11:13:42 crc kubenswrapper[4780]: E1210 11:13:42.060896 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a96954e9-6eb0-41ea-851e-41b5f7ef3197" containerName="extract-content" Dec 10 11:13:42 crc kubenswrapper[4780]: I1210 11:13:42.060905 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="a96954e9-6eb0-41ea-851e-41b5f7ef3197" containerName="extract-content" Dec 10 11:13:42 crc kubenswrapper[4780]: E1210 11:13:42.060949 4780 cpu_manager.go:410] 
"RemoveStaleState: removing container" podUID="5e0f03ab-2e1d-406a-a4a0-c699b5a68e90" containerName="mariadb-account-create-update" Dec 10 11:13:42 crc kubenswrapper[4780]: I1210 11:13:42.060958 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="5e0f03ab-2e1d-406a-a4a0-c699b5a68e90" containerName="mariadb-account-create-update" Dec 10 11:13:42 crc kubenswrapper[4780]: E1210 11:13:42.060984 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3701bf21-11ac-4fea-8e61-f5b837dcf713" containerName="mariadb-database-create" Dec 10 11:13:42 crc kubenswrapper[4780]: I1210 11:13:42.060993 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="3701bf21-11ac-4fea-8e61-f5b837dcf713" containerName="mariadb-database-create" Dec 10 11:13:42 crc kubenswrapper[4780]: I1210 11:13:42.061314 4780 memory_manager.go:354] "RemoveStaleState removing state" podUID="3701bf21-11ac-4fea-8e61-f5b837dcf713" containerName="mariadb-database-create" Dec 10 11:13:42 crc kubenswrapper[4780]: I1210 11:13:42.061347 4780 memory_manager.go:354] "RemoveStaleState removing state" podUID="5e0f03ab-2e1d-406a-a4a0-c699b5a68e90" containerName="mariadb-account-create-update" Dec 10 11:13:42 crc kubenswrapper[4780]: I1210 11:13:42.061372 4780 memory_manager.go:354] "RemoveStaleState removing state" podUID="72627815-752d-44a8-96cc-428f0239411d" containerName="swift-ring-rebalance" Dec 10 11:13:42 crc kubenswrapper[4780]: I1210 11:13:42.061390 4780 memory_manager.go:354] "RemoveStaleState removing state" podUID="a96954e9-6eb0-41ea-851e-41b5f7ef3197" containerName="registry-server" Dec 10 11:13:42 crc kubenswrapper[4780]: I1210 11:13:42.062690 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-wt5zb-config-555n6" Dec 10 11:13:42 crc kubenswrapper[4780]: I1210 11:13:42.067184 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-extra-scripts" Dec 10 11:13:42 crc kubenswrapper[4780]: I1210 11:13:42.078026 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-wt5zb-config-555n6"] Dec 10 11:13:42 crc kubenswrapper[4780]: I1210 11:13:42.149223 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/d3812dae-43fb-4c93-9c78-e5175065180f-var-run\") pod \"ovn-controller-wt5zb-config-555n6\" (UID: \"d3812dae-43fb-4c93-9c78-e5175065180f\") " pod="openstack/ovn-controller-wt5zb-config-555n6" Dec 10 11:13:42 crc kubenswrapper[4780]: I1210 11:13:42.149353 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/d3812dae-43fb-4c93-9c78-e5175065180f-var-log-ovn\") pod \"ovn-controller-wt5zb-config-555n6\" (UID: \"d3812dae-43fb-4c93-9c78-e5175065180f\") " pod="openstack/ovn-controller-wt5zb-config-555n6" Dec 10 11:13:42 crc kubenswrapper[4780]: I1210 11:13:42.149433 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-d2v8x\" (UniqueName: \"kubernetes.io/projected/d3812dae-43fb-4c93-9c78-e5175065180f-kube-api-access-d2v8x\") pod \"ovn-controller-wt5zb-config-555n6\" (UID: \"d3812dae-43fb-4c93-9c78-e5175065180f\") " pod="openstack/ovn-controller-wt5zb-config-555n6" Dec 10 11:13:42 crc kubenswrapper[4780]: I1210 11:13:42.149529 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" 
(UniqueName: \"kubernetes.io/configmap/d3812dae-43fb-4c93-9c78-e5175065180f-scripts\") pod \"ovn-controller-wt5zb-config-555n6\" (UID: \"d3812dae-43fb-4c93-9c78-e5175065180f\") " pod="openstack/ovn-controller-wt5zb-config-555n6" Dec 10 11:13:42 crc kubenswrapper[4780]: I1210 11:13:42.149611 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/d3812dae-43fb-4c93-9c78-e5175065180f-var-run-ovn\") pod \"ovn-controller-wt5zb-config-555n6\" (UID: \"d3812dae-43fb-4c93-9c78-e5175065180f\") " pod="openstack/ovn-controller-wt5zb-config-555n6" Dec 10 11:13:42 crc kubenswrapper[4780]: I1210 11:13:42.149700 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/d3812dae-43fb-4c93-9c78-e5175065180f-additional-scripts\") pod \"ovn-controller-wt5zb-config-555n6\" (UID: \"d3812dae-43fb-4c93-9c78-e5175065180f\") " pod="openstack/ovn-controller-wt5zb-config-555n6" Dec 10 11:13:42 crc kubenswrapper[4780]: I1210 11:13:42.227683 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/mysqld-exporter-0"] Dec 10 11:13:42 crc kubenswrapper[4780]: I1210 11:13:42.229944 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/mysqld-exporter-0" Dec 10 11:13:42 crc kubenswrapper[4780]: I1210 11:13:42.235684 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"mysqld-exporter-config-data" Dec 10 11:13:42 crc kubenswrapper[4780]: I1210 11:13:42.252341 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/d3812dae-43fb-4c93-9c78-e5175065180f-var-run-ovn\") pod \"ovn-controller-wt5zb-config-555n6\" (UID: \"d3812dae-43fb-4c93-9c78-e5175065180f\") " pod="openstack/ovn-controller-wt5zb-config-555n6" Dec 10 11:13:42 crc kubenswrapper[4780]: I1210 11:13:42.252535 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/d3812dae-43fb-4c93-9c78-e5175065180f-additional-scripts\") pod \"ovn-controller-wt5zb-config-555n6\" (UID: \"d3812dae-43fb-4c93-9c78-e5175065180f\") " pod="openstack/ovn-controller-wt5zb-config-555n6" Dec 10 11:13:42 crc kubenswrapper[4780]: I1210 11:13:42.252727 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/d3812dae-43fb-4c93-9c78-e5175065180f-var-run\") pod \"ovn-controller-wt5zb-config-555n6\" (UID: \"d3812dae-43fb-4c93-9c78-e5175065180f\") " pod="openstack/ovn-controller-wt5zb-config-555n6" Dec 10 11:13:42 crc kubenswrapper[4780]: I1210 11:13:42.252876 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/d3812dae-43fb-4c93-9c78-e5175065180f-var-log-ovn\") pod \"ovn-controller-wt5zb-config-555n6\" (UID: \"d3812dae-43fb-4c93-9c78-e5175065180f\") " pod="openstack/ovn-controller-wt5zb-config-555n6" Dec 10 11:13:42 crc kubenswrapper[4780]: I1210 11:13:42.253045 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-d2v8x\" (UniqueName: \"kubernetes.io/projected/d3812dae-43fb-4c93-9c78-e5175065180f-kube-api-access-d2v8x\") pod \"ovn-controller-wt5zb-config-555n6\" (UID: \"d3812dae-43fb-4c93-9c78-e5175065180f\") " pod="openstack/ovn-controller-wt5zb-config-555n6" 
Dec 10 11:13:42 crc kubenswrapper[4780]: I1210 11:13:42.253253 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/d3812dae-43fb-4c93-9c78-e5175065180f-scripts\") pod \"ovn-controller-wt5zb-config-555n6\" (UID: \"d3812dae-43fb-4c93-9c78-e5175065180f\") " pod="openstack/ovn-controller-wt5zb-config-555n6" Dec 10 11:13:42 crc kubenswrapper[4780]: I1210 11:13:42.254088 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/d3812dae-43fb-4c93-9c78-e5175065180f-var-run-ovn\") pod \"ovn-controller-wt5zb-config-555n6\" (UID: \"d3812dae-43fb-4c93-9c78-e5175065180f\") " pod="openstack/ovn-controller-wt5zb-config-555n6" Dec 10 11:13:42 crc kubenswrapper[4780]: I1210 11:13:42.254115 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/d3812dae-43fb-4c93-9c78-e5175065180f-var-run\") pod \"ovn-controller-wt5zb-config-555n6\" (UID: \"d3812dae-43fb-4c93-9c78-e5175065180f\") " pod="openstack/ovn-controller-wt5zb-config-555n6" Dec 10 11:13:42 crc kubenswrapper[4780]: I1210 11:13:42.254238 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/d3812dae-43fb-4c93-9c78-e5175065180f-var-log-ovn\") pod \"ovn-controller-wt5zb-config-555n6\" (UID: \"d3812dae-43fb-4c93-9c78-e5175065180f\") " pod="openstack/ovn-controller-wt5zb-config-555n6" Dec 10 11:13:42 crc kubenswrapper[4780]: I1210 11:13:42.255532 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/d3812dae-43fb-4c93-9c78-e5175065180f-additional-scripts\") pod \"ovn-controller-wt5zb-config-555n6\" (UID: \"d3812dae-43fb-4c93-9c78-e5175065180f\") " pod="openstack/ovn-controller-wt5zb-config-555n6" Dec 10 11:13:42 crc kubenswrapper[4780]: I1210 11:13:42.259170 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mysqld-exporter-0"] Dec 10 11:13:42 crc kubenswrapper[4780]: I1210 11:13:42.266244 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/d3812dae-43fb-4c93-9c78-e5175065180f-scripts\") pod \"ovn-controller-wt5zb-config-555n6\" (UID: \"d3812dae-43fb-4c93-9c78-e5175065180f\") " pod="openstack/ovn-controller-wt5zb-config-555n6" Dec 10 11:13:42 crc kubenswrapper[4780]: I1210 11:13:42.289961 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-d2v8x\" (UniqueName: \"kubernetes.io/projected/d3812dae-43fb-4c93-9c78-e5175065180f-kube-api-access-d2v8x\") pod \"ovn-controller-wt5zb-config-555n6\" (UID: \"d3812dae-43fb-4c93-9c78-e5175065180f\") " pod="openstack/ovn-controller-wt5zb-config-555n6" Dec 10 11:13:42 crc kubenswrapper[4780]: I1210 11:13:42.356642 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/97a2ba0e-4951-4b10-812a-41d11be1bcc5-combined-ca-bundle\") pod \"mysqld-exporter-0\" (UID: \"97a2ba0e-4951-4b10-812a-41d11be1bcc5\") " pod="openstack/mysqld-exporter-0" Dec 10 11:13:42 crc kubenswrapper[4780]: I1210 11:13:42.356786 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-prgx7\" (UniqueName: \"kubernetes.io/projected/97a2ba0e-4951-4b10-812a-41d11be1bcc5-kube-api-access-prgx7\") pod 
\"mysqld-exporter-0\" (UID: \"97a2ba0e-4951-4b10-812a-41d11be1bcc5\") " pod="openstack/mysqld-exporter-0" Dec 10 11:13:42 crc kubenswrapper[4780]: I1210 11:13:42.356829 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/97a2ba0e-4951-4b10-812a-41d11be1bcc5-config-data\") pod \"mysqld-exporter-0\" (UID: \"97a2ba0e-4951-4b10-812a-41d11be1bcc5\") " pod="openstack/mysqld-exporter-0" Dec 10 11:13:42 crc kubenswrapper[4780]: I1210 11:13:42.408787 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-wt5zb-config-555n6" Dec 10 11:13:42 crc kubenswrapper[4780]: I1210 11:13:42.461117 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/97a2ba0e-4951-4b10-812a-41d11be1bcc5-combined-ca-bundle\") pod \"mysqld-exporter-0\" (UID: \"97a2ba0e-4951-4b10-812a-41d11be1bcc5\") " pod="openstack/mysqld-exporter-0" Dec 10 11:13:42 crc kubenswrapper[4780]: I1210 11:13:42.461247 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-prgx7\" (UniqueName: \"kubernetes.io/projected/97a2ba0e-4951-4b10-812a-41d11be1bcc5-kube-api-access-prgx7\") pod \"mysqld-exporter-0\" (UID: \"97a2ba0e-4951-4b10-812a-41d11be1bcc5\") " pod="openstack/mysqld-exporter-0" Dec 10 11:13:42 crc kubenswrapper[4780]: I1210 11:13:42.461300 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/97a2ba0e-4951-4b10-812a-41d11be1bcc5-config-data\") pod \"mysqld-exporter-0\" (UID: \"97a2ba0e-4951-4b10-812a-41d11be1bcc5\") " pod="openstack/mysqld-exporter-0" Dec 10 11:13:42 crc kubenswrapper[4780]: I1210 11:13:42.474504 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/97a2ba0e-4951-4b10-812a-41d11be1bcc5-config-data\") pod \"mysqld-exporter-0\" (UID: \"97a2ba0e-4951-4b10-812a-41d11be1bcc5\") " pod="openstack/mysqld-exporter-0" Dec 10 11:13:42 crc kubenswrapper[4780]: I1210 11:13:42.495425 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/97a2ba0e-4951-4b10-812a-41d11be1bcc5-combined-ca-bundle\") pod \"mysqld-exporter-0\" (UID: \"97a2ba0e-4951-4b10-812a-41d11be1bcc5\") " pod="openstack/mysqld-exporter-0" Dec 10 11:13:42 crc kubenswrapper[4780]: I1210 11:13:42.509254 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-prgx7\" (UniqueName: \"kubernetes.io/projected/97a2ba0e-4951-4b10-812a-41d11be1bcc5-kube-api-access-prgx7\") pod \"mysqld-exporter-0\" (UID: \"97a2ba0e-4951-4b10-812a-41d11be1bcc5\") " pod="openstack/mysqld-exporter-0" Dec 10 11:13:42 crc kubenswrapper[4780]: I1210 11:13:42.559522 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/mysqld-exporter-0" Dec 10 11:13:46 crc kubenswrapper[4780]: I1210 11:13:46.500889 4780 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ovn-controller-wt5zb" podUID="6bd77f46-f3d3-45a7-bc8e-f3de677e1583" containerName="ovn-controller" probeResult="failure" output=< Dec 10 11:13:46 crc kubenswrapper[4780]: ERROR - ovn-controller connection status is 'not connected', expecting 'connected' status Dec 10 11:13:46 crc kubenswrapper[4780]: > Dec 10 11:13:51 crc kubenswrapper[4780]: I1210 11:13:51.529141 4780 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ovn-controller-wt5zb" podUID="6bd77f46-f3d3-45a7-bc8e-f3de677e1583" containerName="ovn-controller" probeResult="failure" output=< Dec 10 11:13:51 crc kubenswrapper[4780]: ERROR - ovn-controller connection status is 'not connected', expecting 'connected' status Dec 10 11:13:51 crc kubenswrapper[4780]: > Dec 10 11:13:53 crc kubenswrapper[4780]: E1210 11:13:53.504338 4780 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-glance-api:current-podified" Dec 10 11:13:53 crc kubenswrapper[4780]: E1210 11:13:53.506190 4780 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:glance-db-sync,Image:quay.io/podified-antelope-centos9/openstack-glance-api:current-podified,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:true,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:db-sync-config-data,ReadOnly:true,MountPath:/etc/glance/glance.conf.d,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/my.cnf,SubPath:my.cnf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:db-sync-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-kt92d,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42415,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:*42415,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod glance-db-sync-m9l8h_openstack(626bc022-de20-4c32-ad5b-bd22a54340ce): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Dec 10 11:13:53 crc 
kubenswrapper[4780]: E1210 11:13:53.507466 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"glance-db-sync\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/glance-db-sync-m9l8h" podUID="626bc022-de20-4c32-ad5b-bd22a54340ce" Dec 10 11:13:53 crc kubenswrapper[4780]: E1210 11:13:53.913111 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"glance-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-glance-api:current-podified\\\"\"" pod="openstack/glance-db-sync-m9l8h" podUID="626bc022-de20-4c32-ad5b-bd22a54340ce" Dec 10 11:13:53 crc kubenswrapper[4780]: I1210 11:13:53.993402 4780 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-server-0" podUID="930a45eb-72d1-4060-92de-2e348073eb16" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.0.129:5671: connect: connection refused" Dec 10 11:13:54 crc kubenswrapper[4780]: I1210 11:13:54.322394 4780 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-cell1-server-0" podUID="9f6ef7c1-91bd-4109-af1b-9cf3960ec2ae" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.0.130:5671: connect: connection refused" Dec 10 11:13:55 crc kubenswrapper[4780]: E1210 11:13:55.123697 4780 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/cluster-observability-operator/thanos-rhel9@sha256:d972f4faa5e9c121402d23ed85002f26af48ec36b1b71a7489d677b3913d08b4" Dec 10 11:13:55 crc kubenswrapper[4780]: E1210 11:13:55.124543 4780 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:thanos-sidecar,Image:registry.redhat.io/cluster-observability-operator/thanos-rhel9@sha256:d972f4faa5e9c121402d23ed85002f26af48ec36b1b71a7489d677b3913d08b4,Command:[],Args:[sidecar --prometheus.url=http://localhost:9090/ --grpc-address=:10901 --http-address=:10902 --log.level=info --prometheus.http-client-file=/etc/thanos/config/prometheus.http-client-file.yaml],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:http,HostPort:0,ContainerPort:10902,Protocol:TCP,HostIP:,},ContainerPort{Name:grpc,HostPort:0,ContainerPort:10901,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:thanos-prometheus-http-client-file,ReadOnly:false,MountPath:/etc/thanos/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-xtssp,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:*true,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} 
start failed in pod prometheus-metric-storage-0_openstack(bef902c7-4e5f-4af9-bda4-0c92b8521901): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Dec 10 11:13:55 crc kubenswrapper[4780]: E1210 11:13:55.125831 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"thanos-sidecar\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openstack/prometheus-metric-storage-0" podUID="bef902c7-4e5f-4af9-bda4-0c92b8521901" Dec 10 11:13:55 crc kubenswrapper[4780]: I1210 11:13:55.639224 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-wt5zb-config-555n6"] Dec 10 11:13:55 crc kubenswrapper[4780]: W1210 11:13:55.643489 4780 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd3812dae_43fb_4c93_9c78_e5175065180f.slice/crio-7d9873f72d9bad3fbc7b95b2ccd471f909206828173d5ae2299ad6d2ce1de0ce WatchSource:0}: Error finding container 7d9873f72d9bad3fbc7b95b2ccd471f909206828173d5ae2299ad6d2ce1de0ce: Status 404 returned error can't find the container with id 7d9873f72d9bad3fbc7b95b2ccd471f909206828173d5ae2299ad6d2ce1de0ce Dec 10 11:13:55 crc kubenswrapper[4780]: I1210 11:13:55.786717 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mysqld-exporter-0"] Dec 10 11:13:55 crc kubenswrapper[4780]: I1210 11:13:55.812515 4780 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Dec 10 11:13:55 crc kubenswrapper[4780]: I1210 11:13:55.943539 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-wt5zb-config-555n6" event={"ID":"d3812dae-43fb-4c93-9c78-e5175065180f","Type":"ContainerStarted","Data":"7d9873f72d9bad3fbc7b95b2ccd471f909206828173d5ae2299ad6d2ce1de0ce"} Dec 10 11:13:55 crc kubenswrapper[4780]: I1210 11:13:55.957171 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mysqld-exporter-0" event={"ID":"97a2ba0e-4951-4b10-812a-41d11be1bcc5","Type":"ContainerStarted","Data":"a45fd5be8adf2bbcba174c254104c4b620a487ada6aa950e25e7c30495a1bb7e"} Dec 10 11:13:55 crc kubenswrapper[4780]: E1210 11:13:55.960910 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"thanos-sidecar\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/cluster-observability-operator/thanos-rhel9@sha256:d972f4faa5e9c121402d23ed85002f26af48ec36b1b71a7489d677b3913d08b4\\\"\"" pod="openstack/prometheus-metric-storage-0" podUID="bef902c7-4e5f-4af9-bda4-0c92b8521901" Dec 10 11:13:56 crc kubenswrapper[4780]: I1210 11:13:56.032638 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/prometheus-metric-storage-0" Dec 10 11:13:56 crc kubenswrapper[4780]: I1210 11:13:56.511289 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-controller-wt5zb" Dec 10 11:13:56 crc kubenswrapper[4780]: I1210 11:13:56.988682 4780 generic.go:334] "Generic (PLEG): container finished" podID="d3812dae-43fb-4c93-9c78-e5175065180f" containerID="955ce765adb3c325210d940e928848d3dd783b544db9bf9dbe0753af449c93a7" exitCode=0 Dec 10 11:13:56 crc kubenswrapper[4780]: I1210 11:13:56.991570 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-wt5zb-config-555n6" 
event={"ID":"d3812dae-43fb-4c93-9c78-e5175065180f","Type":"ContainerDied","Data":"955ce765adb3c325210d940e928848d3dd783b544db9bf9dbe0753af449c93a7"} Dec 10 11:13:57 crc kubenswrapper[4780]: E1210 11:13:56.999902 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"thanos-sidecar\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/cluster-observability-operator/thanos-rhel9@sha256:d972f4faa5e9c121402d23ed85002f26af48ec36b1b71a7489d677b3913d08b4\\\"\"" pod="openstack/prometheus-metric-storage-0" podUID="bef902c7-4e5f-4af9-bda4-0c92b8521901" Dec 10 11:13:57 crc kubenswrapper[4780]: I1210 11:13:57.475785 4780 patch_prober.go:28] interesting pod/machine-config-daemon-xhdr5 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 10 11:13:57 crc kubenswrapper[4780]: I1210 11:13:57.476239 4780 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" podUID="6bf1dca1-b191-4796-b326-baac53e84045" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 10 11:13:57 crc kubenswrapper[4780]: I1210 11:13:57.476383 4780 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" Dec 10 11:13:57 crc kubenswrapper[4780]: I1210 11:13:57.477795 4780 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"90f38aa24e74e78a263a82b742bebf91991ff25f2212114904e33abbbd82de16"} pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Dec 10 11:13:57 crc kubenswrapper[4780]: I1210 11:13:57.477961 4780 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" podUID="6bf1dca1-b191-4796-b326-baac53e84045" containerName="machine-config-daemon" containerID="cri-o://90f38aa24e74e78a263a82b742bebf91991ff25f2212114904e33abbbd82de16" gracePeriod=600 Dec 10 11:13:57 crc kubenswrapper[4780]: E1210 11:13:57.633213 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xhdr5_openshift-machine-config-operator(6bf1dca1-b191-4796-b326-baac53e84045)\"" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" podUID="6bf1dca1-b191-4796-b326-baac53e84045" Dec 10 11:13:57 crc kubenswrapper[4780]: I1210 11:13:57.760297 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/3e1a1225-bdae-4dcb-b10a-02504fe590cd-etc-swift\") pod \"swift-storage-0\" (UID: \"3e1a1225-bdae-4dcb-b10a-02504fe590cd\") " pod="openstack/swift-storage-0" Dec 10 11:13:57 crc kubenswrapper[4780]: I1210 11:13:57.772430 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/3e1a1225-bdae-4dcb-b10a-02504fe590cd-etc-swift\") pod \"swift-storage-0\" (UID: \"3e1a1225-bdae-4dcb-b10a-02504fe590cd\") " 
pod="openstack/swift-storage-0" Dec 10 11:13:57 crc kubenswrapper[4780]: I1210 11:13:57.865062 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-storage-0" Dec 10 11:13:58 crc kubenswrapper[4780]: I1210 11:13:58.049586 4780 generic.go:334] "Generic (PLEG): container finished" podID="6bf1dca1-b191-4796-b326-baac53e84045" containerID="90f38aa24e74e78a263a82b742bebf91991ff25f2212114904e33abbbd82de16" exitCode=0 Dec 10 11:13:58 crc kubenswrapper[4780]: I1210 11:13:58.049852 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" event={"ID":"6bf1dca1-b191-4796-b326-baac53e84045","Type":"ContainerDied","Data":"90f38aa24e74e78a263a82b742bebf91991ff25f2212114904e33abbbd82de16"} Dec 10 11:13:58 crc kubenswrapper[4780]: I1210 11:13:58.049900 4780 scope.go:117] "RemoveContainer" containerID="c3c4f9f16910550f67c3bdc81fc9c721bc946d7793a35038605a3c1b6eb79b3b" Dec 10 11:13:58 crc kubenswrapper[4780]: I1210 11:13:58.051200 4780 scope.go:117] "RemoveContainer" containerID="90f38aa24e74e78a263a82b742bebf91991ff25f2212114904e33abbbd82de16" Dec 10 11:13:58 crc kubenswrapper[4780]: E1210 11:13:58.051747 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xhdr5_openshift-machine-config-operator(6bf1dca1-b191-4796-b326-baac53e84045)\"" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" podUID="6bf1dca1-b191-4796-b326-baac53e84045" Dec 10 11:13:58 crc kubenswrapper[4780]: I1210 11:13:58.621418 4780 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-wt5zb-config-555n6" Dec 10 11:13:58 crc kubenswrapper[4780]: I1210 11:13:58.731698 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/d3812dae-43fb-4c93-9c78-e5175065180f-var-log-ovn\") pod \"d3812dae-43fb-4c93-9c78-e5175065180f\" (UID: \"d3812dae-43fb-4c93-9c78-e5175065180f\") " Dec 10 11:13:58 crc kubenswrapper[4780]: I1210 11:13:58.731782 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/d3812dae-43fb-4c93-9c78-e5175065180f-var-run-ovn\") pod \"d3812dae-43fb-4c93-9c78-e5175065180f\" (UID: \"d3812dae-43fb-4c93-9c78-e5175065180f\") " Dec 10 11:13:58 crc kubenswrapper[4780]: I1210 11:13:58.731872 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/d3812dae-43fb-4c93-9c78-e5175065180f-scripts\") pod \"d3812dae-43fb-4c93-9c78-e5175065180f\" (UID: \"d3812dae-43fb-4c93-9c78-e5175065180f\") " Dec 10 11:13:58 crc kubenswrapper[4780]: I1210 11:13:58.731881 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/d3812dae-43fb-4c93-9c78-e5175065180f-var-log-ovn" (OuterVolumeSpecName: "var-log-ovn") pod "d3812dae-43fb-4c93-9c78-e5175065180f" (UID: "d3812dae-43fb-4c93-9c78-e5175065180f"). InnerVolumeSpecName "var-log-ovn". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 10 11:13:58 crc kubenswrapper[4780]: I1210 11:13:58.731992 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/d3812dae-43fb-4c93-9c78-e5175065180f-additional-scripts\") pod \"d3812dae-43fb-4c93-9c78-e5175065180f\" (UID: \"d3812dae-43fb-4c93-9c78-e5175065180f\") " Dec 10 11:13:58 crc kubenswrapper[4780]: I1210 11:13:58.732026 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d2v8x\" (UniqueName: \"kubernetes.io/projected/d3812dae-43fb-4c93-9c78-e5175065180f-kube-api-access-d2v8x\") pod \"d3812dae-43fb-4c93-9c78-e5175065180f\" (UID: \"d3812dae-43fb-4c93-9c78-e5175065180f\") " Dec 10 11:13:58 crc kubenswrapper[4780]: I1210 11:13:58.732018 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/d3812dae-43fb-4c93-9c78-e5175065180f-var-run-ovn" (OuterVolumeSpecName: "var-run-ovn") pod "d3812dae-43fb-4c93-9c78-e5175065180f" (UID: "d3812dae-43fb-4c93-9c78-e5175065180f"). InnerVolumeSpecName "var-run-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 10 11:13:58 crc kubenswrapper[4780]: I1210 11:13:58.732189 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/d3812dae-43fb-4c93-9c78-e5175065180f-var-run\") pod \"d3812dae-43fb-4c93-9c78-e5175065180f\" (UID: \"d3812dae-43fb-4c93-9c78-e5175065180f\") " Dec 10 11:13:58 crc kubenswrapper[4780]: I1210 11:13:58.732229 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/d3812dae-43fb-4c93-9c78-e5175065180f-var-run" (OuterVolumeSpecName: "var-run") pod "d3812dae-43fb-4c93-9c78-e5175065180f" (UID: "d3812dae-43fb-4c93-9c78-e5175065180f"). InnerVolumeSpecName "var-run". PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 10 11:13:58 crc kubenswrapper[4780]: I1210 11:13:58.733206 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d3812dae-43fb-4c93-9c78-e5175065180f-additional-scripts" (OuterVolumeSpecName: "additional-scripts") pod "d3812dae-43fb-4c93-9c78-e5175065180f" (UID: "d3812dae-43fb-4c93-9c78-e5175065180f"). InnerVolumeSpecName "additional-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 11:13:58 crc kubenswrapper[4780]: I1210 11:13:58.733231 4780 reconciler_common.go:293] "Volume detached for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/d3812dae-43fb-4c93-9c78-e5175065180f-var-run\") on node \"crc\" DevicePath \"\"" Dec 10 11:13:58 crc kubenswrapper[4780]: I1210 11:13:58.733252 4780 reconciler_common.go:293] "Volume detached for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/d3812dae-43fb-4c93-9c78-e5175065180f-var-log-ovn\") on node \"crc\" DevicePath \"\"" Dec 10 11:13:58 crc kubenswrapper[4780]: I1210 11:13:58.733264 4780 reconciler_common.go:293] "Volume detached for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/d3812dae-43fb-4c93-9c78-e5175065180f-var-run-ovn\") on node \"crc\" DevicePath \"\"" Dec 10 11:13:58 crc kubenswrapper[4780]: I1210 11:13:58.733442 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d3812dae-43fb-4c93-9c78-e5175065180f-scripts" (OuterVolumeSpecName: "scripts") pod "d3812dae-43fb-4c93-9c78-e5175065180f" (UID: "d3812dae-43fb-4c93-9c78-e5175065180f"). 
InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 11:13:58 crc kubenswrapper[4780]: I1210 11:13:58.766448 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d3812dae-43fb-4c93-9c78-e5175065180f-kube-api-access-d2v8x" (OuterVolumeSpecName: "kube-api-access-d2v8x") pod "d3812dae-43fb-4c93-9c78-e5175065180f" (UID: "d3812dae-43fb-4c93-9c78-e5175065180f"). InnerVolumeSpecName "kube-api-access-d2v8x". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:13:58 crc kubenswrapper[4780]: I1210 11:13:58.836911 4780 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d2v8x\" (UniqueName: \"kubernetes.io/projected/d3812dae-43fb-4c93-9c78-e5175065180f-kube-api-access-d2v8x\") on node \"crc\" DevicePath \"\"" Dec 10 11:13:58 crc kubenswrapper[4780]: I1210 11:13:58.837028 4780 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/d3812dae-43fb-4c93-9c78-e5175065180f-scripts\") on node \"crc\" DevicePath \"\"" Dec 10 11:13:58 crc kubenswrapper[4780]: I1210 11:13:58.837045 4780 reconciler_common.go:293] "Volume detached for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/d3812dae-43fb-4c93-9c78-e5175065180f-additional-scripts\") on node \"crc\" DevicePath \"\"" Dec 10 11:13:58 crc kubenswrapper[4780]: W1210 11:13:58.993826 4780 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod3e1a1225_bdae_4dcb_b10a_02504fe590cd.slice/crio-96a54bc10eac7638fdcb5601ed81c6a3192b407cdadb194757f599d9fd1fae4d WatchSource:0}: Error finding container 96a54bc10eac7638fdcb5601ed81c6a3192b407cdadb194757f599d9fd1fae4d: Status 404 returned error can't find the container with id 96a54bc10eac7638fdcb5601ed81c6a3192b407cdadb194757f599d9fd1fae4d Dec 10 11:13:59 crc kubenswrapper[4780]: I1210 11:13:59.008222 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-storage-0"] Dec 10 11:13:59 crc kubenswrapper[4780]: I1210 11:13:59.072891 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mysqld-exporter-0" event={"ID":"97a2ba0e-4951-4b10-812a-41d11be1bcc5","Type":"ContainerStarted","Data":"6d4ec7439ea09ed125953fd4e5efb1bb5b2516e71c6428aa76a2a0600818a0ad"} Dec 10 11:13:59 crc kubenswrapper[4780]: I1210 11:13:59.076682 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-wt5zb-config-555n6" event={"ID":"d3812dae-43fb-4c93-9c78-e5175065180f","Type":"ContainerDied","Data":"7d9873f72d9bad3fbc7b95b2ccd471f909206828173d5ae2299ad6d2ce1de0ce"} Dec 10 11:13:59 crc kubenswrapper[4780]: I1210 11:13:59.076750 4780 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="7d9873f72d9bad3fbc7b95b2ccd471f909206828173d5ae2299ad6d2ce1de0ce" Dec 10 11:13:59 crc kubenswrapper[4780]: I1210 11:13:59.076873 4780 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-wt5zb-config-555n6" Dec 10 11:13:59 crc kubenswrapper[4780]: I1210 11:13:59.084811 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"3e1a1225-bdae-4dcb-b10a-02504fe590cd","Type":"ContainerStarted","Data":"96a54bc10eac7638fdcb5601ed81c6a3192b407cdadb194757f599d9fd1fae4d"} Dec 10 11:13:59 crc kubenswrapper[4780]: I1210 11:13:59.104636 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/mysqld-exporter-0" podStartSLOduration=14.879734929 podStartE2EDuration="17.104558996s" podCreationTimestamp="2025-12-10 11:13:42 +0000 UTC" firstStartedPulling="2025-12-10 11:13:55.812221389 +0000 UTC m=+1740.665614832" lastFinishedPulling="2025-12-10 11:13:58.037045456 +0000 UTC m=+1742.890438899" observedRunningTime="2025-12-10 11:13:59.101684482 +0000 UTC m=+1743.955077925" watchObservedRunningTime="2025-12-10 11:13:59.104558996 +0000 UTC m=+1743.957952439" Dec 10 11:13:59 crc kubenswrapper[4780]: I1210 11:13:59.764679 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-controller-wt5zb-config-555n6"] Dec 10 11:13:59 crc kubenswrapper[4780]: I1210 11:13:59.775731 4780 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ovn-controller-wt5zb-config-555n6"] Dec 10 11:13:59 crc kubenswrapper[4780]: I1210 11:13:59.987132 4780 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d3812dae-43fb-4c93-9c78-e5175065180f" path="/var/lib/kubelet/pods/d3812dae-43fb-4c93-9c78-e5175065180f/volumes" Dec 10 11:14:01 crc kubenswrapper[4780]: I1210 11:14:01.000370 4780 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/prometheus-metric-storage-0" Dec 10 11:14:01 crc kubenswrapper[4780]: E1210 11:14:01.003608 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"thanos-sidecar\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/cluster-observability-operator/thanos-rhel9@sha256:d972f4faa5e9c121402d23ed85002f26af48ec36b1b71a7489d677b3913d08b4\\\"\"" pod="openstack/prometheus-metric-storage-0" podUID="bef902c7-4e5f-4af9-bda4-0c92b8521901" Dec 10 11:14:01 crc kubenswrapper[4780]: I1210 11:14:01.006028 4780 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/prometheus-metric-storage-0" Dec 10 11:14:01 crc kubenswrapper[4780]: I1210 11:14:01.114756 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"3e1a1225-bdae-4dcb-b10a-02504fe590cd","Type":"ContainerStarted","Data":"7bf985c138362ada2b338c20f2b1523a33dc5b7cf0e5972bbc5910959750fa6f"} Dec 10 11:14:01 crc kubenswrapper[4780]: I1210 11:14:01.114863 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"3e1a1225-bdae-4dcb-b10a-02504fe590cd","Type":"ContainerStarted","Data":"e8dda87bd312348f142c6de6aaa43b2bc4e9d24f514495c092812d3104c317f9"} Dec 10 11:14:01 crc kubenswrapper[4780]: I1210 11:14:01.117522 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/prometheus-metric-storage-0" Dec 10 11:14:01 crc kubenswrapper[4780]: E1210 11:14:01.118892 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"thanos-sidecar\" with ImagePullBackOff: \"Back-off pulling image 
\\\"registry.redhat.io/cluster-observability-operator/thanos-rhel9@sha256:d972f4faa5e9c121402d23ed85002f26af48ec36b1b71a7489d677b3913d08b4\\\"\"" pod="openstack/prometheus-metric-storage-0" podUID="bef902c7-4e5f-4af9-bda4-0c92b8521901" Dec 10 11:14:02 crc kubenswrapper[4780]: I1210 11:14:02.140965 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"3e1a1225-bdae-4dcb-b10a-02504fe590cd","Type":"ContainerStarted","Data":"04c7a62b2a481ce849b033c07921b681cbdfdd0675d7b67a80c90307747fe4b3"} Dec 10 11:14:02 crc kubenswrapper[4780]: I1210 11:14:02.141561 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"3e1a1225-bdae-4dcb-b10a-02504fe590cd","Type":"ContainerStarted","Data":"3076efe344fcbed4850ee5a84f6c5820adb15ed90d49215fd8dc9738bf611a3b"} Dec 10 11:14:02 crc kubenswrapper[4780]: E1210 11:14:02.143968 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"thanos-sidecar\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/cluster-observability-operator/thanos-rhel9@sha256:d972f4faa5e9c121402d23ed85002f26af48ec36b1b71a7489d677b3913d08b4\\\"\"" pod="openstack/prometheus-metric-storage-0" podUID="bef902c7-4e5f-4af9-bda4-0c92b8521901" Dec 10 11:14:03 crc kubenswrapper[4780]: I1210 11:14:03.993308 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-server-0" Dec 10 11:14:04 crc kubenswrapper[4780]: I1210 11:14:04.320454 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-cell1-server-0" Dec 10 11:14:04 crc kubenswrapper[4780]: I1210 11:14:04.618956 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-db-create-k47wv"] Dec 10 11:14:04 crc kubenswrapper[4780]: E1210 11:14:04.620997 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d3812dae-43fb-4c93-9c78-e5175065180f" containerName="ovn-config" Dec 10 11:14:04 crc kubenswrapper[4780]: I1210 11:14:04.626531 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="d3812dae-43fb-4c93-9c78-e5175065180f" containerName="ovn-config" Dec 10 11:14:04 crc kubenswrapper[4780]: I1210 11:14:04.627307 4780 memory_manager.go:354] "RemoveStaleState removing state" podUID="d3812dae-43fb-4c93-9c78-e5175065180f" containerName="ovn-config" Dec 10 11:14:04 crc kubenswrapper[4780]: I1210 11:14:04.628378 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-db-create-k47wv" Dec 10 11:14:04 crc kubenswrapper[4780]: I1210 11:14:04.652885 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-create-k47wv"] Dec 10 11:14:04 crc kubenswrapper[4780]: I1210 11:14:04.774572 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-b224-account-create-update-klk96"] Dec 10 11:14:04 crc kubenswrapper[4780]: I1210 11:14:04.781396 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5tc2h\" (UniqueName: \"kubernetes.io/projected/cce757bb-1e72-440d-97e9-63b08f5aa63b-kube-api-access-5tc2h\") pod \"barbican-db-create-k47wv\" (UID: \"cce757bb-1e72-440d-97e9-63b08f5aa63b\") " pod="openstack/barbican-db-create-k47wv" Dec 10 11:14:04 crc kubenswrapper[4780]: I1210 11:14:04.781686 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/cce757bb-1e72-440d-97e9-63b08f5aa63b-operator-scripts\") pod \"barbican-db-create-k47wv\" (UID: \"cce757bb-1e72-440d-97e9-63b08f5aa63b\") " pod="openstack/barbican-db-create-k47wv" Dec 10 11:14:04 crc kubenswrapper[4780]: I1210 11:14:04.816180 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-b224-account-create-update-klk96" Dec 10 11:14:04 crc kubenswrapper[4780]: I1210 11:14:04.816642 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-b224-account-create-update-klk96"] Dec 10 11:14:04 crc kubenswrapper[4780]: I1210 11:14:04.862957 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-db-secret" Dec 10 11:14:04 crc kubenswrapper[4780]: I1210 11:14:04.913456 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-86289\" (UniqueName: \"kubernetes.io/projected/cd7baf7c-6124-4c2f-ade4-302171c1e93f-kube-api-access-86289\") pod \"barbican-b224-account-create-update-klk96\" (UID: \"cd7baf7c-6124-4c2f-ade4-302171c1e93f\") " pod="openstack/barbican-b224-account-create-update-klk96" Dec 10 11:14:04 crc kubenswrapper[4780]: I1210 11:14:04.913644 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5tc2h\" (UniqueName: \"kubernetes.io/projected/cce757bb-1e72-440d-97e9-63b08f5aa63b-kube-api-access-5tc2h\") pod \"barbican-db-create-k47wv\" (UID: \"cce757bb-1e72-440d-97e9-63b08f5aa63b\") " pod="openstack/barbican-db-create-k47wv" Dec 10 11:14:04 crc kubenswrapper[4780]: I1210 11:14:04.913749 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/cd7baf7c-6124-4c2f-ade4-302171c1e93f-operator-scripts\") pod \"barbican-b224-account-create-update-klk96\" (UID: \"cd7baf7c-6124-4c2f-ade4-302171c1e93f\") " pod="openstack/barbican-b224-account-create-update-klk96" Dec 10 11:14:04 crc kubenswrapper[4780]: I1210 11:14:04.920286 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/cce757bb-1e72-440d-97e9-63b08f5aa63b-operator-scripts\") pod \"barbican-db-create-k47wv\" (UID: \"cce757bb-1e72-440d-97e9-63b08f5aa63b\") " pod="openstack/barbican-db-create-k47wv" Dec 10 11:14:04 crc kubenswrapper[4780]: I1210 11:14:04.938954 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for 
volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/cce757bb-1e72-440d-97e9-63b08f5aa63b-operator-scripts\") pod \"barbican-db-create-k47wv\" (UID: \"cce757bb-1e72-440d-97e9-63b08f5aa63b\") " pod="openstack/barbican-db-create-k47wv" Dec 10 11:14:05 crc kubenswrapper[4780]: I1210 11:14:05.030047 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5tc2h\" (UniqueName: \"kubernetes.io/projected/cce757bb-1e72-440d-97e9-63b08f5aa63b-kube-api-access-5tc2h\") pod \"barbican-db-create-k47wv\" (UID: \"cce757bb-1e72-440d-97e9-63b08f5aa63b\") " pod="openstack/barbican-db-create-k47wv" Dec 10 11:14:05 crc kubenswrapper[4780]: I1210 11:14:05.040822 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-86289\" (UniqueName: \"kubernetes.io/projected/cd7baf7c-6124-4c2f-ade4-302171c1e93f-kube-api-access-86289\") pod \"barbican-b224-account-create-update-klk96\" (UID: \"cd7baf7c-6124-4c2f-ade4-302171c1e93f\") " pod="openstack/barbican-b224-account-create-update-klk96" Dec 10 11:14:05 crc kubenswrapper[4780]: I1210 11:14:05.041181 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/cd7baf7c-6124-4c2f-ade4-302171c1e93f-operator-scripts\") pod \"barbican-b224-account-create-update-klk96\" (UID: \"cd7baf7c-6124-4c2f-ade4-302171c1e93f\") " pod="openstack/barbican-b224-account-create-update-klk96" Dec 10 11:14:05 crc kubenswrapper[4780]: I1210 11:14:05.042623 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/cd7baf7c-6124-4c2f-ade4-302171c1e93f-operator-scripts\") pod \"barbican-b224-account-create-update-klk96\" (UID: \"cd7baf7c-6124-4c2f-ade4-302171c1e93f\") " pod="openstack/barbican-b224-account-create-update-klk96" Dec 10 11:14:05 crc kubenswrapper[4780]: I1210 11:14:05.089100 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-db-create-mnfzx"] Dec 10 11:14:05 crc kubenswrapper[4780]: I1210 11:14:05.104016 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-db-create-mnfzx" Dec 10 11:14:05 crc kubenswrapper[4780]: I1210 11:14:05.127225 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-db-create-mnfzx"] Dec 10 11:14:05 crc kubenswrapper[4780]: I1210 11:14:05.165579 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-b379-account-create-update-7m27l"] Dec 10 11:14:05 crc kubenswrapper[4780]: I1210 11:14:05.177100 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-86289\" (UniqueName: \"kubernetes.io/projected/cd7baf7c-6124-4c2f-ade4-302171c1e93f-kube-api-access-86289\") pod \"barbican-b224-account-create-update-klk96\" (UID: \"cd7baf7c-6124-4c2f-ade4-302171c1e93f\") " pod="openstack/barbican-b224-account-create-update-klk96" Dec 10 11:14:05 crc kubenswrapper[4780]: I1210 11:14:05.177192 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-b379-account-create-update-7m27l" Dec 10 11:14:05 crc kubenswrapper[4780]: I1210 11:14:05.233147 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"heat-db-secret" Dec 10 11:14:05 crc kubenswrapper[4780]: I1210 11:14:05.246884 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-b224-account-create-update-klk96" Dec 10 11:14:05 crc kubenswrapper[4780]: I1210 11:14:05.249102 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g2fc8\" (UniqueName: \"kubernetes.io/projected/33cacd55-bfbd-437d-9a42-0a883b479efa-kube-api-access-g2fc8\") pod \"heat-db-create-mnfzx\" (UID: \"33cacd55-bfbd-437d-9a42-0a883b479efa\") " pod="openstack/heat-db-create-mnfzx" Dec 10 11:14:05 crc kubenswrapper[4780]: I1210 11:14:05.249341 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/33cacd55-bfbd-437d-9a42-0a883b479efa-operator-scripts\") pod \"heat-db-create-mnfzx\" (UID: \"33cacd55-bfbd-437d-9a42-0a883b479efa\") " pod="openstack/heat-db-create-mnfzx" Dec 10 11:14:05 crc kubenswrapper[4780]: I1210 11:14:05.258117 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-b379-account-create-update-7m27l"] Dec 10 11:14:05 crc kubenswrapper[4780]: I1210 11:14:05.292637 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-create-k47wv" Dec 10 11:14:05 crc kubenswrapper[4780]: I1210 11:14:05.357885 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jbq5b\" (UniqueName: \"kubernetes.io/projected/4d7da3d7-458f-4cd4-8a91-432c7395bdcc-kube-api-access-jbq5b\") pod \"heat-b379-account-create-update-7m27l\" (UID: \"4d7da3d7-458f-4cd4-8a91-432c7395bdcc\") " pod="openstack/heat-b379-account-create-update-7m27l" Dec 10 11:14:05 crc kubenswrapper[4780]: I1210 11:14:05.358241 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/33cacd55-bfbd-437d-9a42-0a883b479efa-operator-scripts\") pod \"heat-db-create-mnfzx\" (UID: \"33cacd55-bfbd-437d-9a42-0a883b479efa\") " pod="openstack/heat-db-create-mnfzx" Dec 10 11:14:05 crc kubenswrapper[4780]: I1210 11:14:05.358352 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4d7da3d7-458f-4cd4-8a91-432c7395bdcc-operator-scripts\") pod \"heat-b379-account-create-update-7m27l\" (UID: \"4d7da3d7-458f-4cd4-8a91-432c7395bdcc\") " pod="openstack/heat-b379-account-create-update-7m27l" Dec 10 11:14:05 crc kubenswrapper[4780]: I1210 11:14:05.358620 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g2fc8\" (UniqueName: \"kubernetes.io/projected/33cacd55-bfbd-437d-9a42-0a883b479efa-kube-api-access-g2fc8\") pod \"heat-db-create-mnfzx\" (UID: \"33cacd55-bfbd-437d-9a42-0a883b479efa\") " pod="openstack/heat-db-create-mnfzx" Dec 10 11:14:05 crc kubenswrapper[4780]: I1210 11:14:05.359461 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/33cacd55-bfbd-437d-9a42-0a883b479efa-operator-scripts\") pod \"heat-db-create-mnfzx\" (UID: \"33cacd55-bfbd-437d-9a42-0a883b479efa\") " pod="openstack/heat-db-create-mnfzx" Dec 10 11:14:05 crc kubenswrapper[4780]: I1210 11:14:05.399882 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g2fc8\" (UniqueName: \"kubernetes.io/projected/33cacd55-bfbd-437d-9a42-0a883b479efa-kube-api-access-g2fc8\") pod \"heat-db-create-mnfzx\" (UID: 
\"33cacd55-bfbd-437d-9a42-0a883b479efa\") " pod="openstack/heat-db-create-mnfzx" Dec 10 11:14:05 crc kubenswrapper[4780]: I1210 11:14:05.443738 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-db-create-mnfzx" Dec 10 11:14:05 crc kubenswrapper[4780]: I1210 11:14:05.462473 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4d7da3d7-458f-4cd4-8a91-432c7395bdcc-operator-scripts\") pod \"heat-b379-account-create-update-7m27l\" (UID: \"4d7da3d7-458f-4cd4-8a91-432c7395bdcc\") " pod="openstack/heat-b379-account-create-update-7m27l" Dec 10 11:14:05 crc kubenswrapper[4780]: I1210 11:14:05.463071 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jbq5b\" (UniqueName: \"kubernetes.io/projected/4d7da3d7-458f-4cd4-8a91-432c7395bdcc-kube-api-access-jbq5b\") pod \"heat-b379-account-create-update-7m27l\" (UID: \"4d7da3d7-458f-4cd4-8a91-432c7395bdcc\") " pod="openstack/heat-b379-account-create-update-7m27l" Dec 10 11:14:05 crc kubenswrapper[4780]: I1210 11:14:05.477738 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4d7da3d7-458f-4cd4-8a91-432c7395bdcc-operator-scripts\") pod \"heat-b379-account-create-update-7m27l\" (UID: \"4d7da3d7-458f-4cd4-8a91-432c7395bdcc\") " pod="openstack/heat-b379-account-create-update-7m27l" Dec 10 11:14:05 crc kubenswrapper[4780]: I1210 11:14:05.523683 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jbq5b\" (UniqueName: \"kubernetes.io/projected/4d7da3d7-458f-4cd4-8a91-432c7395bdcc-kube-api-access-jbq5b\") pod \"heat-b379-account-create-update-7m27l\" (UID: \"4d7da3d7-458f-4cd4-8a91-432c7395bdcc\") " pod="openstack/heat-b379-account-create-update-7m27l" Dec 10 11:14:05 crc kubenswrapper[4780]: I1210 11:14:05.561278 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-db-create-dbhzs"] Dec 10 11:14:05 crc kubenswrapper[4780]: I1210 11:14:05.563185 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-create-dbhzs" Dec 10 11:14:05 crc kubenswrapper[4780]: I1210 11:14:05.581342 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-5b53-account-create-update-rczqg"] Dec 10 11:14:05 crc kubenswrapper[4780]: I1210 11:14:05.585563 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-5b53-account-create-update-rczqg" Dec 10 11:14:05 crc kubenswrapper[4780]: I1210 11:14:05.592617 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-b379-account-create-update-7m27l" Dec 10 11:14:05 crc kubenswrapper[4780]: I1210 11:14:05.602490 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-db-secret" Dec 10 11:14:05 crc kubenswrapper[4780]: I1210 11:14:05.651307 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-create-dbhzs"] Dec 10 11:14:05 crc kubenswrapper[4780]: I1210 11:14:05.703551 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-p5jkm\" (UniqueName: \"kubernetes.io/projected/773f068b-e55b-4207-b91b-3fe664bfaec5-kube-api-access-p5jkm\") pod \"cinder-db-create-dbhzs\" (UID: \"773f068b-e55b-4207-b91b-3fe664bfaec5\") " pod="openstack/cinder-db-create-dbhzs" Dec 10 11:14:05 crc kubenswrapper[4780]: I1210 11:14:05.704006 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/773f068b-e55b-4207-b91b-3fe664bfaec5-operator-scripts\") pod \"cinder-db-create-dbhzs\" (UID: \"773f068b-e55b-4207-b91b-3fe664bfaec5\") " pod="openstack/cinder-db-create-dbhzs" Dec 10 11:14:05 crc kubenswrapper[4780]: I1210 11:14:05.704259 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-5b53-account-create-update-rczqg"] Dec 10 11:14:05 crc kubenswrapper[4780]: I1210 11:14:05.813659 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-p5jkm\" (UniqueName: \"kubernetes.io/projected/773f068b-e55b-4207-b91b-3fe664bfaec5-kube-api-access-p5jkm\") pod \"cinder-db-create-dbhzs\" (UID: \"773f068b-e55b-4207-b91b-3fe664bfaec5\") " pod="openstack/cinder-db-create-dbhzs" Dec 10 11:14:05 crc kubenswrapper[4780]: I1210 11:14:05.814101 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lktwr\" (UniqueName: \"kubernetes.io/projected/b734f2d8-19e5-4d25-882b-0f3a468dcde7-kube-api-access-lktwr\") pod \"cinder-5b53-account-create-update-rczqg\" (UID: \"b734f2d8-19e5-4d25-882b-0f3a468dcde7\") " pod="openstack/cinder-5b53-account-create-update-rczqg" Dec 10 11:14:05 crc kubenswrapper[4780]: I1210 11:14:05.814301 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/773f068b-e55b-4207-b91b-3fe664bfaec5-operator-scripts\") pod \"cinder-db-create-dbhzs\" (UID: \"773f068b-e55b-4207-b91b-3fe664bfaec5\") " pod="openstack/cinder-db-create-dbhzs" Dec 10 11:14:05 crc kubenswrapper[4780]: I1210 11:14:05.814345 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b734f2d8-19e5-4d25-882b-0f3a468dcde7-operator-scripts\") pod \"cinder-5b53-account-create-update-rczqg\" (UID: \"b734f2d8-19e5-4d25-882b-0f3a468dcde7\") " pod="openstack/cinder-5b53-account-create-update-rczqg" Dec 10 11:14:05 crc kubenswrapper[4780]: I1210 11:14:05.815934 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/773f068b-e55b-4207-b91b-3fe664bfaec5-operator-scripts\") pod \"cinder-db-create-dbhzs\" (UID: \"773f068b-e55b-4207-b91b-3fe664bfaec5\") " pod="openstack/cinder-db-create-dbhzs" Dec 10 11:14:05 crc kubenswrapper[4780]: I1210 11:14:05.829042 4780 kubelet.go:2421] "SyncLoop ADD" source="api" 
pods=["openstack/keystone-db-sync-td4vw"] Dec 10 11:14:05 crc kubenswrapper[4780]: I1210 11:14:05.835183 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-sync-td4vw" Dec 10 11:14:05 crc kubenswrapper[4780]: I1210 11:14:05.850614 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Dec 10 11:14:05 crc kubenswrapper[4780]: I1210 11:14:05.850877 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Dec 10 11:14:05 crc kubenswrapper[4780]: I1210 11:14:05.852365 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-86tfj" Dec 10 11:14:05 crc kubenswrapper[4780]: I1210 11:14:05.852521 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Dec 10 11:14:05 crc kubenswrapper[4780]: I1210 11:14:05.872106 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-p5jkm\" (UniqueName: \"kubernetes.io/projected/773f068b-e55b-4207-b91b-3fe664bfaec5-kube-api-access-p5jkm\") pod \"cinder-db-create-dbhzs\" (UID: \"773f068b-e55b-4207-b91b-3fe664bfaec5\") " pod="openstack/cinder-db-create-dbhzs" Dec 10 11:14:05 crc kubenswrapper[4780]: I1210 11:14:05.895700 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-db-create-g2bln"] Dec 10 11:14:05 crc kubenswrapper[4780]: I1210 11:14:05.898075 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-g2bln" Dec 10 11:14:05 crc kubenswrapper[4780]: I1210 11:14:05.937955 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2acffbad-6bc8-4c72-b800-c94b25aeb1bc-operator-scripts\") pod \"neutron-db-create-g2bln\" (UID: \"2acffbad-6bc8-4c72-b800-c94b25aeb1bc\") " pod="openstack/neutron-db-create-g2bln" Dec 10 11:14:05 crc kubenswrapper[4780]: I1210 11:14:05.938122 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b734f2d8-19e5-4d25-882b-0f3a468dcde7-operator-scripts\") pod \"cinder-5b53-account-create-update-rczqg\" (UID: \"b734f2d8-19e5-4d25-882b-0f3a468dcde7\") " pod="openstack/cinder-5b53-account-create-update-rczqg" Dec 10 11:14:05 crc kubenswrapper[4780]: I1210 11:14:05.938221 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lktwr\" (UniqueName: \"kubernetes.io/projected/b734f2d8-19e5-4d25-882b-0f3a468dcde7-kube-api-access-lktwr\") pod \"cinder-5b53-account-create-update-rczqg\" (UID: \"b734f2d8-19e5-4d25-882b-0f3a468dcde7\") " pod="openstack/cinder-5b53-account-create-update-rczqg" Dec 10 11:14:05 crc kubenswrapper[4780]: I1210 11:14:05.938311 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7p7mw\" (UniqueName: \"kubernetes.io/projected/2acffbad-6bc8-4c72-b800-c94b25aeb1bc-kube-api-access-7p7mw\") pod \"neutron-db-create-g2bln\" (UID: \"2acffbad-6bc8-4c72-b800-c94b25aeb1bc\") " pod="openstack/neutron-db-create-g2bln" Dec 10 11:14:05 crc kubenswrapper[4780]: I1210 11:14:05.939485 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b734f2d8-19e5-4d25-882b-0f3a468dcde7-operator-scripts\") pod \"cinder-5b53-account-create-update-rczqg\" 
(UID: \"b734f2d8-19e5-4d25-882b-0f3a468dcde7\") " pod="openstack/cinder-5b53-account-create-update-rczqg" Dec 10 11:14:06 crc kubenswrapper[4780]: I1210 11:14:06.005710 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-create-dbhzs" Dec 10 11:14:06 crc kubenswrapper[4780]: I1210 11:14:06.054157 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d94adae5-67fa-4707-9139-8bd4537a7e77-config-data\") pod \"keystone-db-sync-td4vw\" (UID: \"d94adae5-67fa-4707-9139-8bd4537a7e77\") " pod="openstack/keystone-db-sync-td4vw" Dec 10 11:14:06 crc kubenswrapper[4780]: I1210 11:14:06.054239 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zmztv\" (UniqueName: \"kubernetes.io/projected/d94adae5-67fa-4707-9139-8bd4537a7e77-kube-api-access-zmztv\") pod \"keystone-db-sync-td4vw\" (UID: \"d94adae5-67fa-4707-9139-8bd4537a7e77\") " pod="openstack/keystone-db-sync-td4vw" Dec 10 11:14:06 crc kubenswrapper[4780]: I1210 11:14:06.054281 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7p7mw\" (UniqueName: \"kubernetes.io/projected/2acffbad-6bc8-4c72-b800-c94b25aeb1bc-kube-api-access-7p7mw\") pod \"neutron-db-create-g2bln\" (UID: \"2acffbad-6bc8-4c72-b800-c94b25aeb1bc\") " pod="openstack/neutron-db-create-g2bln" Dec 10 11:14:06 crc kubenswrapper[4780]: I1210 11:14:06.054332 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2acffbad-6bc8-4c72-b800-c94b25aeb1bc-operator-scripts\") pod \"neutron-db-create-g2bln\" (UID: \"2acffbad-6bc8-4c72-b800-c94b25aeb1bc\") " pod="openstack/neutron-db-create-g2bln" Dec 10 11:14:06 crc kubenswrapper[4780]: I1210 11:14:06.054349 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d94adae5-67fa-4707-9139-8bd4537a7e77-combined-ca-bundle\") pod \"keystone-db-sync-td4vw\" (UID: \"d94adae5-67fa-4707-9139-8bd4537a7e77\") " pod="openstack/keystone-db-sync-td4vw" Dec 10 11:14:06 crc kubenswrapper[4780]: I1210 11:14:06.097098 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lktwr\" (UniqueName: \"kubernetes.io/projected/b734f2d8-19e5-4d25-882b-0f3a468dcde7-kube-api-access-lktwr\") pod \"cinder-5b53-account-create-update-rczqg\" (UID: \"b734f2d8-19e5-4d25-882b-0f3a468dcde7\") " pod="openstack/cinder-5b53-account-create-update-rczqg" Dec 10 11:14:06 crc kubenswrapper[4780]: I1210 11:14:06.119071 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2acffbad-6bc8-4c72-b800-c94b25aeb1bc-operator-scripts\") pod \"neutron-db-create-g2bln\" (UID: \"2acffbad-6bc8-4c72-b800-c94b25aeb1bc\") " pod="openstack/neutron-db-create-g2bln" Dec 10 11:14:06 crc kubenswrapper[4780]: I1210 11:14:06.152569 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-sync-td4vw"] Dec 10 11:14:06 crc kubenswrapper[4780]: I1210 11:14:06.165986 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-create-g2bln"] Dec 10 11:14:06 crc kubenswrapper[4780]: I1210 11:14:06.188367 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/d94adae5-67fa-4707-9139-8bd4537a7e77-combined-ca-bundle\") pod \"keystone-db-sync-td4vw\" (UID: \"d94adae5-67fa-4707-9139-8bd4537a7e77\") " pod="openstack/keystone-db-sync-td4vw" Dec 10 11:14:06 crc kubenswrapper[4780]: I1210 11:14:06.189311 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d94adae5-67fa-4707-9139-8bd4537a7e77-config-data\") pod \"keystone-db-sync-td4vw\" (UID: \"d94adae5-67fa-4707-9139-8bd4537a7e77\") " pod="openstack/keystone-db-sync-td4vw" Dec 10 11:14:06 crc kubenswrapper[4780]: I1210 11:14:06.189519 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zmztv\" (UniqueName: \"kubernetes.io/projected/d94adae5-67fa-4707-9139-8bd4537a7e77-kube-api-access-zmztv\") pod \"keystone-db-sync-td4vw\" (UID: \"d94adae5-67fa-4707-9139-8bd4537a7e77\") " pod="openstack/keystone-db-sync-td4vw" Dec 10 11:14:06 crc kubenswrapper[4780]: I1210 11:14:06.210506 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7p7mw\" (UniqueName: \"kubernetes.io/projected/2acffbad-6bc8-4c72-b800-c94b25aeb1bc-kube-api-access-7p7mw\") pod \"neutron-db-create-g2bln\" (UID: \"2acffbad-6bc8-4c72-b800-c94b25aeb1bc\") " pod="openstack/neutron-db-create-g2bln" Dec 10 11:14:06 crc kubenswrapper[4780]: I1210 11:14:06.220992 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d94adae5-67fa-4707-9139-8bd4537a7e77-config-data\") pod \"keystone-db-sync-td4vw\" (UID: \"d94adae5-67fa-4707-9139-8bd4537a7e77\") " pod="openstack/keystone-db-sync-td4vw" Dec 10 11:14:06 crc kubenswrapper[4780]: I1210 11:14:06.232724 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d94adae5-67fa-4707-9139-8bd4537a7e77-combined-ca-bundle\") pod \"keystone-db-sync-td4vw\" (UID: \"d94adae5-67fa-4707-9139-8bd4537a7e77\") " pod="openstack/keystone-db-sync-td4vw" Dec 10 11:14:06 crc kubenswrapper[4780]: I1210 11:14:06.278577 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zmztv\" (UniqueName: \"kubernetes.io/projected/d94adae5-67fa-4707-9139-8bd4537a7e77-kube-api-access-zmztv\") pod \"keystone-db-sync-td4vw\" (UID: \"d94adae5-67fa-4707-9139-8bd4537a7e77\") " pod="openstack/keystone-db-sync-td4vw" Dec 10 11:14:06 crc kubenswrapper[4780]: I1210 11:14:06.291948 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-sync-td4vw" Dec 10 11:14:06 crc kubenswrapper[4780]: I1210 11:14:06.324110 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-e8d6-account-create-update-75x74"] Dec 10 11:14:06 crc kubenswrapper[4780]: I1210 11:14:06.326453 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-e8d6-account-create-update-75x74" Dec 10 11:14:06 crc kubenswrapper[4780]: I1210 11:14:06.328506 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-e8d6-account-create-update-75x74"] Dec 10 11:14:06 crc kubenswrapper[4780]: I1210 11:14:06.329995 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-5b53-account-create-update-rczqg" Dec 10 11:14:06 crc kubenswrapper[4780]: I1210 11:14:06.332571 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-db-secret" Dec 10 11:14:06 crc kubenswrapper[4780]: I1210 11:14:06.409003 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f4qpg\" (UniqueName: \"kubernetes.io/projected/57e27a9d-d862-41f8-b10d-b28fb268f91c-kube-api-access-f4qpg\") pod \"neutron-e8d6-account-create-update-75x74\" (UID: \"57e27a9d-d862-41f8-b10d-b28fb268f91c\") " pod="openstack/neutron-e8d6-account-create-update-75x74" Dec 10 11:14:06 crc kubenswrapper[4780]: I1210 11:14:06.409183 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/57e27a9d-d862-41f8-b10d-b28fb268f91c-operator-scripts\") pod \"neutron-e8d6-account-create-update-75x74\" (UID: \"57e27a9d-d862-41f8-b10d-b28fb268f91c\") " pod="openstack/neutron-e8d6-account-create-update-75x74" Dec 10 11:14:06 crc kubenswrapper[4780]: I1210 11:14:06.439806 4780 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/openstack-galera-0" podUID="c31145f5-6188-4934-8ceb-a86ac4a0e997" containerName="galera" probeResult="failure" output="command timed out" Dec 10 11:14:06 crc kubenswrapper[4780]: I1210 11:14:06.453139 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-g2bln" Dec 10 11:14:06 crc kubenswrapper[4780]: I1210 11:14:06.513686 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-f4qpg\" (UniqueName: \"kubernetes.io/projected/57e27a9d-d862-41f8-b10d-b28fb268f91c-kube-api-access-f4qpg\") pod \"neutron-e8d6-account-create-update-75x74\" (UID: \"57e27a9d-d862-41f8-b10d-b28fb268f91c\") " pod="openstack/neutron-e8d6-account-create-update-75x74" Dec 10 11:14:06 crc kubenswrapper[4780]: I1210 11:14:06.514708 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/57e27a9d-d862-41f8-b10d-b28fb268f91c-operator-scripts\") pod \"neutron-e8d6-account-create-update-75x74\" (UID: \"57e27a9d-d862-41f8-b10d-b28fb268f91c\") " pod="openstack/neutron-e8d6-account-create-update-75x74" Dec 10 11:14:06 crc kubenswrapper[4780]: I1210 11:14:06.517654 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/57e27a9d-d862-41f8-b10d-b28fb268f91c-operator-scripts\") pod \"neutron-e8d6-account-create-update-75x74\" (UID: \"57e27a9d-d862-41f8-b10d-b28fb268f91c\") " pod="openstack/neutron-e8d6-account-create-update-75x74" Dec 10 11:14:06 crc kubenswrapper[4780]: I1210 11:14:06.555410 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-f4qpg\" (UniqueName: \"kubernetes.io/projected/57e27a9d-d862-41f8-b10d-b28fb268f91c-kube-api-access-f4qpg\") pod \"neutron-e8d6-account-create-update-75x74\" (UID: \"57e27a9d-d862-41f8-b10d-b28fb268f91c\") " pod="openstack/neutron-e8d6-account-create-update-75x74" Dec 10 11:14:06 crc kubenswrapper[4780]: I1210 11:14:06.748402 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-e8d6-account-create-update-75x74" Dec 10 11:14:06 crc kubenswrapper[4780]: I1210 11:14:06.984190 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-create-k47wv"] Dec 10 11:14:07 crc kubenswrapper[4780]: I1210 11:14:07.433442 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-b379-account-create-update-7m27l"] Dec 10 11:14:07 crc kubenswrapper[4780]: I1210 11:14:07.436089 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-k47wv" event={"ID":"cce757bb-1e72-440d-97e9-63b08f5aa63b","Type":"ContainerStarted","Data":"5a16baf008fdc53588198b85dc49e2cc0678c597a10c76d93a1ba6d1c897c746"} Dec 10 11:14:07 crc kubenswrapper[4780]: I1210 11:14:07.457226 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-b224-account-create-update-klk96"] Dec 10 11:14:07 crc kubenswrapper[4780]: W1210 11:14:07.481249 4780 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod4d7da3d7_458f_4cd4_8a91_432c7395bdcc.slice/crio-189915b394ab0826778317a319e736634ff2620c852a567e52290f0ce747a41e WatchSource:0}: Error finding container 189915b394ab0826778317a319e736634ff2620c852a567e52290f0ce747a41e: Status 404 returned error can't find the container with id 189915b394ab0826778317a319e736634ff2620c852a567e52290f0ce747a41e Dec 10 11:14:07 crc kubenswrapper[4780]: W1210 11:14:07.551467 4780 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podcd7baf7c_6124_4c2f_ade4_302171c1e93f.slice/crio-da4c10cafc65a50f16971b613daafcbbc994ac8cbdd84d0d6c8f233ac0684ca9 WatchSource:0}: Error finding container da4c10cafc65a50f16971b613daafcbbc994ac8cbdd84d0d6c8f233ac0684ca9: Status 404 returned error can't find the container with id da4c10cafc65a50f16971b613daafcbbc994ac8cbdd84d0d6c8f233ac0684ca9 Dec 10 11:14:07 crc kubenswrapper[4780]: I1210 11:14:07.885795 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-db-create-mnfzx"] Dec 10 11:14:07 crc kubenswrapper[4780]: I1210 11:14:07.910562 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-create-dbhzs"] Dec 10 11:14:08 crc kubenswrapper[4780]: I1210 11:14:08.062551 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-sync-td4vw"] Dec 10 11:14:08 crc kubenswrapper[4780]: I1210 11:14:08.088867 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-5b53-account-create-update-rczqg"] Dec 10 11:14:08 crc kubenswrapper[4780]: I1210 11:14:08.097969 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-create-g2bln"] Dec 10 11:14:08 crc kubenswrapper[4780]: W1210 11:14:08.108502 4780 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod2acffbad_6bc8_4c72_b800_c94b25aeb1bc.slice/crio-6a04e37a62aca374ca07086077c8f3b9cc2e26bb7c3e09d9f66d08f6361fed17 WatchSource:0}: Error finding container 6a04e37a62aca374ca07086077c8f3b9cc2e26bb7c3e09d9f66d08f6361fed17: Status 404 returned error can't find the container with id 6a04e37a62aca374ca07086077c8f3b9cc2e26bb7c3e09d9f66d08f6361fed17 Dec 10 11:14:08 crc kubenswrapper[4780]: I1210 11:14:08.256948 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-e8d6-account-create-update-75x74"] Dec 10 11:14:08 crc kubenswrapper[4780]: I1210 
11:14:08.501000 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-b379-account-create-update-7m27l" event={"ID":"4d7da3d7-458f-4cd4-8a91-432c7395bdcc","Type":"ContainerStarted","Data":"27ce8d4d83dab1897d1108d83fbe0df7c06ef7cf970b5cb8416b7c1cd68fdf4d"} Dec 10 11:14:08 crc kubenswrapper[4780]: I1210 11:14:08.501077 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-b379-account-create-update-7m27l" event={"ID":"4d7da3d7-458f-4cd4-8a91-432c7395bdcc","Type":"ContainerStarted","Data":"189915b394ab0826778317a319e736634ff2620c852a567e52290f0ce747a41e"} Dec 10 11:14:08 crc kubenswrapper[4780]: I1210 11:14:08.511358 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-db-create-mnfzx" event={"ID":"33cacd55-bfbd-437d-9a42-0a883b479efa","Type":"ContainerStarted","Data":"8f3c4cda3084416cd5594defabd6b781e5ba1a8ae81a08847fbb5a9063ef992d"} Dec 10 11:14:08 crc kubenswrapper[4780]: I1210 11:14:08.526524 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-5b53-account-create-update-rczqg" event={"ID":"b734f2d8-19e5-4d25-882b-0f3a468dcde7","Type":"ContainerStarted","Data":"c2c7bf11f6ef0d08f059e167b104aaa1ffe47d13ba80cea6bf7ce7004e2cc60c"} Dec 10 11:14:08 crc kubenswrapper[4780]: I1210 11:14:08.531636 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-td4vw" event={"ID":"d94adae5-67fa-4707-9139-8bd4537a7e77","Type":"ContainerStarted","Data":"fe31ab5b6b1a30f201d932d20c3308c4a2acadfc37e3957461ca63ba944d0503"} Dec 10 11:14:08 crc kubenswrapper[4780]: I1210 11:14:08.570529 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/heat-b379-account-create-update-7m27l" podStartSLOduration=3.570496 podStartE2EDuration="3.570496s" podCreationTimestamp="2025-12-10 11:14:05 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 11:14:08.542729422 +0000 UTC m=+1753.396122865" watchObservedRunningTime="2025-12-10 11:14:08.570496 +0000 UTC m=+1753.423889443" Dec 10 11:14:08 crc kubenswrapper[4780]: I1210 11:14:08.573083 4780 generic.go:334] "Generic (PLEG): container finished" podID="cce757bb-1e72-440d-97e9-63b08f5aa63b" containerID="c3498a5c7e74020e0637f139a6238376da4b144ea8ae931f302b2b46fb07c135" exitCode=0 Dec 10 11:14:08 crc kubenswrapper[4780]: I1210 11:14:08.573227 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-k47wv" event={"ID":"cce757bb-1e72-440d-97e9-63b08f5aa63b","Type":"ContainerDied","Data":"c3498a5c7e74020e0637f139a6238376da4b144ea8ae931f302b2b46fb07c135"} Dec 10 11:14:08 crc kubenswrapper[4780]: I1210 11:14:08.577981 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-g2bln" event={"ID":"2acffbad-6bc8-4c72-b800-c94b25aeb1bc","Type":"ContainerStarted","Data":"6a04e37a62aca374ca07086077c8f3b9cc2e26bb7c3e09d9f66d08f6361fed17"} Dec 10 11:14:08 crc kubenswrapper[4780]: I1210 11:14:08.580358 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-dbhzs" event={"ID":"773f068b-e55b-4207-b91b-3fe664bfaec5","Type":"ContainerStarted","Data":"06ee366c9a5d3ff9200a0992f314a8f2447f871b915518dd6c2dc1530189568d"} Dec 10 11:14:08 crc kubenswrapper[4780]: I1210 11:14:08.585086 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-e8d6-account-create-update-75x74" 
event={"ID":"57e27a9d-d862-41f8-b10d-b28fb268f91c","Type":"ContainerStarted","Data":"ee12a833a041184b263e94cdb107db80608b2ef23f6efc748dd8cc1920706d4c"} Dec 10 11:14:08 crc kubenswrapper[4780]: I1210 11:14:08.599058 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-b224-account-create-update-klk96" event={"ID":"cd7baf7c-6124-4c2f-ade4-302171c1e93f","Type":"ContainerStarted","Data":"4317dfe2f5cc63cd1b8ee6d404454d8bc94a82afc79a7a33c8e583f41a216306"} Dec 10 11:14:08 crc kubenswrapper[4780]: I1210 11:14:08.599151 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-b224-account-create-update-klk96" event={"ID":"cd7baf7c-6124-4c2f-ade4-302171c1e93f","Type":"ContainerStarted","Data":"da4c10cafc65a50f16971b613daafcbbc994ac8cbdd84d0d6c8f233ac0684ca9"} Dec 10 11:14:09 crc kubenswrapper[4780]: I1210 11:14:09.656982 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-g2bln" event={"ID":"2acffbad-6bc8-4c72-b800-c94b25aeb1bc","Type":"ContainerStarted","Data":"fc8064f2fffd4a43901b5f8b7568dcfc910c5ad47bc6953e2059698b789f2907"} Dec 10 11:14:09 crc kubenswrapper[4780]: I1210 11:14:09.682362 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-db-create-mnfzx" event={"ID":"33cacd55-bfbd-437d-9a42-0a883b479efa","Type":"ContainerStarted","Data":"1c2623bba33440e660cbfa0e9390743929e58f0f5a740cec224c847161ea0be1"} Dec 10 11:14:09 crc kubenswrapper[4780]: I1210 11:14:09.687463 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-b224-account-create-update-klk96" podStartSLOduration=5.68741329 podStartE2EDuration="5.68741329s" podCreationTimestamp="2025-12-10 11:14:04 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 11:14:08.698800883 +0000 UTC m=+1753.552194376" watchObservedRunningTime="2025-12-10 11:14:09.68741329 +0000 UTC m=+1754.540806743" Dec 10 11:14:09 crc kubenswrapper[4780]: I1210 11:14:09.737605 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-dbhzs" event={"ID":"773f068b-e55b-4207-b91b-3fe664bfaec5","Type":"ContainerStarted","Data":"55dd627b492eb039320b792bcaa7636eaa93d533d9ca07479d6db5284a53cb0a"} Dec 10 11:14:09 crc kubenswrapper[4780]: I1210 11:14:09.739304 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/heat-db-create-mnfzx" podStartSLOduration=5.739272523 podStartE2EDuration="5.739272523s" podCreationTimestamp="2025-12-10 11:14:04 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 11:14:09.719303823 +0000 UTC m=+1754.572697266" watchObservedRunningTime="2025-12-10 11:14:09.739272523 +0000 UTC m=+1754.592665966" Dec 10 11:14:09 crc kubenswrapper[4780]: I1210 11:14:09.749107 4780 generic.go:334] "Generic (PLEG): container finished" podID="cd7baf7c-6124-4c2f-ade4-302171c1e93f" containerID="4317dfe2f5cc63cd1b8ee6d404454d8bc94a82afc79a7a33c8e583f41a216306" exitCode=0 Dec 10 11:14:09 crc kubenswrapper[4780]: I1210 11:14:09.749403 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-b224-account-create-update-klk96" event={"ID":"cd7baf7c-6124-4c2f-ade4-302171c1e93f","Type":"ContainerDied","Data":"4317dfe2f5cc63cd1b8ee6d404454d8bc94a82afc79a7a33c8e583f41a216306"} Dec 10 11:14:09 crc kubenswrapper[4780]: I1210 11:14:09.762326 4780 generic.go:334] "Generic 
(PLEG): container finished" podID="4d7da3d7-458f-4cd4-8a91-432c7395bdcc" containerID="27ce8d4d83dab1897d1108d83fbe0df7c06ef7cf970b5cb8416b7c1cd68fdf4d" exitCode=0 Dec 10 11:14:09 crc kubenswrapper[4780]: I1210 11:14:09.762477 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-b379-account-create-update-7m27l" event={"ID":"4d7da3d7-458f-4cd4-8a91-432c7395bdcc","Type":"ContainerDied","Data":"27ce8d4d83dab1897d1108d83fbe0df7c06ef7cf970b5cb8416b7c1cd68fdf4d"} Dec 10 11:14:09 crc kubenswrapper[4780]: I1210 11:14:09.783498 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-db-create-dbhzs" podStartSLOduration=4.78346695 podStartE2EDuration="4.78346695s" podCreationTimestamp="2025-12-10 11:14:05 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 11:14:09.769015361 +0000 UTC m=+1754.622408814" watchObservedRunningTime="2025-12-10 11:14:09.78346695 +0000 UTC m=+1754.636860393" Dec 10 11:14:10 crc kubenswrapper[4780]: I1210 11:14:10.779080 4780 generic.go:334] "Generic (PLEG): container finished" podID="773f068b-e55b-4207-b91b-3fe664bfaec5" containerID="55dd627b492eb039320b792bcaa7636eaa93d533d9ca07479d6db5284a53cb0a" exitCode=0 Dec 10 11:14:10 crc kubenswrapper[4780]: I1210 11:14:10.779167 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-dbhzs" event={"ID":"773f068b-e55b-4207-b91b-3fe664bfaec5","Type":"ContainerDied","Data":"55dd627b492eb039320b792bcaa7636eaa93d533d9ca07479d6db5284a53cb0a"} Dec 10 11:14:10 crc kubenswrapper[4780]: I1210 11:14:10.782634 4780 generic.go:334] "Generic (PLEG): container finished" podID="2acffbad-6bc8-4c72-b800-c94b25aeb1bc" containerID="fc8064f2fffd4a43901b5f8b7568dcfc910c5ad47bc6953e2059698b789f2907" exitCode=0 Dec 10 11:14:10 crc kubenswrapper[4780]: I1210 11:14:10.782821 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-g2bln" event={"ID":"2acffbad-6bc8-4c72-b800-c94b25aeb1bc","Type":"ContainerDied","Data":"fc8064f2fffd4a43901b5f8b7568dcfc910c5ad47bc6953e2059698b789f2907"} Dec 10 11:14:11 crc kubenswrapper[4780]: I1210 11:14:11.959780 4780 scope.go:117] "RemoveContainer" containerID="90f38aa24e74e78a263a82b742bebf91991ff25f2212114904e33abbbd82de16" Dec 10 11:14:11 crc kubenswrapper[4780]: E1210 11:14:11.964862 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xhdr5_openshift-machine-config-operator(6bf1dca1-b191-4796-b326-baac53e84045)\"" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" podUID="6bf1dca1-b191-4796-b326-baac53e84045" Dec 10 11:14:12 crc kubenswrapper[4780]: I1210 11:14:12.661565 4780 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-db-create-g2bln" Dec 10 11:14:12 crc kubenswrapper[4780]: I1210 11:14:12.819334 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-dbhzs" event={"ID":"773f068b-e55b-4207-b91b-3fe664bfaec5","Type":"ContainerDied","Data":"06ee366c9a5d3ff9200a0992f314a8f2447f871b915518dd6c2dc1530189568d"} Dec 10 11:14:12 crc kubenswrapper[4780]: I1210 11:14:12.819974 4780 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="06ee366c9a5d3ff9200a0992f314a8f2447f871b915518dd6c2dc1530189568d" Dec 10 11:14:12 crc kubenswrapper[4780]: I1210 11:14:12.822963 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-b224-account-create-update-klk96" event={"ID":"cd7baf7c-6124-4c2f-ade4-302171c1e93f","Type":"ContainerDied","Data":"da4c10cafc65a50f16971b613daafcbbc994ac8cbdd84d0d6c8f233ac0684ca9"} Dec 10 11:14:12 crc kubenswrapper[4780]: I1210 11:14:12.823007 4780 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="da4c10cafc65a50f16971b613daafcbbc994ac8cbdd84d0d6c8f233ac0684ca9" Dec 10 11:14:12 crc kubenswrapper[4780]: I1210 11:14:12.827729 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-k47wv" event={"ID":"cce757bb-1e72-440d-97e9-63b08f5aa63b","Type":"ContainerDied","Data":"5a16baf008fdc53588198b85dc49e2cc0678c597a10c76d93a1ba6d1c897c746"} Dec 10 11:14:12 crc kubenswrapper[4780]: I1210 11:14:12.827786 4780 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="5a16baf008fdc53588198b85dc49e2cc0678c597a10c76d93a1ba6d1c897c746" Dec 10 11:14:12 crc kubenswrapper[4780]: I1210 11:14:12.835611 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-b379-account-create-update-7m27l" event={"ID":"4d7da3d7-458f-4cd4-8a91-432c7395bdcc","Type":"ContainerDied","Data":"189915b394ab0826778317a319e736634ff2620c852a567e52290f0ce747a41e"} Dec 10 11:14:12 crc kubenswrapper[4780]: I1210 11:14:12.836301 4780 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="189915b394ab0826778317a319e736634ff2620c852a567e52290f0ce747a41e" Dec 10 11:14:12 crc kubenswrapper[4780]: I1210 11:14:12.838135 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7p7mw\" (UniqueName: \"kubernetes.io/projected/2acffbad-6bc8-4c72-b800-c94b25aeb1bc-kube-api-access-7p7mw\") pod \"2acffbad-6bc8-4c72-b800-c94b25aeb1bc\" (UID: \"2acffbad-6bc8-4c72-b800-c94b25aeb1bc\") " Dec 10 11:14:12 crc kubenswrapper[4780]: I1210 11:14:12.838425 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2acffbad-6bc8-4c72-b800-c94b25aeb1bc-operator-scripts\") pod \"2acffbad-6bc8-4c72-b800-c94b25aeb1bc\" (UID: \"2acffbad-6bc8-4c72-b800-c94b25aeb1bc\") " Dec 10 11:14:12 crc kubenswrapper[4780]: I1210 11:14:12.840655 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2acffbad-6bc8-4c72-b800-c94b25aeb1bc-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "2acffbad-6bc8-4c72-b800-c94b25aeb1bc" (UID: "2acffbad-6bc8-4c72-b800-c94b25aeb1bc"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 11:14:12 crc kubenswrapper[4780]: I1210 11:14:12.855966 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2acffbad-6bc8-4c72-b800-c94b25aeb1bc-kube-api-access-7p7mw" (OuterVolumeSpecName: "kube-api-access-7p7mw") pod "2acffbad-6bc8-4c72-b800-c94b25aeb1bc" (UID: "2acffbad-6bc8-4c72-b800-c94b25aeb1bc"). InnerVolumeSpecName "kube-api-access-7p7mw". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:14:12 crc kubenswrapper[4780]: I1210 11:14:12.859044 4780 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-create-k47wv" Dec 10 11:14:12 crc kubenswrapper[4780]: I1210 11:14:12.860569 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-g2bln" event={"ID":"2acffbad-6bc8-4c72-b800-c94b25aeb1bc","Type":"ContainerDied","Data":"6a04e37a62aca374ca07086077c8f3b9cc2e26bb7c3e09d9f66d08f6361fed17"} Dec 10 11:14:12 crc kubenswrapper[4780]: I1210 11:14:12.860653 4780 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="6a04e37a62aca374ca07086077c8f3b9cc2e26bb7c3e09d9f66d08f6361fed17" Dec 10 11:14:12 crc kubenswrapper[4780]: I1210 11:14:12.860827 4780 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-g2bln" Dec 10 11:14:12 crc kubenswrapper[4780]: I1210 11:14:12.862785 4780 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-b224-account-create-update-klk96" Dec 10 11:14:12 crc kubenswrapper[4780]: I1210 11:14:12.869418 4780 generic.go:334] "Generic (PLEG): container finished" podID="33cacd55-bfbd-437d-9a42-0a883b479efa" containerID="1c2623bba33440e660cbfa0e9390743929e58f0f5a740cec224c847161ea0be1" exitCode=0 Dec 10 11:14:12 crc kubenswrapper[4780]: I1210 11:14:12.869521 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-db-create-mnfzx" event={"ID":"33cacd55-bfbd-437d-9a42-0a883b479efa","Type":"ContainerDied","Data":"1c2623bba33440e660cbfa0e9390743929e58f0f5a740cec224c847161ea0be1"} Dec 10 11:14:12 crc kubenswrapper[4780]: I1210 11:14:12.885273 4780 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7p7mw\" (UniqueName: \"kubernetes.io/projected/2acffbad-6bc8-4c72-b800-c94b25aeb1bc-kube-api-access-7p7mw\") on node \"crc\" DevicePath \"\"" Dec 10 11:14:12 crc kubenswrapper[4780]: I1210 11:14:12.886721 4780 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2acffbad-6bc8-4c72-b800-c94b25aeb1bc-operator-scripts\") on node \"crc\" DevicePath \"\"" Dec 10 11:14:12 crc kubenswrapper[4780]: I1210 11:14:12.896173 4780 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-b379-account-create-update-7m27l" Dec 10 11:14:12 crc kubenswrapper[4780]: I1210 11:14:12.994526 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/cd7baf7c-6124-4c2f-ade4-302171c1e93f-operator-scripts\") pod \"cd7baf7c-6124-4c2f-ade4-302171c1e93f\" (UID: \"cd7baf7c-6124-4c2f-ade4-302171c1e93f\") " Dec 10 11:14:12 crc kubenswrapper[4780]: I1210 11:14:12.994732 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jbq5b\" (UniqueName: \"kubernetes.io/projected/4d7da3d7-458f-4cd4-8a91-432c7395bdcc-kube-api-access-jbq5b\") pod \"4d7da3d7-458f-4cd4-8a91-432c7395bdcc\" (UID: \"4d7da3d7-458f-4cd4-8a91-432c7395bdcc\") " Dec 10 11:14:12 crc kubenswrapper[4780]: I1210 11:14:12.994911 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/cce757bb-1e72-440d-97e9-63b08f5aa63b-operator-scripts\") pod \"cce757bb-1e72-440d-97e9-63b08f5aa63b\" (UID: \"cce757bb-1e72-440d-97e9-63b08f5aa63b\") " Dec 10 11:14:12 crc kubenswrapper[4780]: I1210 11:14:12.994993 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-86289\" (UniqueName: \"kubernetes.io/projected/cd7baf7c-6124-4c2f-ade4-302171c1e93f-kube-api-access-86289\") pod \"cd7baf7c-6124-4c2f-ade4-302171c1e93f\" (UID: \"cd7baf7c-6124-4c2f-ade4-302171c1e93f\") " Dec 10 11:14:12 crc kubenswrapper[4780]: I1210 11:14:12.995030 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5tc2h\" (UniqueName: \"kubernetes.io/projected/cce757bb-1e72-440d-97e9-63b08f5aa63b-kube-api-access-5tc2h\") pod \"cce757bb-1e72-440d-97e9-63b08f5aa63b\" (UID: \"cce757bb-1e72-440d-97e9-63b08f5aa63b\") " Dec 10 11:14:12 crc kubenswrapper[4780]: I1210 11:14:12.995161 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4d7da3d7-458f-4cd4-8a91-432c7395bdcc-operator-scripts\") pod \"4d7da3d7-458f-4cd4-8a91-432c7395bdcc\" (UID: \"4d7da3d7-458f-4cd4-8a91-432c7395bdcc\") " Dec 10 11:14:13 crc kubenswrapper[4780]: I1210 11:14:12.997505 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4d7da3d7-458f-4cd4-8a91-432c7395bdcc-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "4d7da3d7-458f-4cd4-8a91-432c7395bdcc" (UID: "4d7da3d7-458f-4cd4-8a91-432c7395bdcc"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 11:14:13 crc kubenswrapper[4780]: I1210 11:14:13.007632 4780 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-create-dbhzs" Dec 10 11:14:13 crc kubenswrapper[4780]: I1210 11:14:13.009568 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/cd7baf7c-6124-4c2f-ade4-302171c1e93f-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "cd7baf7c-6124-4c2f-ade4-302171c1e93f" (UID: "cd7baf7c-6124-4c2f-ade4-302171c1e93f"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 11:14:13 crc kubenswrapper[4780]: I1210 11:14:13.010742 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/cce757bb-1e72-440d-97e9-63b08f5aa63b-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "cce757bb-1e72-440d-97e9-63b08f5aa63b" (UID: "cce757bb-1e72-440d-97e9-63b08f5aa63b"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 11:14:13 crc kubenswrapper[4780]: I1210 11:14:13.015422 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4d7da3d7-458f-4cd4-8a91-432c7395bdcc-kube-api-access-jbq5b" (OuterVolumeSpecName: "kube-api-access-jbq5b") pod "4d7da3d7-458f-4cd4-8a91-432c7395bdcc" (UID: "4d7da3d7-458f-4cd4-8a91-432c7395bdcc"). InnerVolumeSpecName "kube-api-access-jbq5b". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:14:13 crc kubenswrapper[4780]: I1210 11:14:13.020341 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cd7baf7c-6124-4c2f-ade4-302171c1e93f-kube-api-access-86289" (OuterVolumeSpecName: "kube-api-access-86289") pod "cd7baf7c-6124-4c2f-ade4-302171c1e93f" (UID: "cd7baf7c-6124-4c2f-ade4-302171c1e93f"). InnerVolumeSpecName "kube-api-access-86289". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:14:13 crc kubenswrapper[4780]: I1210 11:14:13.022326 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cce757bb-1e72-440d-97e9-63b08f5aa63b-kube-api-access-5tc2h" (OuterVolumeSpecName: "kube-api-access-5tc2h") pod "cce757bb-1e72-440d-97e9-63b08f5aa63b" (UID: "cce757bb-1e72-440d-97e9-63b08f5aa63b"). InnerVolumeSpecName "kube-api-access-5tc2h". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:14:13 crc kubenswrapper[4780]: I1210 11:14:13.098852 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/773f068b-e55b-4207-b91b-3fe664bfaec5-operator-scripts\") pod \"773f068b-e55b-4207-b91b-3fe664bfaec5\" (UID: \"773f068b-e55b-4207-b91b-3fe664bfaec5\") " Dec 10 11:14:13 crc kubenswrapper[4780]: I1210 11:14:13.099159 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-p5jkm\" (UniqueName: \"kubernetes.io/projected/773f068b-e55b-4207-b91b-3fe664bfaec5-kube-api-access-p5jkm\") pod \"773f068b-e55b-4207-b91b-3fe664bfaec5\" (UID: \"773f068b-e55b-4207-b91b-3fe664bfaec5\") " Dec 10 11:14:13 crc kubenswrapper[4780]: I1210 11:14:13.099963 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/773f068b-e55b-4207-b91b-3fe664bfaec5-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "773f068b-e55b-4207-b91b-3fe664bfaec5" (UID: "773f068b-e55b-4207-b91b-3fe664bfaec5"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 11:14:13 crc kubenswrapper[4780]: I1210 11:14:13.104949 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/773f068b-e55b-4207-b91b-3fe664bfaec5-kube-api-access-p5jkm" (OuterVolumeSpecName: "kube-api-access-p5jkm") pod "773f068b-e55b-4207-b91b-3fe664bfaec5" (UID: "773f068b-e55b-4207-b91b-3fe664bfaec5"). InnerVolumeSpecName "kube-api-access-p5jkm". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:14:13 crc kubenswrapper[4780]: I1210 11:14:13.106040 4780 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jbq5b\" (UniqueName: \"kubernetes.io/projected/4d7da3d7-458f-4cd4-8a91-432c7395bdcc-kube-api-access-jbq5b\") on node \"crc\" DevicePath \"\"" Dec 10 11:14:13 crc kubenswrapper[4780]: I1210 11:14:13.106141 4780 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-p5jkm\" (UniqueName: \"kubernetes.io/projected/773f068b-e55b-4207-b91b-3fe664bfaec5-kube-api-access-p5jkm\") on node \"crc\" DevicePath \"\"" Dec 10 11:14:13 crc kubenswrapper[4780]: I1210 11:14:13.106203 4780 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/cce757bb-1e72-440d-97e9-63b08f5aa63b-operator-scripts\") on node \"crc\" DevicePath \"\"" Dec 10 11:14:13 crc kubenswrapper[4780]: I1210 11:14:13.106215 4780 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-86289\" (UniqueName: \"kubernetes.io/projected/cd7baf7c-6124-4c2f-ade4-302171c1e93f-kube-api-access-86289\") on node \"crc\" DevicePath \"\"" Dec 10 11:14:13 crc kubenswrapper[4780]: I1210 11:14:13.106225 4780 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5tc2h\" (UniqueName: \"kubernetes.io/projected/cce757bb-1e72-440d-97e9-63b08f5aa63b-kube-api-access-5tc2h\") on node \"crc\" DevicePath \"\"" Dec 10 11:14:13 crc kubenswrapper[4780]: I1210 11:14:13.106237 4780 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4d7da3d7-458f-4cd4-8a91-432c7395bdcc-operator-scripts\") on node \"crc\" DevicePath \"\"" Dec 10 11:14:13 crc kubenswrapper[4780]: I1210 11:14:13.106252 4780 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/773f068b-e55b-4207-b91b-3fe664bfaec5-operator-scripts\") on node \"crc\" DevicePath \"\"" Dec 10 11:14:13 crc kubenswrapper[4780]: I1210 11:14:13.106266 4780 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/cd7baf7c-6124-4c2f-ade4-302171c1e93f-operator-scripts\") on node \"crc\" DevicePath \"\"" Dec 10 11:14:13 crc kubenswrapper[4780]: I1210 11:14:13.899951 4780 generic.go:334] "Generic (PLEG): container finished" podID="57e27a9d-d862-41f8-b10d-b28fb268f91c" containerID="261a7961c51677ddc56262ede3057a53c0782d8ce07324f9493deb6671633f93" exitCode=0 Dec 10 11:14:13 crc kubenswrapper[4780]: I1210 11:14:13.900062 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-e8d6-account-create-update-75x74" event={"ID":"57e27a9d-d862-41f8-b10d-b28fb268f91c","Type":"ContainerDied","Data":"261a7961c51677ddc56262ede3057a53c0782d8ce07324f9493deb6671633f93"} Dec 10 11:14:13 crc kubenswrapper[4780]: I1210 11:14:13.912755 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-m9l8h" event={"ID":"626bc022-de20-4c32-ad5b-bd22a54340ce","Type":"ContainerStarted","Data":"c0507c16b5ffcf039df86cb1cef6f8cb84cd34ce350c52b957e2bdc58d9d3487"} Dec 10 11:14:13 crc kubenswrapper[4780]: I1210 11:14:13.987760 4780 generic.go:334] "Generic (PLEG): container finished" podID="b734f2d8-19e5-4d25-882b-0f3a468dcde7" containerID="7bc4e3bae0bf3baf0d6683cdd1adb229b90ea53d949500e9891ac05e2323d784" exitCode=0 Dec 10 11:14:13 crc kubenswrapper[4780]: I1210 11:14:13.988521 4780 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-db-create-dbhzs" Dec 10 11:14:13 crc kubenswrapper[4780]: I1210 11:14:13.988726 4780 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-create-k47wv" Dec 10 11:14:13 crc kubenswrapper[4780]: I1210 11:14:13.988835 4780 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-b224-account-create-update-klk96" Dec 10 11:14:13 crc kubenswrapper[4780]: I1210 11:14:13.990755 4780 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-b379-account-create-update-7m27l" Dec 10 11:14:14 crc kubenswrapper[4780]: I1210 11:14:14.027258 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"3e1a1225-bdae-4dcb-b10a-02504fe590cd","Type":"ContainerStarted","Data":"4758ea8ebb39e4ca9510291ee781f122aa9c050ca5a1a3473e02dc08d641f1bd"} Dec 10 11:14:14 crc kubenswrapper[4780]: I1210 11:14:14.027342 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"3e1a1225-bdae-4dcb-b10a-02504fe590cd","Type":"ContainerStarted","Data":"02e0a45abfa46b57e416434cdb254b6c6636233de09e67aa9bcd2cb599ef7ec9"} Dec 10 11:14:14 crc kubenswrapper[4780]: I1210 11:14:14.027356 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"3e1a1225-bdae-4dcb-b10a-02504fe590cd","Type":"ContainerStarted","Data":"c63d61085bc58afbfd511974c4e9da177c9ba1be6bc72ea8dc4803c18ce784d8"} Dec 10 11:14:14 crc kubenswrapper[4780]: I1210 11:14:14.027368 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-5b53-account-create-update-rczqg" event={"ID":"b734f2d8-19e5-4d25-882b-0f3a468dcde7","Type":"ContainerDied","Data":"7bc4e3bae0bf3baf0d6683cdd1adb229b90ea53d949500e9891ac05e2323d784"} Dec 10 11:14:14 crc kubenswrapper[4780]: I1210 11:14:14.077379 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-db-sync-m9l8h" podStartSLOduration=7.55291097 podStartE2EDuration="46.077343113s" podCreationTimestamp="2025-12-10 11:13:28 +0000 UTC" firstStartedPulling="2025-12-10 11:13:34.121043607 +0000 UTC m=+1718.974437050" lastFinishedPulling="2025-12-10 11:14:12.64547574 +0000 UTC m=+1757.498869193" observedRunningTime="2025-12-10 11:14:13.963473598 +0000 UTC m=+1758.816867041" watchObservedRunningTime="2025-12-10 11:14:14.077343113 +0000 UTC m=+1758.930736556" Dec 10 11:14:14 crc kubenswrapper[4780]: I1210 11:14:14.583703 4780 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-db-create-mnfzx" Dec 10 11:14:14 crc kubenswrapper[4780]: I1210 11:14:14.679054 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/33cacd55-bfbd-437d-9a42-0a883b479efa-operator-scripts\") pod \"33cacd55-bfbd-437d-9a42-0a883b479efa\" (UID: \"33cacd55-bfbd-437d-9a42-0a883b479efa\") " Dec 10 11:14:14 crc kubenswrapper[4780]: I1210 11:14:14.679421 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-g2fc8\" (UniqueName: \"kubernetes.io/projected/33cacd55-bfbd-437d-9a42-0a883b479efa-kube-api-access-g2fc8\") pod \"33cacd55-bfbd-437d-9a42-0a883b479efa\" (UID: \"33cacd55-bfbd-437d-9a42-0a883b479efa\") " Dec 10 11:14:14 crc kubenswrapper[4780]: I1210 11:14:14.681088 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/33cacd55-bfbd-437d-9a42-0a883b479efa-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "33cacd55-bfbd-437d-9a42-0a883b479efa" (UID: "33cacd55-bfbd-437d-9a42-0a883b479efa"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 11:14:14 crc kubenswrapper[4780]: I1210 11:14:14.700573 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/33cacd55-bfbd-437d-9a42-0a883b479efa-kube-api-access-g2fc8" (OuterVolumeSpecName: "kube-api-access-g2fc8") pod "33cacd55-bfbd-437d-9a42-0a883b479efa" (UID: "33cacd55-bfbd-437d-9a42-0a883b479efa"). InnerVolumeSpecName "kube-api-access-g2fc8". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:14:14 crc kubenswrapper[4780]: I1210 11:14:14.783516 4780 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/33cacd55-bfbd-437d-9a42-0a883b479efa-operator-scripts\") on node \"crc\" DevicePath \"\"" Dec 10 11:14:14 crc kubenswrapper[4780]: I1210 11:14:14.784076 4780 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-g2fc8\" (UniqueName: \"kubernetes.io/projected/33cacd55-bfbd-437d-9a42-0a883b479efa-kube-api-access-g2fc8\") on node \"crc\" DevicePath \"\"" Dec 10 11:14:15 crc kubenswrapper[4780]: I1210 11:14:15.012639 4780 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-db-create-mnfzx" Dec 10 11:14:15 crc kubenswrapper[4780]: I1210 11:14:15.012682 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-db-create-mnfzx" event={"ID":"33cacd55-bfbd-437d-9a42-0a883b479efa","Type":"ContainerDied","Data":"8f3c4cda3084416cd5594defabd6b781e5ba1a8ae81a08847fbb5a9063ef992d"} Dec 10 11:14:15 crc kubenswrapper[4780]: I1210 11:14:15.012782 4780 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="8f3c4cda3084416cd5594defabd6b781e5ba1a8ae81a08847fbb5a9063ef992d" Dec 10 11:14:15 crc kubenswrapper[4780]: I1210 11:14:15.024374 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"3e1a1225-bdae-4dcb-b10a-02504fe590cd","Type":"ContainerStarted","Data":"d07bd35339a18cb7cc852e3da2443e649dd275338b95fd45f85017f89089c3c5"} Dec 10 11:14:15 crc kubenswrapper[4780]: I1210 11:14:15.863355 4780 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-5b53-account-create-update-rczqg" Dec 10 11:14:15 crc kubenswrapper[4780]: I1210 11:14:15.873020 4780 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-e8d6-account-create-update-75x74" Dec 10 11:14:15 crc kubenswrapper[4780]: I1210 11:14:15.907346 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b734f2d8-19e5-4d25-882b-0f3a468dcde7-operator-scripts\") pod \"b734f2d8-19e5-4d25-882b-0f3a468dcde7\" (UID: \"b734f2d8-19e5-4d25-882b-0f3a468dcde7\") " Dec 10 11:14:15 crc kubenswrapper[4780]: I1210 11:14:15.907595 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-f4qpg\" (UniqueName: \"kubernetes.io/projected/57e27a9d-d862-41f8-b10d-b28fb268f91c-kube-api-access-f4qpg\") pod \"57e27a9d-d862-41f8-b10d-b28fb268f91c\" (UID: \"57e27a9d-d862-41f8-b10d-b28fb268f91c\") " Dec 10 11:14:15 crc kubenswrapper[4780]: I1210 11:14:15.907841 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lktwr\" (UniqueName: \"kubernetes.io/projected/b734f2d8-19e5-4d25-882b-0f3a468dcde7-kube-api-access-lktwr\") pod \"b734f2d8-19e5-4d25-882b-0f3a468dcde7\" (UID: \"b734f2d8-19e5-4d25-882b-0f3a468dcde7\") " Dec 10 11:14:15 crc kubenswrapper[4780]: I1210 11:14:15.907902 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/57e27a9d-d862-41f8-b10d-b28fb268f91c-operator-scripts\") pod \"57e27a9d-d862-41f8-b10d-b28fb268f91c\" (UID: \"57e27a9d-d862-41f8-b10d-b28fb268f91c\") " Dec 10 11:14:15 crc kubenswrapper[4780]: I1210 11:14:15.909783 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/57e27a9d-d862-41f8-b10d-b28fb268f91c-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "57e27a9d-d862-41f8-b10d-b28fb268f91c" (UID: "57e27a9d-d862-41f8-b10d-b28fb268f91c"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 11:14:15 crc kubenswrapper[4780]: I1210 11:14:15.910002 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b734f2d8-19e5-4d25-882b-0f3a468dcde7-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "b734f2d8-19e5-4d25-882b-0f3a468dcde7" (UID: "b734f2d8-19e5-4d25-882b-0f3a468dcde7"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 11:14:15 crc kubenswrapper[4780]: I1210 11:14:15.919202 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b734f2d8-19e5-4d25-882b-0f3a468dcde7-kube-api-access-lktwr" (OuterVolumeSpecName: "kube-api-access-lktwr") pod "b734f2d8-19e5-4d25-882b-0f3a468dcde7" (UID: "b734f2d8-19e5-4d25-882b-0f3a468dcde7"). InnerVolumeSpecName "kube-api-access-lktwr". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:14:15 crc kubenswrapper[4780]: I1210 11:14:15.934545 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/57e27a9d-d862-41f8-b10d-b28fb268f91c-kube-api-access-f4qpg" (OuterVolumeSpecName: "kube-api-access-f4qpg") pod "57e27a9d-d862-41f8-b10d-b28fb268f91c" (UID: "57e27a9d-d862-41f8-b10d-b28fb268f91c"). InnerVolumeSpecName "kube-api-access-f4qpg". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:14:16 crc kubenswrapper[4780]: I1210 11:14:16.026973 4780 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lktwr\" (UniqueName: \"kubernetes.io/projected/b734f2d8-19e5-4d25-882b-0f3a468dcde7-kube-api-access-lktwr\") on node \"crc\" DevicePath \"\"" Dec 10 11:14:16 crc kubenswrapper[4780]: I1210 11:14:16.027018 4780 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/57e27a9d-d862-41f8-b10d-b28fb268f91c-operator-scripts\") on node \"crc\" DevicePath \"\"" Dec 10 11:14:16 crc kubenswrapper[4780]: I1210 11:14:16.027030 4780 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b734f2d8-19e5-4d25-882b-0f3a468dcde7-operator-scripts\") on node \"crc\" DevicePath \"\"" Dec 10 11:14:16 crc kubenswrapper[4780]: I1210 11:14:16.027039 4780 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-f4qpg\" (UniqueName: \"kubernetes.io/projected/57e27a9d-d862-41f8-b10d-b28fb268f91c-kube-api-access-f4qpg\") on node \"crc\" DevicePath \"\"" Dec 10 11:14:16 crc kubenswrapper[4780]: I1210 11:14:16.048795 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-5b53-account-create-update-rczqg" event={"ID":"b734f2d8-19e5-4d25-882b-0f3a468dcde7","Type":"ContainerDied","Data":"c2c7bf11f6ef0d08f059e167b104aaa1ffe47d13ba80cea6bf7ce7004e2cc60c"} Dec 10 11:14:16 crc kubenswrapper[4780]: I1210 11:14:16.048857 4780 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c2c7bf11f6ef0d08f059e167b104aaa1ffe47d13ba80cea6bf7ce7004e2cc60c" Dec 10 11:14:16 crc kubenswrapper[4780]: I1210 11:14:16.049034 4780 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-5b53-account-create-update-rczqg" Dec 10 11:14:16 crc kubenswrapper[4780]: I1210 11:14:16.054841 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-e8d6-account-create-update-75x74" event={"ID":"57e27a9d-d862-41f8-b10d-b28fb268f91c","Type":"ContainerDied","Data":"ee12a833a041184b263e94cdb107db80608b2ef23f6efc748dd8cc1920706d4c"} Dec 10 11:14:16 crc kubenswrapper[4780]: I1210 11:14:16.054908 4780 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ee12a833a041184b263e94cdb107db80608b2ef23f6efc748dd8cc1920706d4c" Dec 10 11:14:16 crc kubenswrapper[4780]: I1210 11:14:16.055036 4780 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-e8d6-account-create-update-75x74" Dec 10 11:14:22 crc kubenswrapper[4780]: I1210 11:14:22.538179 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"3e1a1225-bdae-4dcb-b10a-02504fe590cd","Type":"ContainerStarted","Data":"8f8519f6728595d47f4f228e5684b23181ca49c1935c713f74ad7c02e84dce26"} Dec 10 11:14:22 crc kubenswrapper[4780]: I1210 11:14:22.538776 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"3e1a1225-bdae-4dcb-b10a-02504fe590cd","Type":"ContainerStarted","Data":"1ffda872cf176e7b62863d3622e5353afbd9ccdd7d399302ccb81aa7803f32ea"} Dec 10 11:14:22 crc kubenswrapper[4780]: I1210 11:14:22.572457 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-td4vw" event={"ID":"d94adae5-67fa-4707-9139-8bd4537a7e77","Type":"ContainerStarted","Data":"80a2100200c8fa235445633119dd010380c3a35c72924ce23b90fa974d894877"} Dec 10 11:14:22 crc kubenswrapper[4780]: I1210 11:14:22.589595 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"bef902c7-4e5f-4af9-bda4-0c92b8521901","Type":"ContainerStarted","Data":"017e9d8e6ad8e954a7f32a7035a485bba683e50db7ea616925e950c3d43f5a26"} Dec 10 11:14:22 crc kubenswrapper[4780]: I1210 11:14:22.640049 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-db-sync-td4vw" podStartSLOduration=4.8697425 podStartE2EDuration="17.639993738s" podCreationTimestamp="2025-12-10 11:14:05 +0000 UTC" firstStartedPulling="2025-12-10 11:14:08.048803934 +0000 UTC m=+1752.902197387" lastFinishedPulling="2025-12-10 11:14:20.819055182 +0000 UTC m=+1765.672448625" observedRunningTime="2025-12-10 11:14:22.623958909 +0000 UTC m=+1767.477352342" watchObservedRunningTime="2025-12-10 11:14:22.639993738 +0000 UTC m=+1767.493387201" Dec 10 11:14:22 crc kubenswrapper[4780]: I1210 11:14:22.752258 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/prometheus-metric-storage-0" podStartSLOduration=9.477326797 podStartE2EDuration="2m41.75222092s" podCreationTimestamp="2025-12-10 11:11:41 +0000 UTC" firstStartedPulling="2025-12-10 11:11:48.546662463 +0000 UTC m=+1613.400055906" lastFinishedPulling="2025-12-10 11:14:20.821556586 +0000 UTC m=+1765.674950029" observedRunningTime="2025-12-10 11:14:22.747476389 +0000 UTC m=+1767.600869832" watchObservedRunningTime="2025-12-10 11:14:22.75222092 +0000 UTC m=+1767.605614363" Dec 10 11:14:22 crc kubenswrapper[4780]: I1210 11:14:22.959292 4780 scope.go:117] "RemoveContainer" containerID="90f38aa24e74e78a263a82b742bebf91991ff25f2212114904e33abbbd82de16" Dec 10 11:14:22 crc kubenswrapper[4780]: E1210 11:14:22.959852 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xhdr5_openshift-machine-config-operator(6bf1dca1-b191-4796-b326-baac53e84045)\"" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" podUID="6bf1dca1-b191-4796-b326-baac53e84045" Dec 10 11:14:23 crc kubenswrapper[4780]: I1210 11:14:23.618018 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"3e1a1225-bdae-4dcb-b10a-02504fe590cd","Type":"ContainerStarted","Data":"e0e32740a0fdb3282154587dede20715dc8f93370f484a3d516fc4fe510d14de"} Dec 10 11:14:25 crc 
kubenswrapper[4780]: I1210 11:14:25.011644 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"3e1a1225-bdae-4dcb-b10a-02504fe590cd","Type":"ContainerStarted","Data":"18d7906456c12bdf3558850b88bd7dd29948b91e0f5137a22245ef5deb5bca7a"} Dec 10 11:14:25 crc kubenswrapper[4780]: I1210 11:14:25.012654 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"3e1a1225-bdae-4dcb-b10a-02504fe590cd","Type":"ContainerStarted","Data":"29fe8984a464e769b46b3d0a49a791f0ca2bdb619bd60fb6bb70b59715a5438c"} Dec 10 11:14:25 crc kubenswrapper[4780]: I1210 11:14:25.012692 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"3e1a1225-bdae-4dcb-b10a-02504fe590cd","Type":"ContainerStarted","Data":"363b90acd71580e7a863e3b66c4076b6951c59a4a5ff35fd34b3f42d566502c0"} Dec 10 11:14:26 crc kubenswrapper[4780]: I1210 11:14:26.036155 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"3e1a1225-bdae-4dcb-b10a-02504fe590cd","Type":"ContainerStarted","Data":"ab4ab3cb9d0ecea202f3fc8522e3ef08285793910f7ac6eb9bcf103dbb18cf53"} Dec 10 11:14:26 crc kubenswrapper[4780]: I1210 11:14:26.101937 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/swift-storage-0" podStartSLOduration=73.280314533 podStartE2EDuration="1m35.10189502s" podCreationTimestamp="2025-12-10 11:12:51 +0000 UTC" firstStartedPulling="2025-12-10 11:13:58.99729736 +0000 UTC m=+1743.850690793" lastFinishedPulling="2025-12-10 11:14:20.818877837 +0000 UTC m=+1765.672271280" observedRunningTime="2025-12-10 11:14:26.091151706 +0000 UTC m=+1770.944545159" watchObservedRunningTime="2025-12-10 11:14:26.10189502 +0000 UTC m=+1770.955288463" Dec 10 11:14:28 crc kubenswrapper[4780]: I1210 11:14:28.067124 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-764c5664d7-xvt7k"] Dec 10 11:14:28 crc kubenswrapper[4780]: E1210 11:14:28.083610 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4d7da3d7-458f-4cd4-8a91-432c7395bdcc" containerName="mariadb-account-create-update" Dec 10 11:14:28 crc kubenswrapper[4780]: I1210 11:14:28.083706 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="4d7da3d7-458f-4cd4-8a91-432c7395bdcc" containerName="mariadb-account-create-update" Dec 10 11:14:28 crc kubenswrapper[4780]: E1210 11:14:28.083764 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2acffbad-6bc8-4c72-b800-c94b25aeb1bc" containerName="mariadb-database-create" Dec 10 11:14:28 crc kubenswrapper[4780]: I1210 11:14:28.083777 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="2acffbad-6bc8-4c72-b800-c94b25aeb1bc" containerName="mariadb-database-create" Dec 10 11:14:28 crc kubenswrapper[4780]: E1210 11:14:28.083803 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="33cacd55-bfbd-437d-9a42-0a883b479efa" containerName="mariadb-database-create" Dec 10 11:14:28 crc kubenswrapper[4780]: I1210 11:14:28.083814 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="33cacd55-bfbd-437d-9a42-0a883b479efa" containerName="mariadb-database-create" Dec 10 11:14:28 crc kubenswrapper[4780]: E1210 11:14:28.083824 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b734f2d8-19e5-4d25-882b-0f3a468dcde7" containerName="mariadb-account-create-update" Dec 10 11:14:28 crc kubenswrapper[4780]: I1210 11:14:28.083831 4780 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="b734f2d8-19e5-4d25-882b-0f3a468dcde7" containerName="mariadb-account-create-update" Dec 10 11:14:28 crc kubenswrapper[4780]: E1210 11:14:28.083863 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cce757bb-1e72-440d-97e9-63b08f5aa63b" containerName="mariadb-database-create" Dec 10 11:14:28 crc kubenswrapper[4780]: I1210 11:14:28.083872 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="cce757bb-1e72-440d-97e9-63b08f5aa63b" containerName="mariadb-database-create" Dec 10 11:14:28 crc kubenswrapper[4780]: E1210 11:14:28.083889 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cd7baf7c-6124-4c2f-ade4-302171c1e93f" containerName="mariadb-account-create-update" Dec 10 11:14:28 crc kubenswrapper[4780]: I1210 11:14:28.083897 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="cd7baf7c-6124-4c2f-ade4-302171c1e93f" containerName="mariadb-account-create-update" Dec 10 11:14:28 crc kubenswrapper[4780]: E1210 11:14:28.083949 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="773f068b-e55b-4207-b91b-3fe664bfaec5" containerName="mariadb-database-create" Dec 10 11:14:28 crc kubenswrapper[4780]: I1210 11:14:28.083959 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="773f068b-e55b-4207-b91b-3fe664bfaec5" containerName="mariadb-database-create" Dec 10 11:14:28 crc kubenswrapper[4780]: E1210 11:14:28.083979 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="57e27a9d-d862-41f8-b10d-b28fb268f91c" containerName="mariadb-account-create-update" Dec 10 11:14:28 crc kubenswrapper[4780]: I1210 11:14:28.083986 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="57e27a9d-d862-41f8-b10d-b28fb268f91c" containerName="mariadb-account-create-update" Dec 10 11:14:28 crc kubenswrapper[4780]: I1210 11:14:28.086481 4780 memory_manager.go:354] "RemoveStaleState removing state" podUID="773f068b-e55b-4207-b91b-3fe664bfaec5" containerName="mariadb-database-create" Dec 10 11:14:28 crc kubenswrapper[4780]: I1210 11:14:28.086511 4780 memory_manager.go:354] "RemoveStaleState removing state" podUID="2acffbad-6bc8-4c72-b800-c94b25aeb1bc" containerName="mariadb-database-create" Dec 10 11:14:28 crc kubenswrapper[4780]: I1210 11:14:28.086549 4780 memory_manager.go:354] "RemoveStaleState removing state" podUID="57e27a9d-d862-41f8-b10d-b28fb268f91c" containerName="mariadb-account-create-update" Dec 10 11:14:28 crc kubenswrapper[4780]: I1210 11:14:28.086568 4780 memory_manager.go:354] "RemoveStaleState removing state" podUID="cce757bb-1e72-440d-97e9-63b08f5aa63b" containerName="mariadb-database-create" Dec 10 11:14:28 crc kubenswrapper[4780]: I1210 11:14:28.086872 4780 memory_manager.go:354] "RemoveStaleState removing state" podUID="33cacd55-bfbd-437d-9a42-0a883b479efa" containerName="mariadb-database-create" Dec 10 11:14:28 crc kubenswrapper[4780]: I1210 11:14:28.086956 4780 memory_manager.go:354] "RemoveStaleState removing state" podUID="4d7da3d7-458f-4cd4-8a91-432c7395bdcc" containerName="mariadb-account-create-update" Dec 10 11:14:28 crc kubenswrapper[4780]: I1210 11:14:28.087228 4780 memory_manager.go:354] "RemoveStaleState removing state" podUID="b734f2d8-19e5-4d25-882b-0f3a468dcde7" containerName="mariadb-account-create-update" Dec 10 11:14:28 crc kubenswrapper[4780]: I1210 11:14:28.087347 4780 memory_manager.go:354] "RemoveStaleState removing state" podUID="cd7baf7c-6124-4c2f-ade4-302171c1e93f" containerName="mariadb-account-create-update" Dec 10 11:14:28 crc kubenswrapper[4780]: I1210 11:14:28.091644 4780 
kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-764c5664d7-xvt7k"] Dec 10 11:14:28 crc kubenswrapper[4780]: I1210 11:14:28.091940 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-764c5664d7-xvt7k" Dec 10 11:14:28 crc kubenswrapper[4780]: I1210 11:14:28.101376 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"dns-swift-storage-0" Dec 10 11:14:28 crc kubenswrapper[4780]: I1210 11:14:28.239712 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/6cb4c5f3-27bc-4e51-be0f-7b52e6f34246-ovsdbserver-sb\") pod \"dnsmasq-dns-764c5664d7-xvt7k\" (UID: \"6cb4c5f3-27bc-4e51-be0f-7b52e6f34246\") " pod="openstack/dnsmasq-dns-764c5664d7-xvt7k" Dec 10 11:14:28 crc kubenswrapper[4780]: I1210 11:14:28.240492 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/6cb4c5f3-27bc-4e51-be0f-7b52e6f34246-dns-swift-storage-0\") pod \"dnsmasq-dns-764c5664d7-xvt7k\" (UID: \"6cb4c5f3-27bc-4e51-be0f-7b52e6f34246\") " pod="openstack/dnsmasq-dns-764c5664d7-xvt7k" Dec 10 11:14:28 crc kubenswrapper[4780]: I1210 11:14:28.241518 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6cb4c5f3-27bc-4e51-be0f-7b52e6f34246-config\") pod \"dnsmasq-dns-764c5664d7-xvt7k\" (UID: \"6cb4c5f3-27bc-4e51-be0f-7b52e6f34246\") " pod="openstack/dnsmasq-dns-764c5664d7-xvt7k" Dec 10 11:14:28 crc kubenswrapper[4780]: I1210 11:14:28.242472 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w6jvt\" (UniqueName: \"kubernetes.io/projected/6cb4c5f3-27bc-4e51-be0f-7b52e6f34246-kube-api-access-w6jvt\") pod \"dnsmasq-dns-764c5664d7-xvt7k\" (UID: \"6cb4c5f3-27bc-4e51-be0f-7b52e6f34246\") " pod="openstack/dnsmasq-dns-764c5664d7-xvt7k" Dec 10 11:14:28 crc kubenswrapper[4780]: I1210 11:14:28.242682 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/6cb4c5f3-27bc-4e51-be0f-7b52e6f34246-ovsdbserver-nb\") pod \"dnsmasq-dns-764c5664d7-xvt7k\" (UID: \"6cb4c5f3-27bc-4e51-be0f-7b52e6f34246\") " pod="openstack/dnsmasq-dns-764c5664d7-xvt7k" Dec 10 11:14:28 crc kubenswrapper[4780]: I1210 11:14:28.242952 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/6cb4c5f3-27bc-4e51-be0f-7b52e6f34246-dns-svc\") pod \"dnsmasq-dns-764c5664d7-xvt7k\" (UID: \"6cb4c5f3-27bc-4e51-be0f-7b52e6f34246\") " pod="openstack/dnsmasq-dns-764c5664d7-xvt7k" Dec 10 11:14:28 crc kubenswrapper[4780]: I1210 11:14:28.260074 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/prometheus-metric-storage-0"] Dec 10 11:14:28 crc kubenswrapper[4780]: I1210 11:14:28.260513 4780 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/prometheus-metric-storage-0" podUID="bef902c7-4e5f-4af9-bda4-0c92b8521901" containerName="prometheus" containerID="cri-o://606ddda9fa20a44ab28de4c48bf44767eab3cd7716eef33f4f85fcc84bc046a0" gracePeriod=600 Dec 10 11:14:28 crc kubenswrapper[4780]: I1210 11:14:28.260782 4780 kuberuntime_container.go:808] "Killing container with a grace period" 
pod="openstack/prometheus-metric-storage-0" podUID="bef902c7-4e5f-4af9-bda4-0c92b8521901" containerName="thanos-sidecar" containerID="cri-o://017e9d8e6ad8e954a7f32a7035a485bba683e50db7ea616925e950c3d43f5a26" gracePeriod=600 Dec 10 11:14:28 crc kubenswrapper[4780]: I1210 11:14:28.260971 4780 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/prometheus-metric-storage-0" podUID="bef902c7-4e5f-4af9-bda4-0c92b8521901" containerName="config-reloader" containerID="cri-o://d8a5458beaac106b6242819cdeeeb432c7188934cbca0875f75b088a671a9868" gracePeriod=600 Dec 10 11:14:28 crc kubenswrapper[4780]: I1210 11:14:28.346432 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/6cb4c5f3-27bc-4e51-be0f-7b52e6f34246-ovsdbserver-sb\") pod \"dnsmasq-dns-764c5664d7-xvt7k\" (UID: \"6cb4c5f3-27bc-4e51-be0f-7b52e6f34246\") " pod="openstack/dnsmasq-dns-764c5664d7-xvt7k" Dec 10 11:14:28 crc kubenswrapper[4780]: I1210 11:14:28.347231 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/6cb4c5f3-27bc-4e51-be0f-7b52e6f34246-dns-swift-storage-0\") pod \"dnsmasq-dns-764c5664d7-xvt7k\" (UID: \"6cb4c5f3-27bc-4e51-be0f-7b52e6f34246\") " pod="openstack/dnsmasq-dns-764c5664d7-xvt7k" Dec 10 11:14:28 crc kubenswrapper[4780]: I1210 11:14:28.347467 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6cb4c5f3-27bc-4e51-be0f-7b52e6f34246-config\") pod \"dnsmasq-dns-764c5664d7-xvt7k\" (UID: \"6cb4c5f3-27bc-4e51-be0f-7b52e6f34246\") " pod="openstack/dnsmasq-dns-764c5664d7-xvt7k" Dec 10 11:14:28 crc kubenswrapper[4780]: I1210 11:14:28.350599 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/6cb4c5f3-27bc-4e51-be0f-7b52e6f34246-ovsdbserver-sb\") pod \"dnsmasq-dns-764c5664d7-xvt7k\" (UID: \"6cb4c5f3-27bc-4e51-be0f-7b52e6f34246\") " pod="openstack/dnsmasq-dns-764c5664d7-xvt7k" Dec 10 11:14:28 crc kubenswrapper[4780]: I1210 11:14:28.351393 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6cb4c5f3-27bc-4e51-be0f-7b52e6f34246-config\") pod \"dnsmasq-dns-764c5664d7-xvt7k\" (UID: \"6cb4c5f3-27bc-4e51-be0f-7b52e6f34246\") " pod="openstack/dnsmasq-dns-764c5664d7-xvt7k" Dec 10 11:14:28 crc kubenswrapper[4780]: I1210 11:14:28.352197 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w6jvt\" (UniqueName: \"kubernetes.io/projected/6cb4c5f3-27bc-4e51-be0f-7b52e6f34246-kube-api-access-w6jvt\") pod \"dnsmasq-dns-764c5664d7-xvt7k\" (UID: \"6cb4c5f3-27bc-4e51-be0f-7b52e6f34246\") " pod="openstack/dnsmasq-dns-764c5664d7-xvt7k" Dec 10 11:14:28 crc kubenswrapper[4780]: I1210 11:14:28.352500 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/6cb4c5f3-27bc-4e51-be0f-7b52e6f34246-ovsdbserver-nb\") pod \"dnsmasq-dns-764c5664d7-xvt7k\" (UID: \"6cb4c5f3-27bc-4e51-be0f-7b52e6f34246\") " pod="openstack/dnsmasq-dns-764c5664d7-xvt7k" Dec 10 11:14:28 crc kubenswrapper[4780]: I1210 11:14:28.352735 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/6cb4c5f3-27bc-4e51-be0f-7b52e6f34246-dns-svc\") pod 
\"dnsmasq-dns-764c5664d7-xvt7k\" (UID: \"6cb4c5f3-27bc-4e51-be0f-7b52e6f34246\") " pod="openstack/dnsmasq-dns-764c5664d7-xvt7k" Dec 10 11:14:28 crc kubenswrapper[4780]: I1210 11:14:28.354515 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/6cb4c5f3-27bc-4e51-be0f-7b52e6f34246-dns-svc\") pod \"dnsmasq-dns-764c5664d7-xvt7k\" (UID: \"6cb4c5f3-27bc-4e51-be0f-7b52e6f34246\") " pod="openstack/dnsmasq-dns-764c5664d7-xvt7k" Dec 10 11:14:28 crc kubenswrapper[4780]: I1210 11:14:28.356460 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/6cb4c5f3-27bc-4e51-be0f-7b52e6f34246-ovsdbserver-nb\") pod \"dnsmasq-dns-764c5664d7-xvt7k\" (UID: \"6cb4c5f3-27bc-4e51-be0f-7b52e6f34246\") " pod="openstack/dnsmasq-dns-764c5664d7-xvt7k" Dec 10 11:14:28 crc kubenswrapper[4780]: I1210 11:14:28.357419 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/6cb4c5f3-27bc-4e51-be0f-7b52e6f34246-dns-swift-storage-0\") pod \"dnsmasq-dns-764c5664d7-xvt7k\" (UID: \"6cb4c5f3-27bc-4e51-be0f-7b52e6f34246\") " pod="openstack/dnsmasq-dns-764c5664d7-xvt7k" Dec 10 11:14:28 crc kubenswrapper[4780]: I1210 11:14:28.417791 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-w6jvt\" (UniqueName: \"kubernetes.io/projected/6cb4c5f3-27bc-4e51-be0f-7b52e6f34246-kube-api-access-w6jvt\") pod \"dnsmasq-dns-764c5664d7-xvt7k\" (UID: \"6cb4c5f3-27bc-4e51-be0f-7b52e6f34246\") " pod="openstack/dnsmasq-dns-764c5664d7-xvt7k" Dec 10 11:14:28 crc kubenswrapper[4780]: I1210 11:14:28.490463 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-764c5664d7-xvt7k" Dec 10 11:14:29 crc kubenswrapper[4780]: I1210 11:14:29.768689 4780 generic.go:334] "Generic (PLEG): container finished" podID="bef902c7-4e5f-4af9-bda4-0c92b8521901" containerID="017e9d8e6ad8e954a7f32a7035a485bba683e50db7ea616925e950c3d43f5a26" exitCode=0 Dec 10 11:14:29 crc kubenswrapper[4780]: I1210 11:14:29.772008 4780 generic.go:334] "Generic (PLEG): container finished" podID="bef902c7-4e5f-4af9-bda4-0c92b8521901" containerID="606ddda9fa20a44ab28de4c48bf44767eab3cd7716eef33f4f85fcc84bc046a0" exitCode=0 Dec 10 11:14:29 crc kubenswrapper[4780]: I1210 11:14:29.770869 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"bef902c7-4e5f-4af9-bda4-0c92b8521901","Type":"ContainerDied","Data":"017e9d8e6ad8e954a7f32a7035a485bba683e50db7ea616925e950c3d43f5a26"} Dec 10 11:14:29 crc kubenswrapper[4780]: I1210 11:14:29.772264 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"bef902c7-4e5f-4af9-bda4-0c92b8521901","Type":"ContainerDied","Data":"606ddda9fa20a44ab28de4c48bf44767eab3cd7716eef33f4f85fcc84bc046a0"} Dec 10 11:14:30 crc kubenswrapper[4780]: I1210 11:14:30.286446 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-764c5664d7-xvt7k"] Dec 10 11:14:30 crc kubenswrapper[4780]: I1210 11:14:30.586726 4780 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/prometheus-metric-storage-0" Dec 10 11:14:30 crc kubenswrapper[4780]: I1210 11:14:30.717029 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/bef902c7-4e5f-4af9-bda4-0c92b8521901-thanos-prometheus-http-client-file\") pod \"bef902c7-4e5f-4af9-bda4-0c92b8521901\" (UID: \"bef902c7-4e5f-4af9-bda4-0c92b8521901\") " Dec 10 11:14:30 crc kubenswrapper[4780]: I1210 11:14:30.717159 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/bef902c7-4e5f-4af9-bda4-0c92b8521901-tls-assets\") pod \"bef902c7-4e5f-4af9-bda4-0c92b8521901\" (UID: \"bef902c7-4e5f-4af9-bda4-0c92b8521901\") " Dec 10 11:14:30 crc kubenswrapper[4780]: I1210 11:14:30.717266 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"prometheus-metric-storage-db\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"bef902c7-4e5f-4af9-bda4-0c92b8521901\" (UID: \"bef902c7-4e5f-4af9-bda4-0c92b8521901\") " Dec 10 11:14:30 crc kubenswrapper[4780]: I1210 11:14:30.717329 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/bef902c7-4e5f-4af9-bda4-0c92b8521901-config\") pod \"bef902c7-4e5f-4af9-bda4-0c92b8521901\" (UID: \"bef902c7-4e5f-4af9-bda4-0c92b8521901\") " Dec 10 11:14:30 crc kubenswrapper[4780]: I1210 11:14:30.717514 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/bef902c7-4e5f-4af9-bda4-0c92b8521901-config-out\") pod \"bef902c7-4e5f-4af9-bda4-0c92b8521901\" (UID: \"bef902c7-4e5f-4af9-bda4-0c92b8521901\") " Dec 10 11:14:30 crc kubenswrapper[4780]: I1210 11:14:30.717618 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/bef902c7-4e5f-4af9-bda4-0c92b8521901-prometheus-metric-storage-rulefiles-0\") pod \"bef902c7-4e5f-4af9-bda4-0c92b8521901\" (UID: \"bef902c7-4e5f-4af9-bda4-0c92b8521901\") " Dec 10 11:14:30 crc kubenswrapper[4780]: I1210 11:14:30.717676 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xtssp\" (UniqueName: \"kubernetes.io/projected/bef902c7-4e5f-4af9-bda4-0c92b8521901-kube-api-access-xtssp\") pod \"bef902c7-4e5f-4af9-bda4-0c92b8521901\" (UID: \"bef902c7-4e5f-4af9-bda4-0c92b8521901\") " Dec 10 11:14:30 crc kubenswrapper[4780]: I1210 11:14:30.717721 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/bef902c7-4e5f-4af9-bda4-0c92b8521901-web-config\") pod \"bef902c7-4e5f-4af9-bda4-0c92b8521901\" (UID: \"bef902c7-4e5f-4af9-bda4-0c92b8521901\") " Dec 10 11:14:30 crc kubenswrapper[4780]: I1210 11:14:30.724102 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bef902c7-4e5f-4af9-bda4-0c92b8521901-prometheus-metric-storage-rulefiles-0" (OuterVolumeSpecName: "prometheus-metric-storage-rulefiles-0") pod "bef902c7-4e5f-4af9-bda4-0c92b8521901" (UID: "bef902c7-4e5f-4af9-bda4-0c92b8521901"). InnerVolumeSpecName "prometheus-metric-storage-rulefiles-0". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 11:14:30 crc kubenswrapper[4780]: I1210 11:14:30.726683 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bef902c7-4e5f-4af9-bda4-0c92b8521901-thanos-prometheus-http-client-file" (OuterVolumeSpecName: "thanos-prometheus-http-client-file") pod "bef902c7-4e5f-4af9-bda4-0c92b8521901" (UID: "bef902c7-4e5f-4af9-bda4-0c92b8521901"). InnerVolumeSpecName "thanos-prometheus-http-client-file". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:14:30 crc kubenswrapper[4780]: I1210 11:14:30.738263 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bef902c7-4e5f-4af9-bda4-0c92b8521901-config" (OuterVolumeSpecName: "config") pod "bef902c7-4e5f-4af9-bda4-0c92b8521901" (UID: "bef902c7-4e5f-4af9-bda4-0c92b8521901"). InnerVolumeSpecName "config". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:14:30 crc kubenswrapper[4780]: I1210 11:14:30.746998 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bef902c7-4e5f-4af9-bda4-0c92b8521901-tls-assets" (OuterVolumeSpecName: "tls-assets") pod "bef902c7-4e5f-4af9-bda4-0c92b8521901" (UID: "bef902c7-4e5f-4af9-bda4-0c92b8521901"). InnerVolumeSpecName "tls-assets". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:14:30 crc kubenswrapper[4780]: I1210 11:14:30.751343 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bef902c7-4e5f-4af9-bda4-0c92b8521901-kube-api-access-xtssp" (OuterVolumeSpecName: "kube-api-access-xtssp") pod "bef902c7-4e5f-4af9-bda4-0c92b8521901" (UID: "bef902c7-4e5f-4af9-bda4-0c92b8521901"). InnerVolumeSpecName "kube-api-access-xtssp". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:14:30 crc kubenswrapper[4780]: I1210 11:14:30.751533 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bef902c7-4e5f-4af9-bda4-0c92b8521901-config-out" (OuterVolumeSpecName: "config-out") pod "bef902c7-4e5f-4af9-bda4-0c92b8521901" (UID: "bef902c7-4e5f-4af9-bda4-0c92b8521901"). InnerVolumeSpecName "config-out". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 11:14:30 crc kubenswrapper[4780]: I1210 11:14:30.752935 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage04-crc" (OuterVolumeSpecName: "prometheus-metric-storage-db") pod "bef902c7-4e5f-4af9-bda4-0c92b8521901" (UID: "bef902c7-4e5f-4af9-bda4-0c92b8521901"). InnerVolumeSpecName "local-storage04-crc". 
PluginName "kubernetes.io/local-volume", VolumeGidValue "" Dec 10 11:14:30 crc kubenswrapper[4780]: I1210 11:14:30.798138 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-764c5664d7-xvt7k" event={"ID":"6cb4c5f3-27bc-4e51-be0f-7b52e6f34246","Type":"ContainerStarted","Data":"7f375396f970a0322c88745bdba2c333bdfe2e0e2a746080c8ea773fabc5bc84"} Dec 10 11:14:30 crc kubenswrapper[4780]: I1210 11:14:30.806357 4780 generic.go:334] "Generic (PLEG): container finished" podID="bef902c7-4e5f-4af9-bda4-0c92b8521901" containerID="d8a5458beaac106b6242819cdeeeb432c7188934cbca0875f75b088a671a9868" exitCode=0 Dec 10 11:14:30 crc kubenswrapper[4780]: I1210 11:14:30.806429 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"bef902c7-4e5f-4af9-bda4-0c92b8521901","Type":"ContainerDied","Data":"d8a5458beaac106b6242819cdeeeb432c7188934cbca0875f75b088a671a9868"} Dec 10 11:14:30 crc kubenswrapper[4780]: I1210 11:14:30.806472 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"bef902c7-4e5f-4af9-bda4-0c92b8521901","Type":"ContainerDied","Data":"4413fd9fc37001d8606e09b30b54f7344fdb99fa6f034cbd6c180a34e148583c"} Dec 10 11:14:30 crc kubenswrapper[4780]: I1210 11:14:30.806464 4780 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/prometheus-metric-storage-0" Dec 10 11:14:30 crc kubenswrapper[4780]: I1210 11:14:30.806536 4780 scope.go:117] "RemoveContainer" containerID="017e9d8e6ad8e954a7f32a7035a485bba683e50db7ea616925e950c3d43f5a26" Dec 10 11:14:30 crc kubenswrapper[4780]: I1210 11:14:30.822051 4780 reconciler_common.go:293] "Volume detached for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/bef902c7-4e5f-4af9-bda4-0c92b8521901-config-out\") on node \"crc\" DevicePath \"\"" Dec 10 11:14:30 crc kubenswrapper[4780]: I1210 11:14:30.822092 4780 reconciler_common.go:293] "Volume detached for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/bef902c7-4e5f-4af9-bda4-0c92b8521901-prometheus-metric-storage-rulefiles-0\") on node \"crc\" DevicePath \"\"" Dec 10 11:14:30 crc kubenswrapper[4780]: I1210 11:14:30.822104 4780 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xtssp\" (UniqueName: \"kubernetes.io/projected/bef902c7-4e5f-4af9-bda4-0c92b8521901-kube-api-access-xtssp\") on node \"crc\" DevicePath \"\"" Dec 10 11:14:30 crc kubenswrapper[4780]: I1210 11:14:30.822119 4780 reconciler_common.go:293] "Volume detached for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/bef902c7-4e5f-4af9-bda4-0c92b8521901-thanos-prometheus-http-client-file\") on node \"crc\" DevicePath \"\"" Dec 10 11:14:30 crc kubenswrapper[4780]: I1210 11:14:30.822132 4780 reconciler_common.go:293] "Volume detached for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/bef902c7-4e5f-4af9-bda4-0c92b8521901-tls-assets\") on node \"crc\" DevicePath \"\"" Dec 10 11:14:30 crc kubenswrapper[4780]: I1210 11:14:30.822163 4780 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") on node \"crc\" " Dec 10 11:14:30 crc kubenswrapper[4780]: I1210 11:14:30.822175 4780 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/secret/bef902c7-4e5f-4af9-bda4-0c92b8521901-config\") on node \"crc\" DevicePath \"\"" Dec 10 
11:14:30 crc kubenswrapper[4780]: I1210 11:14:30.852035 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bef902c7-4e5f-4af9-bda4-0c92b8521901-web-config" (OuterVolumeSpecName: "web-config") pod "bef902c7-4e5f-4af9-bda4-0c92b8521901" (UID: "bef902c7-4e5f-4af9-bda4-0c92b8521901"). InnerVolumeSpecName "web-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:14:30 crc kubenswrapper[4780]: I1210 11:14:30.889290 4780 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage04-crc" (UniqueName: "kubernetes.io/local-volume/local-storage04-crc") on node "crc" Dec 10 11:14:30 crc kubenswrapper[4780]: I1210 11:14:30.927047 4780 reconciler_common.go:293] "Volume detached for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") on node \"crc\" DevicePath \"\"" Dec 10 11:14:30 crc kubenswrapper[4780]: I1210 11:14:30.927101 4780 reconciler_common.go:293] "Volume detached for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/bef902c7-4e5f-4af9-bda4-0c92b8521901-web-config\") on node \"crc\" DevicePath \"\"" Dec 10 11:14:30 crc kubenswrapper[4780]: I1210 11:14:30.965185 4780 scope.go:117] "RemoveContainer" containerID="d8a5458beaac106b6242819cdeeeb432c7188934cbca0875f75b088a671a9868" Dec 10 11:14:30 crc kubenswrapper[4780]: I1210 11:14:30.996892 4780 scope.go:117] "RemoveContainer" containerID="606ddda9fa20a44ab28de4c48bf44767eab3cd7716eef33f4f85fcc84bc046a0" Dec 10 11:14:31 crc kubenswrapper[4780]: I1210 11:14:31.046472 4780 scope.go:117] "RemoveContainer" containerID="8eddc0fcf9942c109477abdc701fb351c6fa744d3e546767239affa74a86cdc4" Dec 10 11:14:31 crc kubenswrapper[4780]: I1210 11:14:31.455432 4780 scope.go:117] "RemoveContainer" containerID="017e9d8e6ad8e954a7f32a7035a485bba683e50db7ea616925e950c3d43f5a26" Dec 10 11:14:31 crc kubenswrapper[4780]: E1210 11:14:31.458720 4780 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"017e9d8e6ad8e954a7f32a7035a485bba683e50db7ea616925e950c3d43f5a26\": container with ID starting with 017e9d8e6ad8e954a7f32a7035a485bba683e50db7ea616925e950c3d43f5a26 not found: ID does not exist" containerID="017e9d8e6ad8e954a7f32a7035a485bba683e50db7ea616925e950c3d43f5a26" Dec 10 11:14:31 crc kubenswrapper[4780]: I1210 11:14:31.458818 4780 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"017e9d8e6ad8e954a7f32a7035a485bba683e50db7ea616925e950c3d43f5a26"} err="failed to get container status \"017e9d8e6ad8e954a7f32a7035a485bba683e50db7ea616925e950c3d43f5a26\": rpc error: code = NotFound desc = could not find container \"017e9d8e6ad8e954a7f32a7035a485bba683e50db7ea616925e950c3d43f5a26\": container with ID starting with 017e9d8e6ad8e954a7f32a7035a485bba683e50db7ea616925e950c3d43f5a26 not found: ID does not exist" Dec 10 11:14:31 crc kubenswrapper[4780]: I1210 11:14:31.458868 4780 scope.go:117] "RemoveContainer" containerID="d8a5458beaac106b6242819cdeeeb432c7188934cbca0875f75b088a671a9868" Dec 10 11:14:31 crc kubenswrapper[4780]: E1210 11:14:31.460979 4780 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d8a5458beaac106b6242819cdeeeb432c7188934cbca0875f75b088a671a9868\": container with ID starting with d8a5458beaac106b6242819cdeeeb432c7188934cbca0875f75b088a671a9868 not found: ID does not exist" 
containerID="d8a5458beaac106b6242819cdeeeb432c7188934cbca0875f75b088a671a9868" Dec 10 11:14:31 crc kubenswrapper[4780]: I1210 11:14:31.461042 4780 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d8a5458beaac106b6242819cdeeeb432c7188934cbca0875f75b088a671a9868"} err="failed to get container status \"d8a5458beaac106b6242819cdeeeb432c7188934cbca0875f75b088a671a9868\": rpc error: code = NotFound desc = could not find container \"d8a5458beaac106b6242819cdeeeb432c7188934cbca0875f75b088a671a9868\": container with ID starting with d8a5458beaac106b6242819cdeeeb432c7188934cbca0875f75b088a671a9868 not found: ID does not exist" Dec 10 11:14:31 crc kubenswrapper[4780]: I1210 11:14:31.461079 4780 scope.go:117] "RemoveContainer" containerID="606ddda9fa20a44ab28de4c48bf44767eab3cd7716eef33f4f85fcc84bc046a0" Dec 10 11:14:31 crc kubenswrapper[4780]: E1210 11:14:31.475675 4780 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"606ddda9fa20a44ab28de4c48bf44767eab3cd7716eef33f4f85fcc84bc046a0\": container with ID starting with 606ddda9fa20a44ab28de4c48bf44767eab3cd7716eef33f4f85fcc84bc046a0 not found: ID does not exist" containerID="606ddda9fa20a44ab28de4c48bf44767eab3cd7716eef33f4f85fcc84bc046a0" Dec 10 11:14:31 crc kubenswrapper[4780]: I1210 11:14:31.476306 4780 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"606ddda9fa20a44ab28de4c48bf44767eab3cd7716eef33f4f85fcc84bc046a0"} err="failed to get container status \"606ddda9fa20a44ab28de4c48bf44767eab3cd7716eef33f4f85fcc84bc046a0\": rpc error: code = NotFound desc = could not find container \"606ddda9fa20a44ab28de4c48bf44767eab3cd7716eef33f4f85fcc84bc046a0\": container with ID starting with 606ddda9fa20a44ab28de4c48bf44767eab3cd7716eef33f4f85fcc84bc046a0 not found: ID does not exist" Dec 10 11:14:31 crc kubenswrapper[4780]: I1210 11:14:31.476495 4780 scope.go:117] "RemoveContainer" containerID="8eddc0fcf9942c109477abdc701fb351c6fa744d3e546767239affa74a86cdc4" Dec 10 11:14:31 crc kubenswrapper[4780]: E1210 11:14:31.479502 4780 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8eddc0fcf9942c109477abdc701fb351c6fa744d3e546767239affa74a86cdc4\": container with ID starting with 8eddc0fcf9942c109477abdc701fb351c6fa744d3e546767239affa74a86cdc4 not found: ID does not exist" containerID="8eddc0fcf9942c109477abdc701fb351c6fa744d3e546767239affa74a86cdc4" Dec 10 11:14:31 crc kubenswrapper[4780]: I1210 11:14:31.479624 4780 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8eddc0fcf9942c109477abdc701fb351c6fa744d3e546767239affa74a86cdc4"} err="failed to get container status \"8eddc0fcf9942c109477abdc701fb351c6fa744d3e546767239affa74a86cdc4\": rpc error: code = NotFound desc = could not find container \"8eddc0fcf9942c109477abdc701fb351c6fa744d3e546767239affa74a86cdc4\": container with ID starting with 8eddc0fcf9942c109477abdc701fb351c6fa744d3e546767239affa74a86cdc4 not found: ID does not exist" Dec 10 11:14:31 crc kubenswrapper[4780]: I1210 11:14:31.563711 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/prometheus-metric-storage-0"] Dec 10 11:14:31 crc kubenswrapper[4780]: I1210 11:14:31.581664 4780 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/prometheus-metric-storage-0"] Dec 10 11:14:31 crc kubenswrapper[4780]: I1210 11:14:31.621371 4780 
kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/prometheus-metric-storage-0"] Dec 10 11:14:31 crc kubenswrapper[4780]: E1210 11:14:31.622145 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bef902c7-4e5f-4af9-bda4-0c92b8521901" containerName="prometheus" Dec 10 11:14:31 crc kubenswrapper[4780]: I1210 11:14:31.622169 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="bef902c7-4e5f-4af9-bda4-0c92b8521901" containerName="prometheus" Dec 10 11:14:31 crc kubenswrapper[4780]: E1210 11:14:31.622207 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bef902c7-4e5f-4af9-bda4-0c92b8521901" containerName="thanos-sidecar" Dec 10 11:14:31 crc kubenswrapper[4780]: I1210 11:14:31.622217 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="bef902c7-4e5f-4af9-bda4-0c92b8521901" containerName="thanos-sidecar" Dec 10 11:14:31 crc kubenswrapper[4780]: E1210 11:14:31.622230 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bef902c7-4e5f-4af9-bda4-0c92b8521901" containerName="config-reloader" Dec 10 11:14:31 crc kubenswrapper[4780]: I1210 11:14:31.622236 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="bef902c7-4e5f-4af9-bda4-0c92b8521901" containerName="config-reloader" Dec 10 11:14:31 crc kubenswrapper[4780]: E1210 11:14:31.622249 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bef902c7-4e5f-4af9-bda4-0c92b8521901" containerName="init-config-reloader" Dec 10 11:14:31 crc kubenswrapper[4780]: I1210 11:14:31.622262 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="bef902c7-4e5f-4af9-bda4-0c92b8521901" containerName="init-config-reloader" Dec 10 11:14:31 crc kubenswrapper[4780]: I1210 11:14:31.622562 4780 memory_manager.go:354] "RemoveStaleState removing state" podUID="bef902c7-4e5f-4af9-bda4-0c92b8521901" containerName="prometheus" Dec 10 11:14:31 crc kubenswrapper[4780]: I1210 11:14:31.622589 4780 memory_manager.go:354] "RemoveStaleState removing state" podUID="bef902c7-4e5f-4af9-bda4-0c92b8521901" containerName="thanos-sidecar" Dec 10 11:14:31 crc kubenswrapper[4780]: I1210 11:14:31.622608 4780 memory_manager.go:354] "RemoveStaleState removing state" podUID="bef902c7-4e5f-4af9-bda4-0c92b8521901" containerName="config-reloader" Dec 10 11:14:31 crc kubenswrapper[4780]: I1210 11:14:31.626139 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/prometheus-metric-storage-0" Dec 10 11:14:31 crc kubenswrapper[4780]: I1210 11:14:31.639004 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage-thanos-prometheus-http-client-file" Dec 10 11:14:31 crc kubenswrapper[4780]: I1210 11:14:31.639422 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage" Dec 10 11:14:31 crc kubenswrapper[4780]: I1210 11:14:31.639590 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"prometheus-metric-storage-rulefiles-0" Dec 10 11:14:31 crc kubenswrapper[4780]: I1210 11:14:31.639995 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-metric-storage-prometheus-svc" Dec 10 11:14:31 crc kubenswrapper[4780]: I1210 11:14:31.639445 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage-web-config" Dec 10 11:14:31 crc kubenswrapper[4780]: I1210 11:14:31.640327 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"metric-storage-prometheus-dockercfg-xtn45" Dec 10 11:14:31 crc kubenswrapper[4780]: I1210 11:14:31.673072 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/prometheus-metric-storage-0"] Dec 10 11:14:31 crc kubenswrapper[4780]: I1210 11:14:31.715185 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage-tls-assets-0" Dec 10 11:14:31 crc kubenswrapper[4780]: I1210 11:14:31.827465 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"prometheus-metric-storage-0\" (UID: \"a75f82ba-b1e5-45cc-8e35-dd8c75c21247\") " pod="openstack/prometheus-metric-storage-0" Dec 10 11:14:31 crc kubenswrapper[4780]: I1210 11:14:31.827886 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/a75f82ba-b1e5-45cc-8e35-dd8c75c21247-web-config\") pod \"prometheus-metric-storage-0\" (UID: \"a75f82ba-b1e5-45cc-8e35-dd8c75c21247\") " pod="openstack/prometheus-metric-storage-0" Dec 10 11:14:31 crc kubenswrapper[4780]: I1210 11:14:31.828047 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8vkht\" (UniqueName: \"kubernetes.io/projected/a75f82ba-b1e5-45cc-8e35-dd8c75c21247-kube-api-access-8vkht\") pod \"prometheus-metric-storage-0\" (UID: \"a75f82ba-b1e5-45cc-8e35-dd8c75c21247\") " pod="openstack/prometheus-metric-storage-0" Dec 10 11:14:31 crc kubenswrapper[4780]: I1210 11:14:31.828199 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\" (UniqueName: \"kubernetes.io/secret/a75f82ba-b1e5-45cc-8e35-dd8c75c21247-web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\") pod \"prometheus-metric-storage-0\" (UID: \"a75f82ba-b1e5-45cc-8e35-dd8c75c21247\") " pod="openstack/prometheus-metric-storage-0" Dec 10 11:14:31 crc kubenswrapper[4780]: I1210 11:14:31.828506 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/a75f82ba-b1e5-45cc-8e35-dd8c75c21247-config-out\") pod \"prometheus-metric-storage-0\" (UID: 
\"a75f82ba-b1e5-45cc-8e35-dd8c75c21247\") " pod="openstack/prometheus-metric-storage-0" Dec 10 11:14:31 crc kubenswrapper[4780]: I1210 11:14:31.828685 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\" (UniqueName: \"kubernetes.io/secret/a75f82ba-b1e5-45cc-8e35-dd8c75c21247-web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\") pod \"prometheus-metric-storage-0\" (UID: \"a75f82ba-b1e5-45cc-8e35-dd8c75c21247\") " pod="openstack/prometheus-metric-storage-0" Dec 10 11:14:31 crc kubenswrapper[4780]: I1210 11:14:31.828870 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/a75f82ba-b1e5-45cc-8e35-dd8c75c21247-prometheus-metric-storage-rulefiles-0\") pod \"prometheus-metric-storage-0\" (UID: \"a75f82ba-b1e5-45cc-8e35-dd8c75c21247\") " pod="openstack/prometheus-metric-storage-0" Dec 10 11:14:31 crc kubenswrapper[4780]: I1210 11:14:31.829037 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a75f82ba-b1e5-45cc-8e35-dd8c75c21247-secret-combined-ca-bundle\") pod \"prometheus-metric-storage-0\" (UID: \"a75f82ba-b1e5-45cc-8e35-dd8c75c21247\") " pod="openstack/prometheus-metric-storage-0" Dec 10 11:14:31 crc kubenswrapper[4780]: I1210 11:14:31.829529 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/a75f82ba-b1e5-45cc-8e35-dd8c75c21247-config\") pod \"prometheus-metric-storage-0\" (UID: \"a75f82ba-b1e5-45cc-8e35-dd8c75c21247\") " pod="openstack/prometheus-metric-storage-0" Dec 10 11:14:31 crc kubenswrapper[4780]: I1210 11:14:31.829768 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/a75f82ba-b1e5-45cc-8e35-dd8c75c21247-thanos-prometheus-http-client-file\") pod \"prometheus-metric-storage-0\" (UID: \"a75f82ba-b1e5-45cc-8e35-dd8c75c21247\") " pod="openstack/prometheus-metric-storage-0" Dec 10 11:14:31 crc kubenswrapper[4780]: I1210 11:14:31.830224 4780 generic.go:334] "Generic (PLEG): container finished" podID="6cb4c5f3-27bc-4e51-be0f-7b52e6f34246" containerID="92f6b1ecdb29a0c9786a855167fe356b7ad610fdeca435f643b6762be6e1356b" exitCode=0 Dec 10 11:14:31 crc kubenswrapper[4780]: I1210 11:14:31.830178 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/a75f82ba-b1e5-45cc-8e35-dd8c75c21247-tls-assets\") pod \"prometheus-metric-storage-0\" (UID: \"a75f82ba-b1e5-45cc-8e35-dd8c75c21247\") " pod="openstack/prometheus-metric-storage-0" Dec 10 11:14:31 crc kubenswrapper[4780]: I1210 11:14:31.830283 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-764c5664d7-xvt7k" event={"ID":"6cb4c5f3-27bc-4e51-be0f-7b52e6f34246","Type":"ContainerDied","Data":"92f6b1ecdb29a0c9786a855167fe356b7ad610fdeca435f643b6762be6e1356b"} Dec 10 11:14:31 crc kubenswrapper[4780]: I1210 11:14:31.933376 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"thanos-prometheus-http-client-file\" (UniqueName: 
\"kubernetes.io/secret/a75f82ba-b1e5-45cc-8e35-dd8c75c21247-thanos-prometheus-http-client-file\") pod \"prometheus-metric-storage-0\" (UID: \"a75f82ba-b1e5-45cc-8e35-dd8c75c21247\") " pod="openstack/prometheus-metric-storage-0" Dec 10 11:14:31 crc kubenswrapper[4780]: I1210 11:14:31.933605 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/a75f82ba-b1e5-45cc-8e35-dd8c75c21247-tls-assets\") pod \"prometheus-metric-storage-0\" (UID: \"a75f82ba-b1e5-45cc-8e35-dd8c75c21247\") " pod="openstack/prometheus-metric-storage-0" Dec 10 11:14:31 crc kubenswrapper[4780]: I1210 11:14:31.933754 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"prometheus-metric-storage-0\" (UID: \"a75f82ba-b1e5-45cc-8e35-dd8c75c21247\") " pod="openstack/prometheus-metric-storage-0" Dec 10 11:14:31 crc kubenswrapper[4780]: I1210 11:14:31.933802 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/a75f82ba-b1e5-45cc-8e35-dd8c75c21247-web-config\") pod \"prometheus-metric-storage-0\" (UID: \"a75f82ba-b1e5-45cc-8e35-dd8c75c21247\") " pod="openstack/prometheus-metric-storage-0" Dec 10 11:14:31 crc kubenswrapper[4780]: I1210 11:14:31.933872 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8vkht\" (UniqueName: \"kubernetes.io/projected/a75f82ba-b1e5-45cc-8e35-dd8c75c21247-kube-api-access-8vkht\") pod \"prometheus-metric-storage-0\" (UID: \"a75f82ba-b1e5-45cc-8e35-dd8c75c21247\") " pod="openstack/prometheus-metric-storage-0" Dec 10 11:14:31 crc kubenswrapper[4780]: I1210 11:14:31.934016 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\" (UniqueName: \"kubernetes.io/secret/a75f82ba-b1e5-45cc-8e35-dd8c75c21247-web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\") pod \"prometheus-metric-storage-0\" (UID: \"a75f82ba-b1e5-45cc-8e35-dd8c75c21247\") " pod="openstack/prometheus-metric-storage-0" Dec 10 11:14:31 crc kubenswrapper[4780]: I1210 11:14:31.934097 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/a75f82ba-b1e5-45cc-8e35-dd8c75c21247-config-out\") pod \"prometheus-metric-storage-0\" (UID: \"a75f82ba-b1e5-45cc-8e35-dd8c75c21247\") " pod="openstack/prometheus-metric-storage-0" Dec 10 11:14:31 crc kubenswrapper[4780]: I1210 11:14:31.934191 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\" (UniqueName: \"kubernetes.io/secret/a75f82ba-b1e5-45cc-8e35-dd8c75c21247-web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\") pod \"prometheus-metric-storage-0\" (UID: \"a75f82ba-b1e5-45cc-8e35-dd8c75c21247\") " pod="openstack/prometheus-metric-storage-0" Dec 10 11:14:31 crc kubenswrapper[4780]: I1210 11:14:31.934304 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/a75f82ba-b1e5-45cc-8e35-dd8c75c21247-prometheus-metric-storage-rulefiles-0\") pod \"prometheus-metric-storage-0\" (UID: \"a75f82ba-b1e5-45cc-8e35-dd8c75c21247\") " pod="openstack/prometheus-metric-storage-0" Dec 10 11:14:31 crc 
kubenswrapper[4780]: I1210 11:14:31.934459 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a75f82ba-b1e5-45cc-8e35-dd8c75c21247-secret-combined-ca-bundle\") pod \"prometheus-metric-storage-0\" (UID: \"a75f82ba-b1e5-45cc-8e35-dd8c75c21247\") " pod="openstack/prometheus-metric-storage-0" Dec 10 11:14:31 crc kubenswrapper[4780]: I1210 11:14:31.934530 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/a75f82ba-b1e5-45cc-8e35-dd8c75c21247-config\") pod \"prometheus-metric-storage-0\" (UID: \"a75f82ba-b1e5-45cc-8e35-dd8c75c21247\") " pod="openstack/prometheus-metric-storage-0" Dec 10 11:14:31 crc kubenswrapper[4780]: I1210 11:14:31.936733 4780 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"prometheus-metric-storage-0\" (UID: \"a75f82ba-b1e5-45cc-8e35-dd8c75c21247\") device mount path \"/mnt/openstack/pv04\"" pod="openstack/prometheus-metric-storage-0" Dec 10 11:14:31 crc kubenswrapper[4780]: I1210 11:14:31.940032 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/a75f82ba-b1e5-45cc-8e35-dd8c75c21247-prometheus-metric-storage-rulefiles-0\") pod \"prometheus-metric-storage-0\" (UID: \"a75f82ba-b1e5-45cc-8e35-dd8c75c21247\") " pod="openstack/prometheus-metric-storage-0" Dec 10 11:14:31 crc kubenswrapper[4780]: I1210 11:14:31.945545 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/a75f82ba-b1e5-45cc-8e35-dd8c75c21247-config\") pod \"prometheus-metric-storage-0\" (UID: \"a75f82ba-b1e5-45cc-8e35-dd8c75c21247\") " pod="openstack/prometheus-metric-storage-0" Dec 10 11:14:31 crc kubenswrapper[4780]: I1210 11:14:31.947063 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/a75f82ba-b1e5-45cc-8e35-dd8c75c21247-web-config\") pod \"prometheus-metric-storage-0\" (UID: \"a75f82ba-b1e5-45cc-8e35-dd8c75c21247\") " pod="openstack/prometheus-metric-storage-0" Dec 10 11:14:31 crc kubenswrapper[4780]: I1210 11:14:31.958233 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/a75f82ba-b1e5-45cc-8e35-dd8c75c21247-thanos-prometheus-http-client-file\") pod \"prometheus-metric-storage-0\" (UID: \"a75f82ba-b1e5-45cc-8e35-dd8c75c21247\") " pod="openstack/prometheus-metric-storage-0" Dec 10 11:14:31 crc kubenswrapper[4780]: I1210 11:14:31.970766 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\" (UniqueName: \"kubernetes.io/secret/a75f82ba-b1e5-45cc-8e35-dd8c75c21247-web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\") pod \"prometheus-metric-storage-0\" (UID: \"a75f82ba-b1e5-45cc-8e35-dd8c75c21247\") " pod="openstack/prometheus-metric-storage-0" Dec 10 11:14:31 crc kubenswrapper[4780]: I1210 11:14:31.971585 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a75f82ba-b1e5-45cc-8e35-dd8c75c21247-secret-combined-ca-bundle\") pod \"prometheus-metric-storage-0\" (UID: \"a75f82ba-b1e5-45cc-8e35-dd8c75c21247\") " 
pod="openstack/prometheus-metric-storage-0" Dec 10 11:14:31 crc kubenswrapper[4780]: I1210 11:14:31.972475 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/a75f82ba-b1e5-45cc-8e35-dd8c75c21247-tls-assets\") pod \"prometheus-metric-storage-0\" (UID: \"a75f82ba-b1e5-45cc-8e35-dd8c75c21247\") " pod="openstack/prometheus-metric-storage-0" Dec 10 11:14:31 crc kubenswrapper[4780]: I1210 11:14:31.973115 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8vkht\" (UniqueName: \"kubernetes.io/projected/a75f82ba-b1e5-45cc-8e35-dd8c75c21247-kube-api-access-8vkht\") pod \"prometheus-metric-storage-0\" (UID: \"a75f82ba-b1e5-45cc-8e35-dd8c75c21247\") " pod="openstack/prometheus-metric-storage-0" Dec 10 11:14:31 crc kubenswrapper[4780]: I1210 11:14:31.973364 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\" (UniqueName: \"kubernetes.io/secret/a75f82ba-b1e5-45cc-8e35-dd8c75c21247-web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\") pod \"prometheus-metric-storage-0\" (UID: \"a75f82ba-b1e5-45cc-8e35-dd8c75c21247\") " pod="openstack/prometheus-metric-storage-0" Dec 10 11:14:31 crc kubenswrapper[4780]: I1210 11:14:31.977635 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/a75f82ba-b1e5-45cc-8e35-dd8c75c21247-config-out\") pod \"prometheus-metric-storage-0\" (UID: \"a75f82ba-b1e5-45cc-8e35-dd8c75c21247\") " pod="openstack/prometheus-metric-storage-0" Dec 10 11:14:32 crc kubenswrapper[4780]: I1210 11:14:32.004306 4780 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bef902c7-4e5f-4af9-bda4-0c92b8521901" path="/var/lib/kubelet/pods/bef902c7-4e5f-4af9-bda4-0c92b8521901/volumes" Dec 10 11:14:32 crc kubenswrapper[4780]: I1210 11:14:32.035299 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"prometheus-metric-storage-0\" (UID: \"a75f82ba-b1e5-45cc-8e35-dd8c75c21247\") " pod="openstack/prometheus-metric-storage-0" Dec 10 11:14:32 crc kubenswrapper[4780]: I1210 11:14:32.266907 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/prometheus-metric-storage-0" Dec 10 11:14:32 crc kubenswrapper[4780]: I1210 11:14:32.870120 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-764c5664d7-xvt7k" event={"ID":"6cb4c5f3-27bc-4e51-be0f-7b52e6f34246","Type":"ContainerStarted","Data":"f34dbe7649dd70ff645b8e146b369a60b6f3748ce56a163b50b5f3aabd51bb61"} Dec 10 11:14:32 crc kubenswrapper[4780]: I1210 11:14:32.872212 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-764c5664d7-xvt7k" Dec 10 11:14:32 crc kubenswrapper[4780]: I1210 11:14:32.917409 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-764c5664d7-xvt7k" podStartSLOduration=6.917381852 podStartE2EDuration="6.917381852s" podCreationTimestamp="2025-12-10 11:14:26 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 11:14:32.910708981 +0000 UTC m=+1777.764102434" watchObservedRunningTime="2025-12-10 11:14:32.917381852 +0000 UTC m=+1777.770775295" Dec 10 11:14:33 crc kubenswrapper[4780]: I1210 11:14:33.186752 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/prometheus-metric-storage-0"] Dec 10 11:14:33 crc kubenswrapper[4780]: W1210 11:14:33.191333 4780 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poda75f82ba_b1e5_45cc_8e35_dd8c75c21247.slice/crio-44c8ea70e5876f7d474f7ffcdf71a36b0da0a9b653d6e6188bcc1d256ee3d573 WatchSource:0}: Error finding container 44c8ea70e5876f7d474f7ffcdf71a36b0da0a9b653d6e6188bcc1d256ee3d573: Status 404 returned error can't find the container with id 44c8ea70e5876f7d474f7ffcdf71a36b0da0a9b653d6e6188bcc1d256ee3d573 Dec 10 11:14:34 crc kubenswrapper[4780]: I1210 11:14:34.117228 4780 scope.go:117] "RemoveContainer" containerID="90f38aa24e74e78a263a82b742bebf91991ff25f2212114904e33abbbd82de16" Dec 10 11:14:34 crc kubenswrapper[4780]: E1210 11:14:34.138710 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xhdr5_openshift-machine-config-operator(6bf1dca1-b191-4796-b326-baac53e84045)\"" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" podUID="6bf1dca1-b191-4796-b326-baac53e84045" Dec 10 11:14:34 crc kubenswrapper[4780]: I1210 11:14:34.204642 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"a75f82ba-b1e5-45cc-8e35-dd8c75c21247","Type":"ContainerStarted","Data":"44c8ea70e5876f7d474f7ffcdf71a36b0da0a9b653d6e6188bcc1d256ee3d573"} Dec 10 11:14:35 crc kubenswrapper[4780]: I1210 11:14:35.185751 4780 generic.go:334] "Generic (PLEG): container finished" podID="d94adae5-67fa-4707-9139-8bd4537a7e77" containerID="80a2100200c8fa235445633119dd010380c3a35c72924ce23b90fa974d894877" exitCode=0 Dec 10 11:14:35 crc kubenswrapper[4780]: I1210 11:14:35.185827 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-td4vw" event={"ID":"d94adae5-67fa-4707-9139-8bd4537a7e77","Type":"ContainerDied","Data":"80a2100200c8fa235445633119dd010380c3a35c72924ce23b90fa974d894877"} Dec 10 11:14:37 crc kubenswrapper[4780]: I1210 11:14:37.054248 4780 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-db-sync-td4vw" Dec 10 11:14:37 crc kubenswrapper[4780]: I1210 11:14:37.127898 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zmztv\" (UniqueName: \"kubernetes.io/projected/d94adae5-67fa-4707-9139-8bd4537a7e77-kube-api-access-zmztv\") pod \"d94adae5-67fa-4707-9139-8bd4537a7e77\" (UID: \"d94adae5-67fa-4707-9139-8bd4537a7e77\") " Dec 10 11:14:37 crc kubenswrapper[4780]: I1210 11:14:37.128138 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d94adae5-67fa-4707-9139-8bd4537a7e77-config-data\") pod \"d94adae5-67fa-4707-9139-8bd4537a7e77\" (UID: \"d94adae5-67fa-4707-9139-8bd4537a7e77\") " Dec 10 11:14:37 crc kubenswrapper[4780]: I1210 11:14:37.128240 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d94adae5-67fa-4707-9139-8bd4537a7e77-combined-ca-bundle\") pod \"d94adae5-67fa-4707-9139-8bd4537a7e77\" (UID: \"d94adae5-67fa-4707-9139-8bd4537a7e77\") " Dec 10 11:14:37 crc kubenswrapper[4780]: I1210 11:14:37.299219 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d94adae5-67fa-4707-9139-8bd4537a7e77-kube-api-access-zmztv" (OuterVolumeSpecName: "kube-api-access-zmztv") pod "d94adae5-67fa-4707-9139-8bd4537a7e77" (UID: "d94adae5-67fa-4707-9139-8bd4537a7e77"). InnerVolumeSpecName "kube-api-access-zmztv". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:14:37 crc kubenswrapper[4780]: I1210 11:14:37.335894 4780 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zmztv\" (UniqueName: \"kubernetes.io/projected/d94adae5-67fa-4707-9139-8bd4537a7e77-kube-api-access-zmztv\") on node \"crc\" DevicePath \"\"" Dec 10 11:14:37 crc kubenswrapper[4780]: I1210 11:14:37.338574 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d94adae5-67fa-4707-9139-8bd4537a7e77-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "d94adae5-67fa-4707-9139-8bd4537a7e77" (UID: "d94adae5-67fa-4707-9139-8bd4537a7e77"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:14:37 crc kubenswrapper[4780]: I1210 11:14:37.361377 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d94adae5-67fa-4707-9139-8bd4537a7e77-config-data" (OuterVolumeSpecName: "config-data") pod "d94adae5-67fa-4707-9139-8bd4537a7e77" (UID: "d94adae5-67fa-4707-9139-8bd4537a7e77"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:14:37 crc kubenswrapper[4780]: I1210 11:14:37.431013 4780 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-db-sync-td4vw" Dec 10 11:14:37 crc kubenswrapper[4780]: I1210 11:14:37.431002 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-td4vw" event={"ID":"d94adae5-67fa-4707-9139-8bd4537a7e77","Type":"ContainerDied","Data":"fe31ab5b6b1a30f201d932d20c3308c4a2acadfc37e3957461ca63ba944d0503"} Dec 10 11:14:37 crc kubenswrapper[4780]: I1210 11:14:37.431203 4780 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="fe31ab5b6b1a30f201d932d20c3308c4a2acadfc37e3957461ca63ba944d0503" Dec 10 11:14:37 crc kubenswrapper[4780]: I1210 11:14:37.439167 4780 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d94adae5-67fa-4707-9139-8bd4537a7e77-config-data\") on node \"crc\" DevicePath \"\"" Dec 10 11:14:37 crc kubenswrapper[4780]: I1210 11:14:37.439220 4780 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d94adae5-67fa-4707-9139-8bd4537a7e77-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 10 11:14:37 crc kubenswrapper[4780]: I1210 11:14:37.440001 4780 generic.go:334] "Generic (PLEG): container finished" podID="626bc022-de20-4c32-ad5b-bd22a54340ce" containerID="c0507c16b5ffcf039df86cb1cef6f8cb84cd34ce350c52b957e2bdc58d9d3487" exitCode=0 Dec 10 11:14:37 crc kubenswrapper[4780]: I1210 11:14:37.440087 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-m9l8h" event={"ID":"626bc022-de20-4c32-ad5b-bd22a54340ce","Type":"ContainerDied","Data":"c0507c16b5ffcf039df86cb1cef6f8cb84cd34ce350c52b957e2bdc58d9d3487"} Dec 10 11:14:37 crc kubenswrapper[4780]: I1210 11:14:37.807299 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-bootstrap-jzgb2"] Dec 10 11:14:37 crc kubenswrapper[4780]: E1210 11:14:37.807817 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d94adae5-67fa-4707-9139-8bd4537a7e77" containerName="keystone-db-sync" Dec 10 11:14:37 crc kubenswrapper[4780]: I1210 11:14:37.807837 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="d94adae5-67fa-4707-9139-8bd4537a7e77" containerName="keystone-db-sync" Dec 10 11:14:37 crc kubenswrapper[4780]: I1210 11:14:37.808105 4780 memory_manager.go:354] "RemoveStaleState removing state" podUID="d94adae5-67fa-4707-9139-8bd4537a7e77" containerName="keystone-db-sync" Dec 10 11:14:37 crc kubenswrapper[4780]: I1210 11:14:37.809005 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bootstrap-jzgb2" Dec 10 11:14:37 crc kubenswrapper[4780]: I1210 11:14:37.812344 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Dec 10 11:14:37 crc kubenswrapper[4780]: I1210 11:14:37.812498 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"osp-secret" Dec 10 11:14:37 crc kubenswrapper[4780]: I1210 11:14:37.812559 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-86tfj" Dec 10 11:14:37 crc kubenswrapper[4780]: I1210 11:14:37.812931 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Dec 10 11:14:37 crc kubenswrapper[4780]: I1210 11:14:37.817418 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Dec 10 11:14:37 crc kubenswrapper[4780]: I1210 11:14:37.846493 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-jzgb2"] Dec 10 11:14:37 crc kubenswrapper[4780]: I1210 11:14:37.852372 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/544e0fa4-06de-4bd3-8793-303a741a1e53-config-data\") pod \"keystone-bootstrap-jzgb2\" (UID: \"544e0fa4-06de-4bd3-8793-303a741a1e53\") " pod="openstack/keystone-bootstrap-jzgb2" Dec 10 11:14:37 crc kubenswrapper[4780]: I1210 11:14:37.852444 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-b5snk\" (UniqueName: \"kubernetes.io/projected/544e0fa4-06de-4bd3-8793-303a741a1e53-kube-api-access-b5snk\") pod \"keystone-bootstrap-jzgb2\" (UID: \"544e0fa4-06de-4bd3-8793-303a741a1e53\") " pod="openstack/keystone-bootstrap-jzgb2" Dec 10 11:14:37 crc kubenswrapper[4780]: I1210 11:14:37.852472 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/544e0fa4-06de-4bd3-8793-303a741a1e53-fernet-keys\") pod \"keystone-bootstrap-jzgb2\" (UID: \"544e0fa4-06de-4bd3-8793-303a741a1e53\") " pod="openstack/keystone-bootstrap-jzgb2" Dec 10 11:14:37 crc kubenswrapper[4780]: I1210 11:14:37.852506 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/544e0fa4-06de-4bd3-8793-303a741a1e53-scripts\") pod \"keystone-bootstrap-jzgb2\" (UID: \"544e0fa4-06de-4bd3-8793-303a741a1e53\") " pod="openstack/keystone-bootstrap-jzgb2" Dec 10 11:14:37 crc kubenswrapper[4780]: I1210 11:14:37.852562 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/544e0fa4-06de-4bd3-8793-303a741a1e53-credential-keys\") pod \"keystone-bootstrap-jzgb2\" (UID: \"544e0fa4-06de-4bd3-8793-303a741a1e53\") " pod="openstack/keystone-bootstrap-jzgb2" Dec 10 11:14:37 crc kubenswrapper[4780]: I1210 11:14:37.853293 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/544e0fa4-06de-4bd3-8793-303a741a1e53-combined-ca-bundle\") pod \"keystone-bootstrap-jzgb2\" (UID: \"544e0fa4-06de-4bd3-8793-303a741a1e53\") " pod="openstack/keystone-bootstrap-jzgb2" Dec 10 11:14:37 crc kubenswrapper[4780]: I1210 11:14:37.936531 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" 
pods=["openstack/dnsmasq-dns-764c5664d7-xvt7k"] Dec 10 11:14:37 crc kubenswrapper[4780]: I1210 11:14:37.937010 4780 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-764c5664d7-xvt7k" podUID="6cb4c5f3-27bc-4e51-be0f-7b52e6f34246" containerName="dnsmasq-dns" containerID="cri-o://f34dbe7649dd70ff645b8e146b369a60b6f3748ce56a163b50b5f3aabd51bb61" gracePeriod=10 Dec 10 11:14:38 crc kubenswrapper[4780]: I1210 11:14:37.943194 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-764c5664d7-xvt7k" Dec 10 11:14:38 crc kubenswrapper[4780]: I1210 11:14:37.956486 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/544e0fa4-06de-4bd3-8793-303a741a1e53-combined-ca-bundle\") pod \"keystone-bootstrap-jzgb2\" (UID: \"544e0fa4-06de-4bd3-8793-303a741a1e53\") " pod="openstack/keystone-bootstrap-jzgb2" Dec 10 11:14:38 crc kubenswrapper[4780]: I1210 11:14:37.956613 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/544e0fa4-06de-4bd3-8793-303a741a1e53-config-data\") pod \"keystone-bootstrap-jzgb2\" (UID: \"544e0fa4-06de-4bd3-8793-303a741a1e53\") " pod="openstack/keystone-bootstrap-jzgb2" Dec 10 11:14:38 crc kubenswrapper[4780]: I1210 11:14:37.956651 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/544e0fa4-06de-4bd3-8793-303a741a1e53-fernet-keys\") pod \"keystone-bootstrap-jzgb2\" (UID: \"544e0fa4-06de-4bd3-8793-303a741a1e53\") " pod="openstack/keystone-bootstrap-jzgb2" Dec 10 11:14:38 crc kubenswrapper[4780]: I1210 11:14:37.956671 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-b5snk\" (UniqueName: \"kubernetes.io/projected/544e0fa4-06de-4bd3-8793-303a741a1e53-kube-api-access-b5snk\") pod \"keystone-bootstrap-jzgb2\" (UID: \"544e0fa4-06de-4bd3-8793-303a741a1e53\") " pod="openstack/keystone-bootstrap-jzgb2" Dec 10 11:14:38 crc kubenswrapper[4780]: I1210 11:14:37.956716 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/544e0fa4-06de-4bd3-8793-303a741a1e53-scripts\") pod \"keystone-bootstrap-jzgb2\" (UID: \"544e0fa4-06de-4bd3-8793-303a741a1e53\") " pod="openstack/keystone-bootstrap-jzgb2" Dec 10 11:14:38 crc kubenswrapper[4780]: I1210 11:14:37.956770 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/544e0fa4-06de-4bd3-8793-303a741a1e53-credential-keys\") pod \"keystone-bootstrap-jzgb2\" (UID: \"544e0fa4-06de-4bd3-8793-303a741a1e53\") " pod="openstack/keystone-bootstrap-jzgb2" Dec 10 11:14:38 crc kubenswrapper[4780]: I1210 11:14:37.990876 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/544e0fa4-06de-4bd3-8793-303a741a1e53-scripts\") pod \"keystone-bootstrap-jzgb2\" (UID: \"544e0fa4-06de-4bd3-8793-303a741a1e53\") " pod="openstack/keystone-bootstrap-jzgb2" Dec 10 11:14:38 crc kubenswrapper[4780]: I1210 11:14:38.013723 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/544e0fa4-06de-4bd3-8793-303a741a1e53-credential-keys\") pod \"keystone-bootstrap-jzgb2\" (UID: \"544e0fa4-06de-4bd3-8793-303a741a1e53\") " 
pod="openstack/keystone-bootstrap-jzgb2" Dec 10 11:14:38 crc kubenswrapper[4780]: I1210 11:14:38.015673 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/544e0fa4-06de-4bd3-8793-303a741a1e53-fernet-keys\") pod \"keystone-bootstrap-jzgb2\" (UID: \"544e0fa4-06de-4bd3-8793-303a741a1e53\") " pod="openstack/keystone-bootstrap-jzgb2" Dec 10 11:14:38 crc kubenswrapper[4780]: I1210 11:14:38.019458 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-db-sync-vcml6"] Dec 10 11:14:38 crc kubenswrapper[4780]: I1210 11:14:38.021701 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-db-sync-vcml6" Dec 10 11:14:38 crc kubenswrapper[4780]: I1210 11:14:38.027552 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-b5snk\" (UniqueName: \"kubernetes.io/projected/544e0fa4-06de-4bd3-8793-303a741a1e53-kube-api-access-b5snk\") pod \"keystone-bootstrap-jzgb2\" (UID: \"544e0fa4-06de-4bd3-8793-303a741a1e53\") " pod="openstack/keystone-bootstrap-jzgb2" Dec 10 11:14:38 crc kubenswrapper[4780]: I1210 11:14:38.030202 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"heat-heat-dockercfg-9plgw" Dec 10 11:14:38 crc kubenswrapper[4780]: I1210 11:14:38.030564 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"heat-config-data" Dec 10 11:14:38 crc kubenswrapper[4780]: I1210 11:14:38.031506 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/544e0fa4-06de-4bd3-8793-303a741a1e53-config-data\") pod \"keystone-bootstrap-jzgb2\" (UID: \"544e0fa4-06de-4bd3-8793-303a741a1e53\") " pod="openstack/keystone-bootstrap-jzgb2" Dec 10 11:14:38 crc kubenswrapper[4780]: I1210 11:14:38.061447 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/253b0c60-6211-4e23-921c-b8c34ccc4e25-combined-ca-bundle\") pod \"heat-db-sync-vcml6\" (UID: \"253b0c60-6211-4e23-921c-b8c34ccc4e25\") " pod="openstack/heat-db-sync-vcml6" Dec 10 11:14:38 crc kubenswrapper[4780]: I1210 11:14:38.061588 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2hhlf\" (UniqueName: \"kubernetes.io/projected/253b0c60-6211-4e23-921c-b8c34ccc4e25-kube-api-access-2hhlf\") pod \"heat-db-sync-vcml6\" (UID: \"253b0c60-6211-4e23-921c-b8c34ccc4e25\") " pod="openstack/heat-db-sync-vcml6" Dec 10 11:14:38 crc kubenswrapper[4780]: I1210 11:14:38.061834 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/253b0c60-6211-4e23-921c-b8c34ccc4e25-config-data\") pod \"heat-db-sync-vcml6\" (UID: \"253b0c60-6211-4e23-921c-b8c34ccc4e25\") " pod="openstack/heat-db-sync-vcml6" Dec 10 11:14:38 crc kubenswrapper[4780]: I1210 11:14:38.089084 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-db-sync-vcml6"] Dec 10 11:14:38 crc kubenswrapper[4780]: I1210 11:14:38.138237 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/544e0fa4-06de-4bd3-8793-303a741a1e53-combined-ca-bundle\") pod \"keystone-bootstrap-jzgb2\" (UID: \"544e0fa4-06de-4bd3-8793-303a741a1e53\") " pod="openstack/keystone-bootstrap-jzgb2" Dec 10 11:14:38 crc 
kubenswrapper[4780]: I1210 11:14:38.164674 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-5959f8865f-vxt8c"] Dec 10 11:14:38 crc kubenswrapper[4780]: I1210 11:14:38.165308 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/253b0c60-6211-4e23-921c-b8c34ccc4e25-config-data\") pod \"heat-db-sync-vcml6\" (UID: \"253b0c60-6211-4e23-921c-b8c34ccc4e25\") " pod="openstack/heat-db-sync-vcml6" Dec 10 11:14:38 crc kubenswrapper[4780]: I1210 11:14:38.165512 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/253b0c60-6211-4e23-921c-b8c34ccc4e25-combined-ca-bundle\") pod \"heat-db-sync-vcml6\" (UID: \"253b0c60-6211-4e23-921c-b8c34ccc4e25\") " pod="openstack/heat-db-sync-vcml6" Dec 10 11:14:38 crc kubenswrapper[4780]: I1210 11:14:38.165605 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2hhlf\" (UniqueName: \"kubernetes.io/projected/253b0c60-6211-4e23-921c-b8c34ccc4e25-kube-api-access-2hhlf\") pod \"heat-db-sync-vcml6\" (UID: \"253b0c60-6211-4e23-921c-b8c34ccc4e25\") " pod="openstack/heat-db-sync-vcml6" Dec 10 11:14:38 crc kubenswrapper[4780]: I1210 11:14:38.182395 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5959f8865f-vxt8c" Dec 10 11:14:38 crc kubenswrapper[4780]: I1210 11:14:38.195967 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/253b0c60-6211-4e23-921c-b8c34ccc4e25-config-data\") pod \"heat-db-sync-vcml6\" (UID: \"253b0c60-6211-4e23-921c-b8c34ccc4e25\") " pod="openstack/heat-db-sync-vcml6" Dec 10 11:14:38 crc kubenswrapper[4780]: I1210 11:14:38.225431 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2hhlf\" (UniqueName: \"kubernetes.io/projected/253b0c60-6211-4e23-921c-b8c34ccc4e25-kube-api-access-2hhlf\") pod \"heat-db-sync-vcml6\" (UID: \"253b0c60-6211-4e23-921c-b8c34ccc4e25\") " pod="openstack/heat-db-sync-vcml6" Dec 10 11:14:38 crc kubenswrapper[4780]: I1210 11:14:38.230950 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/253b0c60-6211-4e23-921c-b8c34ccc4e25-combined-ca-bundle\") pod \"heat-db-sync-vcml6\" (UID: \"253b0c60-6211-4e23-921c-b8c34ccc4e25\") " pod="openstack/heat-db-sync-vcml6" Dec 10 11:14:38 crc kubenswrapper[4780]: I1210 11:14:38.292043 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5959f8865f-vxt8c"] Dec 10 11:14:38 crc kubenswrapper[4780]: I1210 11:14:38.337964 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-db-sync-wqs8s"] Dec 10 11:14:38 crc kubenswrapper[4780]: I1210 11:14:38.340334 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-db-sync-wqs8s" Dec 10 11:14:38 crc kubenswrapper[4780]: I1210 11:14:38.349723 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-config-data" Dec 10 11:14:38 crc kubenswrapper[4780]: I1210 11:14:38.353071 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-cinder-dockercfg-hdqfg" Dec 10 11:14:38 crc kubenswrapper[4780]: I1210 11:14:38.354510 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scripts" Dec 10 11:14:38 crc kubenswrapper[4780]: I1210 11:14:38.374191 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/c36308e6-e280-49e1-820c-5649e6ef0077-ovsdbserver-sb\") pod \"dnsmasq-dns-5959f8865f-vxt8c\" (UID: \"c36308e6-e280-49e1-820c-5649e6ef0077\") " pod="openstack/dnsmasq-dns-5959f8865f-vxt8c" Dec 10 11:14:38 crc kubenswrapper[4780]: I1210 11:14:38.374313 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/c36308e6-e280-49e1-820c-5649e6ef0077-ovsdbserver-nb\") pod \"dnsmasq-dns-5959f8865f-vxt8c\" (UID: \"c36308e6-e280-49e1-820c-5649e6ef0077\") " pod="openstack/dnsmasq-dns-5959f8865f-vxt8c" Dec 10 11:14:38 crc kubenswrapper[4780]: I1210 11:14:38.374382 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6l2g5\" (UniqueName: \"kubernetes.io/projected/c36308e6-e280-49e1-820c-5649e6ef0077-kube-api-access-6l2g5\") pod \"dnsmasq-dns-5959f8865f-vxt8c\" (UID: \"c36308e6-e280-49e1-820c-5649e6ef0077\") " pod="openstack/dnsmasq-dns-5959f8865f-vxt8c" Dec 10 11:14:38 crc kubenswrapper[4780]: I1210 11:14:38.374461 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/c36308e6-e280-49e1-820c-5649e6ef0077-dns-swift-storage-0\") pod \"dnsmasq-dns-5959f8865f-vxt8c\" (UID: \"c36308e6-e280-49e1-820c-5649e6ef0077\") " pod="openstack/dnsmasq-dns-5959f8865f-vxt8c" Dec 10 11:14:38 crc kubenswrapper[4780]: I1210 11:14:38.374494 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/c36308e6-e280-49e1-820c-5649e6ef0077-dns-svc\") pod \"dnsmasq-dns-5959f8865f-vxt8c\" (UID: \"c36308e6-e280-49e1-820c-5649e6ef0077\") " pod="openstack/dnsmasq-dns-5959f8865f-vxt8c" Dec 10 11:14:38 crc kubenswrapper[4780]: I1210 11:14:38.374540 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c36308e6-e280-49e1-820c-5649e6ef0077-config\") pod \"dnsmasq-dns-5959f8865f-vxt8c\" (UID: \"c36308e6-e280-49e1-820c-5649e6ef0077\") " pod="openstack/dnsmasq-dns-5959f8865f-vxt8c" Dec 10 11:14:38 crc kubenswrapper[4780]: I1210 11:14:38.380988 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-sync-wqs8s"] Dec 10 11:14:38 crc kubenswrapper[4780]: I1210 11:14:38.415048 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-db-sync-cwb2h"] Dec 10 11:14:38 crc kubenswrapper[4780]: I1210 11:14:38.417688 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-db-sync-cwb2h" Dec 10 11:14:38 crc kubenswrapper[4780]: I1210 11:14:38.423950 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-httpd-config" Dec 10 11:14:38 crc kubenswrapper[4780]: I1210 11:14:38.424584 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-neutron-dockercfg-5dbzk" Dec 10 11:14:38 crc kubenswrapper[4780]: I1210 11:14:38.435849 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-config" Dec 10 11:14:38 crc kubenswrapper[4780]: I1210 11:14:38.437492 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-jzgb2" Dec 10 11:14:38 crc kubenswrapper[4780]: I1210 11:14:38.452524 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-sync-cwb2h"] Dec 10 11:14:38 crc kubenswrapper[4780]: I1210 11:14:38.490265 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/c36308e6-e280-49e1-820c-5649e6ef0077-dns-svc\") pod \"dnsmasq-dns-5959f8865f-vxt8c\" (UID: \"c36308e6-e280-49e1-820c-5649e6ef0077\") " pod="openstack/dnsmasq-dns-5959f8865f-vxt8c" Dec 10 11:14:38 crc kubenswrapper[4780]: I1210 11:14:38.490412 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f2a06360-9c37-4ae4-8148-73c37d2be5a4-config-data\") pod \"cinder-db-sync-wqs8s\" (UID: \"f2a06360-9c37-4ae4-8148-73c37d2be5a4\") " pod="openstack/cinder-db-sync-wqs8s" Dec 10 11:14:38 crc kubenswrapper[4780]: I1210 11:14:38.490493 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c36308e6-e280-49e1-820c-5649e6ef0077-config\") pod \"dnsmasq-dns-5959f8865f-vxt8c\" (UID: \"c36308e6-e280-49e1-820c-5649e6ef0077\") " pod="openstack/dnsmasq-dns-5959f8865f-vxt8c" Dec 10 11:14:38 crc kubenswrapper[4780]: I1210 11:14:38.490738 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/c36308e6-e280-49e1-820c-5649e6ef0077-ovsdbserver-sb\") pod \"dnsmasq-dns-5959f8865f-vxt8c\" (UID: \"c36308e6-e280-49e1-820c-5649e6ef0077\") " pod="openstack/dnsmasq-dns-5959f8865f-vxt8c" Dec 10 11:14:38 crc kubenswrapper[4780]: I1210 11:14:38.490808 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f2a06360-9c37-4ae4-8148-73c37d2be5a4-scripts\") pod \"cinder-db-sync-wqs8s\" (UID: \"f2a06360-9c37-4ae4-8148-73c37d2be5a4\") " pod="openstack/cinder-db-sync-wqs8s" Dec 10 11:14:38 crc kubenswrapper[4780]: I1210 11:14:38.490874 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nhq8m\" (UniqueName: \"kubernetes.io/projected/f2a06360-9c37-4ae4-8148-73c37d2be5a4-kube-api-access-nhq8m\") pod \"cinder-db-sync-wqs8s\" (UID: \"f2a06360-9c37-4ae4-8148-73c37d2be5a4\") " pod="openstack/cinder-db-sync-wqs8s" Dec 10 11:14:38 crc kubenswrapper[4780]: I1210 11:14:38.491014 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/f2a06360-9c37-4ae4-8148-73c37d2be5a4-db-sync-config-data\") pod \"cinder-db-sync-wqs8s\" (UID: 
\"f2a06360-9c37-4ae4-8148-73c37d2be5a4\") " pod="openstack/cinder-db-sync-wqs8s" Dec 10 11:14:38 crc kubenswrapper[4780]: I1210 11:14:38.491065 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/c36308e6-e280-49e1-820c-5649e6ef0077-ovsdbserver-nb\") pod \"dnsmasq-dns-5959f8865f-vxt8c\" (UID: \"c36308e6-e280-49e1-820c-5649e6ef0077\") " pod="openstack/dnsmasq-dns-5959f8865f-vxt8c" Dec 10 11:14:38 crc kubenswrapper[4780]: I1210 11:14:38.491160 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/f2a06360-9c37-4ae4-8148-73c37d2be5a4-etc-machine-id\") pod \"cinder-db-sync-wqs8s\" (UID: \"f2a06360-9c37-4ae4-8148-73c37d2be5a4\") " pod="openstack/cinder-db-sync-wqs8s" Dec 10 11:14:38 crc kubenswrapper[4780]: I1210 11:14:38.491204 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6l2g5\" (UniqueName: \"kubernetes.io/projected/c36308e6-e280-49e1-820c-5649e6ef0077-kube-api-access-6l2g5\") pod \"dnsmasq-dns-5959f8865f-vxt8c\" (UID: \"c36308e6-e280-49e1-820c-5649e6ef0077\") " pod="openstack/dnsmasq-dns-5959f8865f-vxt8c" Dec 10 11:14:38 crc kubenswrapper[4780]: I1210 11:14:38.491270 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f2a06360-9c37-4ae4-8148-73c37d2be5a4-combined-ca-bundle\") pod \"cinder-db-sync-wqs8s\" (UID: \"f2a06360-9c37-4ae4-8148-73c37d2be5a4\") " pod="openstack/cinder-db-sync-wqs8s" Dec 10 11:14:38 crc kubenswrapper[4780]: I1210 11:14:38.491369 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/c36308e6-e280-49e1-820c-5649e6ef0077-dns-swift-storage-0\") pod \"dnsmasq-dns-5959f8865f-vxt8c\" (UID: \"c36308e6-e280-49e1-820c-5649e6ef0077\") " pod="openstack/dnsmasq-dns-5959f8865f-vxt8c" Dec 10 11:14:38 crc kubenswrapper[4780]: I1210 11:14:38.498844 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/c36308e6-e280-49e1-820c-5649e6ef0077-dns-swift-storage-0\") pod \"dnsmasq-dns-5959f8865f-vxt8c\" (UID: \"c36308e6-e280-49e1-820c-5649e6ef0077\") " pod="openstack/dnsmasq-dns-5959f8865f-vxt8c" Dec 10 11:14:38 crc kubenswrapper[4780]: I1210 11:14:38.499525 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/c36308e6-e280-49e1-820c-5649e6ef0077-dns-svc\") pod \"dnsmasq-dns-5959f8865f-vxt8c\" (UID: \"c36308e6-e280-49e1-820c-5649e6ef0077\") " pod="openstack/dnsmasq-dns-5959f8865f-vxt8c" Dec 10 11:14:38 crc kubenswrapper[4780]: I1210 11:14:38.500516 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c36308e6-e280-49e1-820c-5649e6ef0077-config\") pod \"dnsmasq-dns-5959f8865f-vxt8c\" (UID: \"c36308e6-e280-49e1-820c-5649e6ef0077\") " pod="openstack/dnsmasq-dns-5959f8865f-vxt8c" Dec 10 11:14:38 crc kubenswrapper[4780]: I1210 11:14:38.501134 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/c36308e6-e280-49e1-820c-5649e6ef0077-ovsdbserver-sb\") pod \"dnsmasq-dns-5959f8865f-vxt8c\" (UID: \"c36308e6-e280-49e1-820c-5649e6ef0077\") " 
pod="openstack/dnsmasq-dns-5959f8865f-vxt8c" Dec 10 11:14:38 crc kubenswrapper[4780]: I1210 11:14:38.501424 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/c36308e6-e280-49e1-820c-5649e6ef0077-ovsdbserver-nb\") pod \"dnsmasq-dns-5959f8865f-vxt8c\" (UID: \"c36308e6-e280-49e1-820c-5649e6ef0077\") " pod="openstack/dnsmasq-dns-5959f8865f-vxt8c" Dec 10 11:14:38 crc kubenswrapper[4780]: I1210 11:14:38.502379 4780 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-764c5664d7-xvt7k" podUID="6cb4c5f3-27bc-4e51-be0f-7b52e6f34246" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.175:5353: connect: connection refused" Dec 10 11:14:38 crc kubenswrapper[4780]: I1210 11:14:38.678135 4780 generic.go:334] "Generic (PLEG): container finished" podID="6cb4c5f3-27bc-4e51-be0f-7b52e6f34246" containerID="f34dbe7649dd70ff645b8e146b369a60b6f3748ce56a163b50b5f3aabd51bb61" exitCode=0 Dec 10 11:14:38 crc kubenswrapper[4780]: I1210 11:14:38.678447 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-764c5664d7-xvt7k" event={"ID":"6cb4c5f3-27bc-4e51-be0f-7b52e6f34246","Type":"ContainerDied","Data":"f34dbe7649dd70ff645b8e146b369a60b6f3748ce56a163b50b5f3aabd51bb61"} Dec 10 11:14:38 crc kubenswrapper[4780]: I1210 11:14:38.696687 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/0613207e-c071-4295-a536-f037ee6fe446-config\") pod \"neutron-db-sync-cwb2h\" (UID: \"0613207e-c071-4295-a536-f037ee6fe446\") " pod="openstack/neutron-db-sync-cwb2h" Dec 10 11:14:38 crc kubenswrapper[4780]: I1210 11:14:38.697489 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0613207e-c071-4295-a536-f037ee6fe446-combined-ca-bundle\") pod \"neutron-db-sync-cwb2h\" (UID: \"0613207e-c071-4295-a536-f037ee6fe446\") " pod="openstack/neutron-db-sync-cwb2h" Dec 10 11:14:38 crc kubenswrapper[4780]: I1210 11:14:38.697561 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f2a06360-9c37-4ae4-8148-73c37d2be5a4-scripts\") pod \"cinder-db-sync-wqs8s\" (UID: \"f2a06360-9c37-4ae4-8148-73c37d2be5a4\") " pod="openstack/cinder-db-sync-wqs8s" Dec 10 11:14:38 crc kubenswrapper[4780]: I1210 11:14:38.697643 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-72d29\" (UniqueName: \"kubernetes.io/projected/0613207e-c071-4295-a536-f037ee6fe446-kube-api-access-72d29\") pod \"neutron-db-sync-cwb2h\" (UID: \"0613207e-c071-4295-a536-f037ee6fe446\") " pod="openstack/neutron-db-sync-cwb2h" Dec 10 11:14:38 crc kubenswrapper[4780]: I1210 11:14:38.697691 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nhq8m\" (UniqueName: \"kubernetes.io/projected/f2a06360-9c37-4ae4-8148-73c37d2be5a4-kube-api-access-nhq8m\") pod \"cinder-db-sync-wqs8s\" (UID: \"f2a06360-9c37-4ae4-8148-73c37d2be5a4\") " pod="openstack/cinder-db-sync-wqs8s" Dec 10 11:14:38 crc kubenswrapper[4780]: I1210 11:14:38.697853 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/f2a06360-9c37-4ae4-8148-73c37d2be5a4-db-sync-config-data\") pod \"cinder-db-sync-wqs8s\" (UID: 
\"f2a06360-9c37-4ae4-8148-73c37d2be5a4\") " pod="openstack/cinder-db-sync-wqs8s" Dec 10 11:14:38 crc kubenswrapper[4780]: I1210 11:14:38.700618 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/f2a06360-9c37-4ae4-8148-73c37d2be5a4-etc-machine-id\") pod \"cinder-db-sync-wqs8s\" (UID: \"f2a06360-9c37-4ae4-8148-73c37d2be5a4\") " pod="openstack/cinder-db-sync-wqs8s" Dec 10 11:14:38 crc kubenswrapper[4780]: I1210 11:14:38.731111 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/f2a06360-9c37-4ae4-8148-73c37d2be5a4-etc-machine-id\") pod \"cinder-db-sync-wqs8s\" (UID: \"f2a06360-9c37-4ae4-8148-73c37d2be5a4\") " pod="openstack/cinder-db-sync-wqs8s" Dec 10 11:14:38 crc kubenswrapper[4780]: I1210 11:14:38.731442 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f2a06360-9c37-4ae4-8148-73c37d2be5a4-combined-ca-bundle\") pod \"cinder-db-sync-wqs8s\" (UID: \"f2a06360-9c37-4ae4-8148-73c37d2be5a4\") " pod="openstack/cinder-db-sync-wqs8s" Dec 10 11:14:38 crc kubenswrapper[4780]: I1210 11:14:38.731812 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f2a06360-9c37-4ae4-8148-73c37d2be5a4-config-data\") pod \"cinder-db-sync-wqs8s\" (UID: \"f2a06360-9c37-4ae4-8148-73c37d2be5a4\") " pod="openstack/cinder-db-sync-wqs8s" Dec 10 11:14:38 crc kubenswrapper[4780]: I1210 11:14:38.767051 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-db-sync-hn59m"] Dec 10 11:14:38 crc kubenswrapper[4780]: I1210 11:14:38.784416 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-db-sync-hn59m" Dec 10 11:14:38 crc kubenswrapper[4780]: I1210 11:14:38.805743 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-config-data" Dec 10 11:14:38 crc kubenswrapper[4780]: I1210 11:14:38.838465 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-barbican-dockercfg-np76s" Dec 10 11:14:38 crc kubenswrapper[4780]: I1210 11:14:38.841251 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1af5d3ff-e54a-4a1a-89f4-b105fe6dbbd9-combined-ca-bundle\") pod \"barbican-db-sync-hn59m\" (UID: \"1af5d3ff-e54a-4a1a-89f4-b105fe6dbbd9\") " pod="openstack/barbican-db-sync-hn59m" Dec 10 11:14:38 crc kubenswrapper[4780]: I1210 11:14:38.841404 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/0613207e-c071-4295-a536-f037ee6fe446-config\") pod \"neutron-db-sync-cwb2h\" (UID: \"0613207e-c071-4295-a536-f037ee6fe446\") " pod="openstack/neutron-db-sync-cwb2h" Dec 10 11:14:38 crc kubenswrapper[4780]: I1210 11:14:38.841439 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0613207e-c071-4295-a536-f037ee6fe446-combined-ca-bundle\") pod \"neutron-db-sync-cwb2h\" (UID: \"0613207e-c071-4295-a536-f037ee6fe446\") " pod="openstack/neutron-db-sync-cwb2h" Dec 10 11:14:38 crc kubenswrapper[4780]: I1210 11:14:38.841491 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-72d29\" (UniqueName: \"kubernetes.io/projected/0613207e-c071-4295-a536-f037ee6fe446-kube-api-access-72d29\") pod \"neutron-db-sync-cwb2h\" (UID: \"0613207e-c071-4295-a536-f037ee6fe446\") " pod="openstack/neutron-db-sync-cwb2h" Dec 10 11:14:38 crc kubenswrapper[4780]: I1210 11:14:38.841699 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/1af5d3ff-e54a-4a1a-89f4-b105fe6dbbd9-db-sync-config-data\") pod \"barbican-db-sync-hn59m\" (UID: \"1af5d3ff-e54a-4a1a-89f4-b105fe6dbbd9\") " pod="openstack/barbican-db-sync-hn59m" Dec 10 11:14:38 crc kubenswrapper[4780]: I1210 11:14:38.841771 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gtvp7\" (UniqueName: \"kubernetes.io/projected/1af5d3ff-e54a-4a1a-89f4-b105fe6dbbd9-kube-api-access-gtvp7\") pod \"barbican-db-sync-hn59m\" (UID: \"1af5d3ff-e54a-4a1a-89f4-b105fe6dbbd9\") " pod="openstack/barbican-db-sync-hn59m" Dec 10 11:14:38 crc kubenswrapper[4780]: I1210 11:14:38.853256 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/0613207e-c071-4295-a536-f037ee6fe446-config\") pod \"neutron-db-sync-cwb2h\" (UID: \"0613207e-c071-4295-a536-f037ee6fe446\") " pod="openstack/neutron-db-sync-cwb2h" Dec 10 11:14:38 crc kubenswrapper[4780]: I1210 11:14:38.867204 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/f2a06360-9c37-4ae4-8148-73c37d2be5a4-db-sync-config-data\") pod \"cinder-db-sync-wqs8s\" (UID: \"f2a06360-9c37-4ae4-8148-73c37d2be5a4\") " pod="openstack/cinder-db-sync-wqs8s" Dec 10 11:14:38 crc kubenswrapper[4780]: I1210 11:14:38.867334 4780 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6l2g5\" (UniqueName: \"kubernetes.io/projected/c36308e6-e280-49e1-820c-5649e6ef0077-kube-api-access-6l2g5\") pod \"dnsmasq-dns-5959f8865f-vxt8c\" (UID: \"c36308e6-e280-49e1-820c-5649e6ef0077\") " pod="openstack/dnsmasq-dns-5959f8865f-vxt8c" Dec 10 11:14:38 crc kubenswrapper[4780]: I1210 11:14:38.867547 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f2a06360-9c37-4ae4-8148-73c37d2be5a4-scripts\") pod \"cinder-db-sync-wqs8s\" (UID: \"f2a06360-9c37-4ae4-8148-73c37d2be5a4\") " pod="openstack/cinder-db-sync-wqs8s" Dec 10 11:14:38 crc kubenswrapper[4780]: I1210 11:14:38.883013 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nhq8m\" (UniqueName: \"kubernetes.io/projected/f2a06360-9c37-4ae4-8148-73c37d2be5a4-kube-api-access-nhq8m\") pod \"cinder-db-sync-wqs8s\" (UID: \"f2a06360-9c37-4ae4-8148-73c37d2be5a4\") " pod="openstack/cinder-db-sync-wqs8s" Dec 10 11:14:38 crc kubenswrapper[4780]: I1210 11:14:38.884222 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f2a06360-9c37-4ae4-8148-73c37d2be5a4-config-data\") pod \"cinder-db-sync-wqs8s\" (UID: \"f2a06360-9c37-4ae4-8148-73c37d2be5a4\") " pod="openstack/cinder-db-sync-wqs8s" Dec 10 11:14:38 crc kubenswrapper[4780]: I1210 11:14:38.887912 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f2a06360-9c37-4ae4-8148-73c37d2be5a4-combined-ca-bundle\") pod \"cinder-db-sync-wqs8s\" (UID: \"f2a06360-9c37-4ae4-8148-73c37d2be5a4\") " pod="openstack/cinder-db-sync-wqs8s" Dec 10 11:14:38 crc kubenswrapper[4780]: I1210 11:14:38.906439 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0613207e-c071-4295-a536-f037ee6fe446-combined-ca-bundle\") pod \"neutron-db-sync-cwb2h\" (UID: \"0613207e-c071-4295-a536-f037ee6fe446\") " pod="openstack/neutron-db-sync-cwb2h" Dec 10 11:14:38 crc kubenswrapper[4780]: I1210 11:14:38.919843 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-72d29\" (UniqueName: \"kubernetes.io/projected/0613207e-c071-4295-a536-f037ee6fe446-kube-api-access-72d29\") pod \"neutron-db-sync-cwb2h\" (UID: \"0613207e-c071-4295-a536-f037ee6fe446\") " pod="openstack/neutron-db-sync-cwb2h" Dec 10 11:14:38 crc kubenswrapper[4780]: I1210 11:14:38.931040 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-sync-hn59m"] Dec 10 11:14:38 crc kubenswrapper[4780]: I1210 11:14:38.944822 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/1af5d3ff-e54a-4a1a-89f4-b105fe6dbbd9-db-sync-config-data\") pod \"barbican-db-sync-hn59m\" (UID: \"1af5d3ff-e54a-4a1a-89f4-b105fe6dbbd9\") " pod="openstack/barbican-db-sync-hn59m" Dec 10 11:14:38 crc kubenswrapper[4780]: I1210 11:14:38.944966 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gtvp7\" (UniqueName: \"kubernetes.io/projected/1af5d3ff-e54a-4a1a-89f4-b105fe6dbbd9-kube-api-access-gtvp7\") pod \"barbican-db-sync-hn59m\" (UID: \"1af5d3ff-e54a-4a1a-89f4-b105fe6dbbd9\") " pod="openstack/barbican-db-sync-hn59m" Dec 10 11:14:38 crc kubenswrapper[4780]: I1210 11:14:38.945080 4780 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1af5d3ff-e54a-4a1a-89f4-b105fe6dbbd9-combined-ca-bundle\") pod \"barbican-db-sync-hn59m\" (UID: \"1af5d3ff-e54a-4a1a-89f4-b105fe6dbbd9\") " pod="openstack/barbican-db-sync-hn59m" Dec 10 11:14:38 crc kubenswrapper[4780]: I1210 11:14:38.981784 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-db-sync-hzmqc"] Dec 10 11:14:38 crc kubenswrapper[4780]: I1210 11:14:38.988691 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-sync-hzmqc" Dec 10 11:14:38 crc kubenswrapper[4780]: I1210 11:14:38.995606 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/1af5d3ff-e54a-4a1a-89f4-b105fe6dbbd9-db-sync-config-data\") pod \"barbican-db-sync-hn59m\" (UID: \"1af5d3ff-e54a-4a1a-89f4-b105fe6dbbd9\") " pod="openstack/barbican-db-sync-hn59m" Dec 10 11:14:39 crc kubenswrapper[4780]: I1210 11:14:39.001037 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1af5d3ff-e54a-4a1a-89f4-b105fe6dbbd9-combined-ca-bundle\") pod \"barbican-db-sync-hn59m\" (UID: \"1af5d3ff-e54a-4a1a-89f4-b105fe6dbbd9\") " pod="openstack/barbican-db-sync-hn59m" Dec 10 11:14:39 crc kubenswrapper[4780]: I1210 11:14:39.015340 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-scripts" Dec 10 11:14:39 crc kubenswrapper[4780]: I1210 11:14:39.015845 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-placement-dockercfg-shhvb" Dec 10 11:14:39 crc kubenswrapper[4780]: I1210 11:14:39.016347 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-config-data" Dec 10 11:14:39 crc kubenswrapper[4780]: I1210 11:14:39.050208 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/61593545-0480-4729-b6d1-ba4089e68f7a-logs\") pod \"placement-db-sync-hzmqc\" (UID: \"61593545-0480-4729-b6d1-ba4089e68f7a\") " pod="openstack/placement-db-sync-hzmqc" Dec 10 11:14:39 crc kubenswrapper[4780]: I1210 11:14:39.050774 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/61593545-0480-4729-b6d1-ba4089e68f7a-config-data\") pod \"placement-db-sync-hzmqc\" (UID: \"61593545-0480-4729-b6d1-ba4089e68f7a\") " pod="openstack/placement-db-sync-hzmqc" Dec 10 11:14:39 crc kubenswrapper[4780]: I1210 11:14:39.054038 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-clw5l\" (UniqueName: \"kubernetes.io/projected/61593545-0480-4729-b6d1-ba4089e68f7a-kube-api-access-clw5l\") pod \"placement-db-sync-hzmqc\" (UID: \"61593545-0480-4729-b6d1-ba4089e68f7a\") " pod="openstack/placement-db-sync-hzmqc" Dec 10 11:14:39 crc kubenswrapper[4780]: I1210 11:14:39.054738 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/61593545-0480-4729-b6d1-ba4089e68f7a-scripts\") pod \"placement-db-sync-hzmqc\" (UID: \"61593545-0480-4729-b6d1-ba4089e68f7a\") " pod="openstack/placement-db-sync-hzmqc" Dec 10 11:14:39 crc kubenswrapper[4780]: I1210 11:14:39.054982 4780 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/61593545-0480-4729-b6d1-ba4089e68f7a-combined-ca-bundle\") pod \"placement-db-sync-hzmqc\" (UID: \"61593545-0480-4729-b6d1-ba4089e68f7a\") " pod="openstack/placement-db-sync-hzmqc" Dec 10 11:14:39 crc kubenswrapper[4780]: I1210 11:14:39.058966 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gtvp7\" (UniqueName: \"kubernetes.io/projected/1af5d3ff-e54a-4a1a-89f4-b105fe6dbbd9-kube-api-access-gtvp7\") pod \"barbican-db-sync-hn59m\" (UID: \"1af5d3ff-e54a-4a1a-89f4-b105fe6dbbd9\") " pod="openstack/barbican-db-sync-hn59m" Dec 10 11:14:39 crc kubenswrapper[4780]: I1210 11:14:39.112532 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-sync-hzmqc"] Dec 10 11:14:39 crc kubenswrapper[4780]: I1210 11:14:39.159039 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/61593545-0480-4729-b6d1-ba4089e68f7a-logs\") pod \"placement-db-sync-hzmqc\" (UID: \"61593545-0480-4729-b6d1-ba4089e68f7a\") " pod="openstack/placement-db-sync-hzmqc" Dec 10 11:14:39 crc kubenswrapper[4780]: I1210 11:14:39.159150 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/61593545-0480-4729-b6d1-ba4089e68f7a-config-data\") pod \"placement-db-sync-hzmqc\" (UID: \"61593545-0480-4729-b6d1-ba4089e68f7a\") " pod="openstack/placement-db-sync-hzmqc" Dec 10 11:14:39 crc kubenswrapper[4780]: I1210 11:14:39.159225 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-clw5l\" (UniqueName: \"kubernetes.io/projected/61593545-0480-4729-b6d1-ba4089e68f7a-kube-api-access-clw5l\") pod \"placement-db-sync-hzmqc\" (UID: \"61593545-0480-4729-b6d1-ba4089e68f7a\") " pod="openstack/placement-db-sync-hzmqc" Dec 10 11:14:39 crc kubenswrapper[4780]: I1210 11:14:39.159339 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/61593545-0480-4729-b6d1-ba4089e68f7a-scripts\") pod \"placement-db-sync-hzmqc\" (UID: \"61593545-0480-4729-b6d1-ba4089e68f7a\") " pod="openstack/placement-db-sync-hzmqc" Dec 10 11:14:39 crc kubenswrapper[4780]: I1210 11:14:39.159390 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/61593545-0480-4729-b6d1-ba4089e68f7a-combined-ca-bundle\") pod \"placement-db-sync-hzmqc\" (UID: \"61593545-0480-4729-b6d1-ba4089e68f7a\") " pod="openstack/placement-db-sync-hzmqc" Dec 10 11:14:39 crc kubenswrapper[4780]: I1210 11:14:39.188095 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/61593545-0480-4729-b6d1-ba4089e68f7a-logs\") pod \"placement-db-sync-hzmqc\" (UID: \"61593545-0480-4729-b6d1-ba4089e68f7a\") " pod="openstack/placement-db-sync-hzmqc" Dec 10 11:14:39 crc kubenswrapper[4780]: I1210 11:14:39.196018 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/61593545-0480-4729-b6d1-ba4089e68f7a-combined-ca-bundle\") pod \"placement-db-sync-hzmqc\" (UID: \"61593545-0480-4729-b6d1-ba4089e68f7a\") " pod="openstack/placement-db-sync-hzmqc" Dec 10 11:14:39 crc kubenswrapper[4780]: I1210 11:14:39.211765 4780 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/61593545-0480-4729-b6d1-ba4089e68f7a-scripts\") pod \"placement-db-sync-hzmqc\" (UID: \"61593545-0480-4729-b6d1-ba4089e68f7a\") " pod="openstack/placement-db-sync-hzmqc" Dec 10 11:14:39 crc kubenswrapper[4780]: I1210 11:14:39.251210 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-clw5l\" (UniqueName: \"kubernetes.io/projected/61593545-0480-4729-b6d1-ba4089e68f7a-kube-api-access-clw5l\") pod \"placement-db-sync-hzmqc\" (UID: \"61593545-0480-4729-b6d1-ba4089e68f7a\") " pod="openstack/placement-db-sync-hzmqc" Dec 10 11:14:39 crc kubenswrapper[4780]: I1210 11:14:39.252094 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/61593545-0480-4729-b6d1-ba4089e68f7a-config-data\") pod \"placement-db-sync-hzmqc\" (UID: \"61593545-0480-4729-b6d1-ba4089e68f7a\") " pod="openstack/placement-db-sync-hzmqc" Dec 10 11:14:39 crc kubenswrapper[4780]: I1210 11:14:39.256261 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5959f8865f-vxt8c"] Dec 10 11:14:39 crc kubenswrapper[4780]: I1210 11:14:39.313046 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Dec 10 11:14:39 crc kubenswrapper[4780]: I1210 11:14:39.318317 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Dec 10 11:14:39 crc kubenswrapper[4780]: I1210 11:14:39.326745 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Dec 10 11:14:39 crc kubenswrapper[4780]: I1210 11:14:39.346284 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Dec 10 11:14:39 crc kubenswrapper[4780]: I1210 11:14:39.459675 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-db-sync-vcml6" Dec 10 11:14:39 crc kubenswrapper[4780]: I1210 11:14:39.482581 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5959f8865f-vxt8c" Dec 10 11:14:39 crc kubenswrapper[4780]: I1210 11:14:39.491589 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/d74fb1d1-0533-4202-9ee5-4a1c04ca6971-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"d74fb1d1-0533-4202-9ee5-4a1c04ca6971\") " pod="openstack/ceilometer-0" Dec 10 11:14:39 crc kubenswrapper[4780]: I1210 11:14:39.492235 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d74fb1d1-0533-4202-9ee5-4a1c04ca6971-run-httpd\") pod \"ceilometer-0\" (UID: \"d74fb1d1-0533-4202-9ee5-4a1c04ca6971\") " pod="openstack/ceilometer-0" Dec 10 11:14:39 crc kubenswrapper[4780]: I1210 11:14:39.492886 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d74fb1d1-0533-4202-9ee5-4a1c04ca6971-log-httpd\") pod \"ceilometer-0\" (UID: \"d74fb1d1-0533-4202-9ee5-4a1c04ca6971\") " pod="openstack/ceilometer-0" Dec 10 11:14:39 crc kubenswrapper[4780]: I1210 11:14:39.493008 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d74fb1d1-0533-4202-9ee5-4a1c04ca6971-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"d74fb1d1-0533-4202-9ee5-4a1c04ca6971\") " pod="openstack/ceilometer-0" Dec 10 11:14:39 crc kubenswrapper[4780]: I1210 11:14:39.494701 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d74fb1d1-0533-4202-9ee5-4a1c04ca6971-config-data\") pod \"ceilometer-0\" (UID: \"d74fb1d1-0533-4202-9ee5-4a1c04ca6971\") " pod="openstack/ceilometer-0" Dec 10 11:14:39 crc kubenswrapper[4780]: I1210 11:14:39.494981 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-994dj\" (UniqueName: \"kubernetes.io/projected/d74fb1d1-0533-4202-9ee5-4a1c04ca6971-kube-api-access-994dj\") pod \"ceilometer-0\" (UID: \"d74fb1d1-0533-4202-9ee5-4a1c04ca6971\") " pod="openstack/ceilometer-0" Dec 10 11:14:39 crc kubenswrapper[4780]: I1210 11:14:39.495041 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d74fb1d1-0533-4202-9ee5-4a1c04ca6971-scripts\") pod \"ceilometer-0\" (UID: \"d74fb1d1-0533-4202-9ee5-4a1c04ca6971\") " pod="openstack/ceilometer-0" Dec 10 11:14:39 crc kubenswrapper[4780]: I1210 11:14:39.551117 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Dec 10 11:14:39 crc kubenswrapper[4780]: I1210 11:14:39.553136 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-sync-wqs8s" Dec 10 11:14:39 crc kubenswrapper[4780]: I1210 11:14:39.566791 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-sync-cwb2h" Dec 10 11:14:39 crc kubenswrapper[4780]: I1210 11:14:39.594265 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-58dd9ff6bc-v5dwx"] Dec 10 11:14:39 crc kubenswrapper[4780]: I1210 11:14:39.599980 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-58dd9ff6bc-v5dwx" Dec 10 11:14:39 crc kubenswrapper[4780]: I1210 11:14:39.600233 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-sync-hn59m" Dec 10 11:14:39 crc kubenswrapper[4780]: I1210 11:14:39.603725 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d74fb1d1-0533-4202-9ee5-4a1c04ca6971-log-httpd\") pod \"ceilometer-0\" (UID: \"d74fb1d1-0533-4202-9ee5-4a1c04ca6971\") " pod="openstack/ceilometer-0" Dec 10 11:14:39 crc kubenswrapper[4780]: I1210 11:14:39.603787 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d74fb1d1-0533-4202-9ee5-4a1c04ca6971-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"d74fb1d1-0533-4202-9ee5-4a1c04ca6971\") " pod="openstack/ceilometer-0" Dec 10 11:14:39 crc kubenswrapper[4780]: I1210 11:14:39.603835 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/2d3f5d7b-dc9c-464c-ba00-fbe7212e9db1-dns-svc\") pod \"dnsmasq-dns-58dd9ff6bc-v5dwx\" (UID: \"2d3f5d7b-dc9c-464c-ba00-fbe7212e9db1\") " pod="openstack/dnsmasq-dns-58dd9ff6bc-v5dwx" Dec 10 11:14:39 crc kubenswrapper[4780]: I1210 11:14:39.603876 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d74fb1d1-0533-4202-9ee5-4a1c04ca6971-config-data\") pod \"ceilometer-0\" (UID: \"d74fb1d1-0533-4202-9ee5-4a1c04ca6971\") " pod="openstack/ceilometer-0" Dec 10 11:14:39 crc kubenswrapper[4780]: I1210 11:14:39.603967 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-994dj\" (UniqueName: \"kubernetes.io/projected/d74fb1d1-0533-4202-9ee5-4a1c04ca6971-kube-api-access-994dj\") pod \"ceilometer-0\" (UID: \"d74fb1d1-0533-4202-9ee5-4a1c04ca6971\") " pod="openstack/ceilometer-0" Dec 10 11:14:39 crc kubenswrapper[4780]: I1210 11:14:39.603997 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d74fb1d1-0533-4202-9ee5-4a1c04ca6971-scripts\") pod \"ceilometer-0\" (UID: \"d74fb1d1-0533-4202-9ee5-4a1c04ca6971\") " pod="openstack/ceilometer-0" Dec 10 11:14:39 crc kubenswrapper[4780]: I1210 11:14:39.604028 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/d74fb1d1-0533-4202-9ee5-4a1c04ca6971-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"d74fb1d1-0533-4202-9ee5-4a1c04ca6971\") " pod="openstack/ceilometer-0" Dec 10 11:14:39 crc kubenswrapper[4780]: I1210 11:14:39.604233 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2d3f5d7b-dc9c-464c-ba00-fbe7212e9db1-config\") pod \"dnsmasq-dns-58dd9ff6bc-v5dwx\" (UID: \"2d3f5d7b-dc9c-464c-ba00-fbe7212e9db1\") " pod="openstack/dnsmasq-dns-58dd9ff6bc-v5dwx" Dec 10 11:14:39 crc kubenswrapper[4780]: I1210 11:14:39.604569 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/2d3f5d7b-dc9c-464c-ba00-fbe7212e9db1-dns-swift-storage-0\") pod \"dnsmasq-dns-58dd9ff6bc-v5dwx\" (UID: \"2d3f5d7b-dc9c-464c-ba00-fbe7212e9db1\") " 
pod="openstack/dnsmasq-dns-58dd9ff6bc-v5dwx" Dec 10 11:14:39 crc kubenswrapper[4780]: I1210 11:14:39.604759 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/2d3f5d7b-dc9c-464c-ba00-fbe7212e9db1-ovsdbserver-sb\") pod \"dnsmasq-dns-58dd9ff6bc-v5dwx\" (UID: \"2d3f5d7b-dc9c-464c-ba00-fbe7212e9db1\") " pod="openstack/dnsmasq-dns-58dd9ff6bc-v5dwx" Dec 10 11:14:39 crc kubenswrapper[4780]: I1210 11:14:39.605150 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/2d3f5d7b-dc9c-464c-ba00-fbe7212e9db1-ovsdbserver-nb\") pod \"dnsmasq-dns-58dd9ff6bc-v5dwx\" (UID: \"2d3f5d7b-dc9c-464c-ba00-fbe7212e9db1\") " pod="openstack/dnsmasq-dns-58dd9ff6bc-v5dwx" Dec 10 11:14:39 crc kubenswrapper[4780]: I1210 11:14:39.605182 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d74fb1d1-0533-4202-9ee5-4a1c04ca6971-run-httpd\") pod \"ceilometer-0\" (UID: \"d74fb1d1-0533-4202-9ee5-4a1c04ca6971\") " pod="openstack/ceilometer-0" Dec 10 11:14:39 crc kubenswrapper[4780]: I1210 11:14:39.604906 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d74fb1d1-0533-4202-9ee5-4a1c04ca6971-log-httpd\") pod \"ceilometer-0\" (UID: \"d74fb1d1-0533-4202-9ee5-4a1c04ca6971\") " pod="openstack/ceilometer-0" Dec 10 11:14:39 crc kubenswrapper[4780]: I1210 11:14:39.605211 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f9khd\" (UniqueName: \"kubernetes.io/projected/2d3f5d7b-dc9c-464c-ba00-fbe7212e9db1-kube-api-access-f9khd\") pod \"dnsmasq-dns-58dd9ff6bc-v5dwx\" (UID: \"2d3f5d7b-dc9c-464c-ba00-fbe7212e9db1\") " pod="openstack/dnsmasq-dns-58dd9ff6bc-v5dwx" Dec 10 11:14:39 crc kubenswrapper[4780]: I1210 11:14:39.610425 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d74fb1d1-0533-4202-9ee5-4a1c04ca6971-run-httpd\") pod \"ceilometer-0\" (UID: \"d74fb1d1-0533-4202-9ee5-4a1c04ca6971\") " pod="openstack/ceilometer-0" Dec 10 11:14:39 crc kubenswrapper[4780]: I1210 11:14:39.624614 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d74fb1d1-0533-4202-9ee5-4a1c04ca6971-scripts\") pod \"ceilometer-0\" (UID: \"d74fb1d1-0533-4202-9ee5-4a1c04ca6971\") " pod="openstack/ceilometer-0" Dec 10 11:14:39 crc kubenswrapper[4780]: I1210 11:14:39.627103 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d74fb1d1-0533-4202-9ee5-4a1c04ca6971-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"d74fb1d1-0533-4202-9ee5-4a1c04ca6971\") " pod="openstack/ceilometer-0" Dec 10 11:14:39 crc kubenswrapper[4780]: I1210 11:14:39.628949 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d74fb1d1-0533-4202-9ee5-4a1c04ca6971-config-data\") pod \"ceilometer-0\" (UID: \"d74fb1d1-0533-4202-9ee5-4a1c04ca6971\") " pod="openstack/ceilometer-0" Dec 10 11:14:39 crc kubenswrapper[4780]: I1210 11:14:39.630861 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: 
\"kubernetes.io/secret/d74fb1d1-0533-4202-9ee5-4a1c04ca6971-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"d74fb1d1-0533-4202-9ee5-4a1c04ca6971\") " pod="openstack/ceilometer-0" Dec 10 11:14:39 crc kubenswrapper[4780]: I1210 11:14:39.649942 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-58dd9ff6bc-v5dwx"] Dec 10 11:14:39 crc kubenswrapper[4780]: I1210 11:14:39.657098 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-sync-hzmqc" Dec 10 11:14:39 crc kubenswrapper[4780]: I1210 11:14:39.673037 4780 trace.go:236] Trace[1131199315]: "Calculate volume metrics of storage for pod openshift-logging/logging-loki-index-gateway-0" (10-Dec-2025 11:14:38.256) (total time: 1416ms): Dec 10 11:14:39 crc kubenswrapper[4780]: Trace[1131199315]: [1.416507291s] [1.416507291s] END Dec 10 11:14:39 crc kubenswrapper[4780]: I1210 11:14:39.677568 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-994dj\" (UniqueName: \"kubernetes.io/projected/d74fb1d1-0533-4202-9ee5-4a1c04ca6971-kube-api-access-994dj\") pod \"ceilometer-0\" (UID: \"d74fb1d1-0533-4202-9ee5-4a1c04ca6971\") " pod="openstack/ceilometer-0" Dec 10 11:14:39 crc kubenswrapper[4780]: I1210 11:14:39.712294 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Dec 10 11:14:39 crc kubenswrapper[4780]: I1210 11:14:39.713445 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/2d3f5d7b-dc9c-464c-ba00-fbe7212e9db1-ovsdbserver-sb\") pod \"dnsmasq-dns-58dd9ff6bc-v5dwx\" (UID: \"2d3f5d7b-dc9c-464c-ba00-fbe7212e9db1\") " pod="openstack/dnsmasq-dns-58dd9ff6bc-v5dwx" Dec 10 11:14:39 crc kubenswrapper[4780]: I1210 11:14:39.713544 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/2d3f5d7b-dc9c-464c-ba00-fbe7212e9db1-ovsdbserver-nb\") pod \"dnsmasq-dns-58dd9ff6bc-v5dwx\" (UID: \"2d3f5d7b-dc9c-464c-ba00-fbe7212e9db1\") " pod="openstack/dnsmasq-dns-58dd9ff6bc-v5dwx" Dec 10 11:14:39 crc kubenswrapper[4780]: I1210 11:14:39.713585 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-f9khd\" (UniqueName: \"kubernetes.io/projected/2d3f5d7b-dc9c-464c-ba00-fbe7212e9db1-kube-api-access-f9khd\") pod \"dnsmasq-dns-58dd9ff6bc-v5dwx\" (UID: \"2d3f5d7b-dc9c-464c-ba00-fbe7212e9db1\") " pod="openstack/dnsmasq-dns-58dd9ff6bc-v5dwx" Dec 10 11:14:39 crc kubenswrapper[4780]: I1210 11:14:39.713751 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/2d3f5d7b-dc9c-464c-ba00-fbe7212e9db1-dns-svc\") pod \"dnsmasq-dns-58dd9ff6bc-v5dwx\" (UID: \"2d3f5d7b-dc9c-464c-ba00-fbe7212e9db1\") " pod="openstack/dnsmasq-dns-58dd9ff6bc-v5dwx" Dec 10 11:14:39 crc kubenswrapper[4780]: I1210 11:14:39.713859 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2d3f5d7b-dc9c-464c-ba00-fbe7212e9db1-config\") pod \"dnsmasq-dns-58dd9ff6bc-v5dwx\" (UID: \"2d3f5d7b-dc9c-464c-ba00-fbe7212e9db1\") " pod="openstack/dnsmasq-dns-58dd9ff6bc-v5dwx" Dec 10 11:14:39 crc kubenswrapper[4780]: I1210 11:14:39.713891 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: 
\"kubernetes.io/configmap/2d3f5d7b-dc9c-464c-ba00-fbe7212e9db1-dns-swift-storage-0\") pod \"dnsmasq-dns-58dd9ff6bc-v5dwx\" (UID: \"2d3f5d7b-dc9c-464c-ba00-fbe7212e9db1\") " pod="openstack/dnsmasq-dns-58dd9ff6bc-v5dwx" Dec 10 11:14:39 crc kubenswrapper[4780]: I1210 11:14:39.750986 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/2d3f5d7b-dc9c-464c-ba00-fbe7212e9db1-dns-swift-storage-0\") pod \"dnsmasq-dns-58dd9ff6bc-v5dwx\" (UID: \"2d3f5d7b-dc9c-464c-ba00-fbe7212e9db1\") " pod="openstack/dnsmasq-dns-58dd9ff6bc-v5dwx" Dec 10 11:14:39 crc kubenswrapper[4780]: I1210 11:14:39.758861 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/2d3f5d7b-dc9c-464c-ba00-fbe7212e9db1-ovsdbserver-sb\") pod \"dnsmasq-dns-58dd9ff6bc-v5dwx\" (UID: \"2d3f5d7b-dc9c-464c-ba00-fbe7212e9db1\") " pod="openstack/dnsmasq-dns-58dd9ff6bc-v5dwx" Dec 10 11:14:39 crc kubenswrapper[4780]: I1210 11:14:39.761808 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/2d3f5d7b-dc9c-464c-ba00-fbe7212e9db1-ovsdbserver-nb\") pod \"dnsmasq-dns-58dd9ff6bc-v5dwx\" (UID: \"2d3f5d7b-dc9c-464c-ba00-fbe7212e9db1\") " pod="openstack/dnsmasq-dns-58dd9ff6bc-v5dwx" Dec 10 11:14:39 crc kubenswrapper[4780]: I1210 11:14:39.770932 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/2d3f5d7b-dc9c-464c-ba00-fbe7212e9db1-dns-svc\") pod \"dnsmasq-dns-58dd9ff6bc-v5dwx\" (UID: \"2d3f5d7b-dc9c-464c-ba00-fbe7212e9db1\") " pod="openstack/dnsmasq-dns-58dd9ff6bc-v5dwx" Dec 10 11:14:39 crc kubenswrapper[4780]: I1210 11:14:39.771864 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2d3f5d7b-dc9c-464c-ba00-fbe7212e9db1-config\") pod \"dnsmasq-dns-58dd9ff6bc-v5dwx\" (UID: \"2d3f5d7b-dc9c-464c-ba00-fbe7212e9db1\") " pod="openstack/dnsmasq-dns-58dd9ff6bc-v5dwx" Dec 10 11:14:39 crc kubenswrapper[4780]: I1210 11:14:39.774270 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-f9khd\" (UniqueName: \"kubernetes.io/projected/2d3f5d7b-dc9c-464c-ba00-fbe7212e9db1-kube-api-access-f9khd\") pod \"dnsmasq-dns-58dd9ff6bc-v5dwx\" (UID: \"2d3f5d7b-dc9c-464c-ba00-fbe7212e9db1\") " pod="openstack/dnsmasq-dns-58dd9ff6bc-v5dwx" Dec 10 11:14:39 crc kubenswrapper[4780]: I1210 11:14:39.776223 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-58dd9ff6bc-v5dwx" Dec 10 11:14:39 crc kubenswrapper[4780]: I1210 11:14:39.777045 4780 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-764c5664d7-xvt7k" Dec 10 11:14:39 crc kubenswrapper[4780]: I1210 11:14:39.801883 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-764c5664d7-xvt7k" event={"ID":"6cb4c5f3-27bc-4e51-be0f-7b52e6f34246","Type":"ContainerDied","Data":"7f375396f970a0322c88745bdba2c333bdfe2e0e2a746080c8ea773fabc5bc84"} Dec 10 11:14:39 crc kubenswrapper[4780]: I1210 11:14:39.801987 4780 scope.go:117] "RemoveContainer" containerID="f34dbe7649dd70ff645b8e146b369a60b6f3748ce56a163b50b5f3aabd51bb61" Dec 10 11:14:39 crc kubenswrapper[4780]: I1210 11:14:39.929866 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/6cb4c5f3-27bc-4e51-be0f-7b52e6f34246-dns-svc\") pod \"6cb4c5f3-27bc-4e51-be0f-7b52e6f34246\" (UID: \"6cb4c5f3-27bc-4e51-be0f-7b52e6f34246\") " Dec 10 11:14:39 crc kubenswrapper[4780]: I1210 11:14:39.932624 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/6cb4c5f3-27bc-4e51-be0f-7b52e6f34246-ovsdbserver-sb\") pod \"6cb4c5f3-27bc-4e51-be0f-7b52e6f34246\" (UID: \"6cb4c5f3-27bc-4e51-be0f-7b52e6f34246\") " Dec 10 11:14:39 crc kubenswrapper[4780]: I1210 11:14:39.932782 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/6cb4c5f3-27bc-4e51-be0f-7b52e6f34246-dns-swift-storage-0\") pod \"6cb4c5f3-27bc-4e51-be0f-7b52e6f34246\" (UID: \"6cb4c5f3-27bc-4e51-be0f-7b52e6f34246\") " Dec 10 11:14:39 crc kubenswrapper[4780]: I1210 11:14:39.932897 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/6cb4c5f3-27bc-4e51-be0f-7b52e6f34246-ovsdbserver-nb\") pod \"6cb4c5f3-27bc-4e51-be0f-7b52e6f34246\" (UID: \"6cb4c5f3-27bc-4e51-be0f-7b52e6f34246\") " Dec 10 11:14:39 crc kubenswrapper[4780]: I1210 11:14:39.933068 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6cb4c5f3-27bc-4e51-be0f-7b52e6f34246-config\") pod \"6cb4c5f3-27bc-4e51-be0f-7b52e6f34246\" (UID: \"6cb4c5f3-27bc-4e51-be0f-7b52e6f34246\") " Dec 10 11:14:39 crc kubenswrapper[4780]: I1210 11:14:39.933137 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w6jvt\" (UniqueName: \"kubernetes.io/projected/6cb4c5f3-27bc-4e51-be0f-7b52e6f34246-kube-api-access-w6jvt\") pod \"6cb4c5f3-27bc-4e51-be0f-7b52e6f34246\" (UID: \"6cb4c5f3-27bc-4e51-be0f-7b52e6f34246\") " Dec 10 11:14:39 crc kubenswrapper[4780]: I1210 11:14:39.943304 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6cb4c5f3-27bc-4e51-be0f-7b52e6f34246-kube-api-access-w6jvt" (OuterVolumeSpecName: "kube-api-access-w6jvt") pod "6cb4c5f3-27bc-4e51-be0f-7b52e6f34246" (UID: "6cb4c5f3-27bc-4e51-be0f-7b52e6f34246"). InnerVolumeSpecName "kube-api-access-w6jvt". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:14:39 crc kubenswrapper[4780]: I1210 11:14:39.962323 4780 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w6jvt\" (UniqueName: \"kubernetes.io/projected/6cb4c5f3-27bc-4e51-be0f-7b52e6f34246-kube-api-access-w6jvt\") on node \"crc\" DevicePath \"\"" Dec 10 11:14:40 crc kubenswrapper[4780]: I1210 11:14:40.063943 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6cb4c5f3-27bc-4e51-be0f-7b52e6f34246-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "6cb4c5f3-27bc-4e51-be0f-7b52e6f34246" (UID: "6cb4c5f3-27bc-4e51-be0f-7b52e6f34246"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 11:14:40 crc kubenswrapper[4780]: I1210 11:14:40.070106 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/6cb4c5f3-27bc-4e51-be0f-7b52e6f34246-dns-svc\") pod \"6cb4c5f3-27bc-4e51-be0f-7b52e6f34246\" (UID: \"6cb4c5f3-27bc-4e51-be0f-7b52e6f34246\") " Dec 10 11:14:40 crc kubenswrapper[4780]: W1210 11:14:40.070465 4780 empty_dir.go:500] Warning: Unmount skipped because path does not exist: /var/lib/kubelet/pods/6cb4c5f3-27bc-4e51-be0f-7b52e6f34246/volumes/kubernetes.io~configmap/dns-svc Dec 10 11:14:40 crc kubenswrapper[4780]: I1210 11:14:40.070504 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6cb4c5f3-27bc-4e51-be0f-7b52e6f34246-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "6cb4c5f3-27bc-4e51-be0f-7b52e6f34246" (UID: "6cb4c5f3-27bc-4e51-be0f-7b52e6f34246"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 11:14:40 crc kubenswrapper[4780]: I1210 11:14:40.072213 4780 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/6cb4c5f3-27bc-4e51-be0f-7b52e6f34246-dns-svc\") on node \"crc\" DevicePath \"\"" Dec 10 11:14:40 crc kubenswrapper[4780]: I1210 11:14:40.080336 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6cb4c5f3-27bc-4e51-be0f-7b52e6f34246-config" (OuterVolumeSpecName: "config") pod "6cb4c5f3-27bc-4e51-be0f-7b52e6f34246" (UID: "6cb4c5f3-27bc-4e51-be0f-7b52e6f34246"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 11:14:40 crc kubenswrapper[4780]: I1210 11:14:40.092113 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6cb4c5f3-27bc-4e51-be0f-7b52e6f34246-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "6cb4c5f3-27bc-4e51-be0f-7b52e6f34246" (UID: "6cb4c5f3-27bc-4e51-be0f-7b52e6f34246"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 11:14:40 crc kubenswrapper[4780]: I1210 11:14:40.095902 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6cb4c5f3-27bc-4e51-be0f-7b52e6f34246-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "6cb4c5f3-27bc-4e51-be0f-7b52e6f34246" (UID: "6cb4c5f3-27bc-4e51-be0f-7b52e6f34246"). InnerVolumeSpecName "ovsdbserver-sb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 11:14:40 crc kubenswrapper[4780]: I1210 11:14:40.136704 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6cb4c5f3-27bc-4e51-be0f-7b52e6f34246-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "6cb4c5f3-27bc-4e51-be0f-7b52e6f34246" (UID: "6cb4c5f3-27bc-4e51-be0f-7b52e6f34246"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 11:14:40 crc kubenswrapper[4780]: I1210 11:14:40.176749 4780 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/6cb4c5f3-27bc-4e51-be0f-7b52e6f34246-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Dec 10 11:14:40 crc kubenswrapper[4780]: I1210 11:14:40.176804 4780 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/6cb4c5f3-27bc-4e51-be0f-7b52e6f34246-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Dec 10 11:14:40 crc kubenswrapper[4780]: I1210 11:14:40.176817 4780 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/6cb4c5f3-27bc-4e51-be0f-7b52e6f34246-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Dec 10 11:14:40 crc kubenswrapper[4780]: I1210 11:14:40.176828 4780 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6cb4c5f3-27bc-4e51-be0f-7b52e6f34246-config\") on node \"crc\" DevicePath \"\"" Dec 10 11:14:40 crc kubenswrapper[4780]: I1210 11:14:40.291554 4780 scope.go:117] "RemoveContainer" containerID="92f6b1ecdb29a0c9786a855167fe356b7ad610fdeca435f643b6762be6e1356b" Dec 10 11:14:40 crc kubenswrapper[4780]: I1210 11:14:40.368621 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-jzgb2"] Dec 10 11:14:40 crc kubenswrapper[4780]: I1210 11:14:40.498336 4780 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-db-sync-m9l8h" Dec 10 11:14:40 crc kubenswrapper[4780]: I1210 11:14:40.610759 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/626bc022-de20-4c32-ad5b-bd22a54340ce-db-sync-config-data\") pod \"626bc022-de20-4c32-ad5b-bd22a54340ce\" (UID: \"626bc022-de20-4c32-ad5b-bd22a54340ce\") " Dec 10 11:14:40 crc kubenswrapper[4780]: I1210 11:14:40.610932 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/626bc022-de20-4c32-ad5b-bd22a54340ce-config-data\") pod \"626bc022-de20-4c32-ad5b-bd22a54340ce\" (UID: \"626bc022-de20-4c32-ad5b-bd22a54340ce\") " Dec 10 11:14:40 crc kubenswrapper[4780]: I1210 11:14:40.611028 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/626bc022-de20-4c32-ad5b-bd22a54340ce-combined-ca-bundle\") pod \"626bc022-de20-4c32-ad5b-bd22a54340ce\" (UID: \"626bc022-de20-4c32-ad5b-bd22a54340ce\") " Dec 10 11:14:40 crc kubenswrapper[4780]: I1210 11:14:40.611535 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kt92d\" (UniqueName: \"kubernetes.io/projected/626bc022-de20-4c32-ad5b-bd22a54340ce-kube-api-access-kt92d\") pod \"626bc022-de20-4c32-ad5b-bd22a54340ce\" (UID: \"626bc022-de20-4c32-ad5b-bd22a54340ce\") " Dec 10 11:14:40 crc kubenswrapper[4780]: I1210 11:14:40.844755 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/626bc022-de20-4c32-ad5b-bd22a54340ce-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "626bc022-de20-4c32-ad5b-bd22a54340ce" (UID: "626bc022-de20-4c32-ad5b-bd22a54340ce"). InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:14:40 crc kubenswrapper[4780]: I1210 11:14:40.860115 4780 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/626bc022-de20-4c32-ad5b-bd22a54340ce-db-sync-config-data\") on node \"crc\" DevicePath \"\"" Dec 10 11:14:40 crc kubenswrapper[4780]: W1210 11:14:40.878122 4780 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod544e0fa4_06de_4bd3_8793_303a741a1e53.slice/crio-43a0a522813a45515e48379cbd4bcf6c15969d8884125cd44bfe7b95646d7fa4 WatchSource:0}: Error finding container 43a0a522813a45515e48379cbd4bcf6c15969d8884125cd44bfe7b95646d7fa4: Status 404 returned error can't find the container with id 43a0a522813a45515e48379cbd4bcf6c15969d8884125cd44bfe7b95646d7fa4 Dec 10 11:14:40 crc kubenswrapper[4780]: I1210 11:14:40.885824 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/626bc022-de20-4c32-ad5b-bd22a54340ce-kube-api-access-kt92d" (OuterVolumeSpecName: "kube-api-access-kt92d") pod "626bc022-de20-4c32-ad5b-bd22a54340ce" (UID: "626bc022-de20-4c32-ad5b-bd22a54340ce"). InnerVolumeSpecName "kube-api-access-kt92d". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:14:40 crc kubenswrapper[4780]: I1210 11:14:40.971769 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/626bc022-de20-4c32-ad5b-bd22a54340ce-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "626bc022-de20-4c32-ad5b-bd22a54340ce" (UID: "626bc022-de20-4c32-ad5b-bd22a54340ce"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:14:41 crc kubenswrapper[4780]: I1210 11:14:41.006303 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"a75f82ba-b1e5-45cc-8e35-dd8c75c21247","Type":"ContainerStarted","Data":"4d7c345aeebdb291cff6d296f990d290230994fa4233b689d727d9d9f2d07bfe"} Dec 10 11:14:41 crc kubenswrapper[4780]: I1210 11:14:41.032288 4780 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kt92d\" (UniqueName: \"kubernetes.io/projected/626bc022-de20-4c32-ad5b-bd22a54340ce-kube-api-access-kt92d\") on node \"crc\" DevicePath \"\"" Dec 10 11:14:41 crc kubenswrapper[4780]: I1210 11:14:41.032364 4780 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/626bc022-de20-4c32-ad5b-bd22a54340ce-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 10 11:14:41 crc kubenswrapper[4780]: I1210 11:14:41.041243 4780 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-sync-m9l8h" Dec 10 11:14:41 crc kubenswrapper[4780]: I1210 11:14:41.042014 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-m9l8h" event={"ID":"626bc022-de20-4c32-ad5b-bd22a54340ce","Type":"ContainerDied","Data":"adb12627998d3460f06fcb9fa3a677ac62751cd8ce63b212d5d1e3e9105606aa"} Dec 10 11:14:41 crc kubenswrapper[4780]: I1210 11:14:41.042141 4780 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="adb12627998d3460f06fcb9fa3a677ac62751cd8ce63b212d5d1e3e9105606aa" Dec 10 11:14:41 crc kubenswrapper[4780]: I1210 11:14:41.060692 4780 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-764c5664d7-xvt7k" Dec 10 11:14:41 crc kubenswrapper[4780]: I1210 11:14:41.072010 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-db-sync-vcml6"] Dec 10 11:14:41 crc kubenswrapper[4780]: I1210 11:14:41.217594 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/626bc022-de20-4c32-ad5b-bd22a54340ce-config-data" (OuterVolumeSpecName: "config-data") pod "626bc022-de20-4c32-ad5b-bd22a54340ce" (UID: "626bc022-de20-4c32-ad5b-bd22a54340ce"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:14:41 crc kubenswrapper[4780]: I1210 11:14:41.261484 4780 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/626bc022-de20-4c32-ad5b-bd22a54340ce-config-data\") on node \"crc\" DevicePath \"\"" Dec 10 11:14:41 crc kubenswrapper[4780]: I1210 11:14:41.654204 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-764c5664d7-xvt7k"] Dec 10 11:14:41 crc kubenswrapper[4780]: I1210 11:14:41.704742 4780 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-764c5664d7-xvt7k"] Dec 10 11:14:41 crc kubenswrapper[4780]: I1210 11:14:41.996164 4780 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6cb4c5f3-27bc-4e51-be0f-7b52e6f34246" path="/var/lib/kubelet/pods/6cb4c5f3-27bc-4e51-be0f-7b52e6f34246/volumes" Dec 10 11:14:42 crc kubenswrapper[4780]: I1210 11:14:42.172452 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-db-sync-vcml6" event={"ID":"253b0c60-6211-4e23-921c-b8c34ccc4e25","Type":"ContainerStarted","Data":"da679a3b98a99b4b54e1bea3049e6c712f4b19373ec12fe4f824552c7caa52cb"} Dec 10 11:14:42 crc kubenswrapper[4780]: I1210 11:14:42.201647 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-jzgb2" event={"ID":"544e0fa4-06de-4bd3-8793-303a741a1e53","Type":"ContainerStarted","Data":"2be4b9bdd746ab4e7cbde6d8d26a1dd11549ed2a7276bde6dfb93d5b25013263"} Dec 10 11:14:42 crc kubenswrapper[4780]: I1210 11:14:42.201749 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-jzgb2" event={"ID":"544e0fa4-06de-4bd3-8793-303a741a1e53","Type":"ContainerStarted","Data":"43a0a522813a45515e48379cbd4bcf6c15969d8884125cd44bfe7b95646d7fa4"} Dec 10 11:14:42 crc kubenswrapper[4780]: I1210 11:14:42.315340 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-bootstrap-jzgb2" podStartSLOduration=5.315307952 podStartE2EDuration="5.315307952s" podCreationTimestamp="2025-12-10 11:14:37 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 11:14:42.300937695 +0000 UTC m=+1787.154331138" watchObservedRunningTime="2025-12-10 11:14:42.315307952 +0000 UTC m=+1787.168701395" Dec 10 11:14:43 crc kubenswrapper[4780]: I1210 11:14:43.528171 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5959f8865f-vxt8c"] Dec 10 11:14:43 crc kubenswrapper[4780]: I1210 11:14:43.568616 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-sync-cwb2h"] Dec 10 11:14:43 crc kubenswrapper[4780]: W1210 11:14:43.617386 4780 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod0613207e_c071_4295_a536_f037ee6fe446.slice/crio-f6f5b6398f37368334b2a916953d352093a3fc5c9cc53ed1f266f23146d822fd WatchSource:0}: Error finding container f6f5b6398f37368334b2a916953d352093a3fc5c9cc53ed1f266f23146d822fd: Status 404 returned error can't find the container with id f6f5b6398f37368334b2a916953d352093a3fc5c9cc53ed1f266f23146d822fd Dec 10 11:14:43 crc kubenswrapper[4780]: I1210 11:14:43.753984 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-sync-hzmqc"] Dec 10 11:14:43 crc kubenswrapper[4780]: I1210 11:14:43.794203 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" 
pods=["openstack/dnsmasq-dns-58dd9ff6bc-v5dwx"] Dec 10 11:14:43 crc kubenswrapper[4780]: I1210 11:14:43.842200 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-sync-hn59m"] Dec 10 11:14:44 crc kubenswrapper[4780]: I1210 11:14:44.210375 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Dec 10 11:14:44 crc kubenswrapper[4780]: W1210 11:14:44.247852 4780 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf2a06360_9c37_4ae4_8148_73c37d2be5a4.slice/crio-d67bdf26450e143660c8fb95c65ab68434cf08bc7ec098ca71447cad3d131717 WatchSource:0}: Error finding container d67bdf26450e143660c8fb95c65ab68434cf08bc7ec098ca71447cad3d131717: Status 404 returned error can't find the container with id d67bdf26450e143660c8fb95c65ab68434cf08bc7ec098ca71447cad3d131717 Dec 10 11:14:44 crc kubenswrapper[4780]: I1210 11:14:44.267616 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-sync-wqs8s"] Dec 10 11:14:44 crc kubenswrapper[4780]: I1210 11:14:44.310202 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-58dd9ff6bc-v5dwx"] Dec 10 11:14:44 crc kubenswrapper[4780]: I1210 11:14:44.343670 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-785d8bcb8c-jppm6"] Dec 10 11:14:44 crc kubenswrapper[4780]: E1210 11:14:44.345333 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6cb4c5f3-27bc-4e51-be0f-7b52e6f34246" containerName="init" Dec 10 11:14:44 crc kubenswrapper[4780]: I1210 11:14:44.346420 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="6cb4c5f3-27bc-4e51-be0f-7b52e6f34246" containerName="init" Dec 10 11:14:44 crc kubenswrapper[4780]: E1210 11:14:44.346542 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6cb4c5f3-27bc-4e51-be0f-7b52e6f34246" containerName="dnsmasq-dns" Dec 10 11:14:44 crc kubenswrapper[4780]: I1210 11:14:44.348071 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="6cb4c5f3-27bc-4e51-be0f-7b52e6f34246" containerName="dnsmasq-dns" Dec 10 11:14:44 crc kubenswrapper[4780]: E1210 11:14:44.358831 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="626bc022-de20-4c32-ad5b-bd22a54340ce" containerName="glance-db-sync" Dec 10 11:14:44 crc kubenswrapper[4780]: I1210 11:14:44.359319 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="626bc022-de20-4c32-ad5b-bd22a54340ce" containerName="glance-db-sync" Dec 10 11:14:44 crc kubenswrapper[4780]: I1210 11:14:44.360371 4780 memory_manager.go:354] "RemoveStaleState removing state" podUID="6cb4c5f3-27bc-4e51-be0f-7b52e6f34246" containerName="dnsmasq-dns" Dec 10 11:14:44 crc kubenswrapper[4780]: I1210 11:14:44.360579 4780 memory_manager.go:354] "RemoveStaleState removing state" podUID="626bc022-de20-4c32-ad5b-bd22a54340ce" containerName="glance-db-sync" Dec 10 11:14:44 crc kubenswrapper[4780]: I1210 11:14:44.371292 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-785d8bcb8c-jppm6" Dec 10 11:14:44 crc kubenswrapper[4780]: I1210 11:14:44.381755 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-785d8bcb8c-jppm6"] Dec 10 11:14:45 crc kubenswrapper[4780]: I1210 11:14:45.396568 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fvdh4\" (UniqueName: \"kubernetes.io/projected/4cf4f40c-d9e0-46ea-bfa6-cfa15257d19a-kube-api-access-fvdh4\") pod \"dnsmasq-dns-785d8bcb8c-jppm6\" (UID: \"4cf4f40c-d9e0-46ea-bfa6-cfa15257d19a\") " pod="openstack/dnsmasq-dns-785d8bcb8c-jppm6" Dec 10 11:14:45 crc kubenswrapper[4780]: I1210 11:14:45.397240 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/4cf4f40c-d9e0-46ea-bfa6-cfa15257d19a-ovsdbserver-sb\") pod \"dnsmasq-dns-785d8bcb8c-jppm6\" (UID: \"4cf4f40c-d9e0-46ea-bfa6-cfa15257d19a\") " pod="openstack/dnsmasq-dns-785d8bcb8c-jppm6" Dec 10 11:14:45 crc kubenswrapper[4780]: I1210 11:14:45.397329 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4cf4f40c-d9e0-46ea-bfa6-cfa15257d19a-config\") pod \"dnsmasq-dns-785d8bcb8c-jppm6\" (UID: \"4cf4f40c-d9e0-46ea-bfa6-cfa15257d19a\") " pod="openstack/dnsmasq-dns-785d8bcb8c-jppm6" Dec 10 11:14:45 crc kubenswrapper[4780]: I1210 11:14:45.397411 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/4cf4f40c-d9e0-46ea-bfa6-cfa15257d19a-dns-svc\") pod \"dnsmasq-dns-785d8bcb8c-jppm6\" (UID: \"4cf4f40c-d9e0-46ea-bfa6-cfa15257d19a\") " pod="openstack/dnsmasq-dns-785d8bcb8c-jppm6" Dec 10 11:14:45 crc kubenswrapper[4780]: I1210 11:14:45.397568 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/4cf4f40c-d9e0-46ea-bfa6-cfa15257d19a-dns-swift-storage-0\") pod \"dnsmasq-dns-785d8bcb8c-jppm6\" (UID: \"4cf4f40c-d9e0-46ea-bfa6-cfa15257d19a\") " pod="openstack/dnsmasq-dns-785d8bcb8c-jppm6" Dec 10 11:14:45 crc kubenswrapper[4780]: I1210 11:14:45.397600 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/4cf4f40c-d9e0-46ea-bfa6-cfa15257d19a-ovsdbserver-nb\") pod \"dnsmasq-dns-785d8bcb8c-jppm6\" (UID: \"4cf4f40c-d9e0-46ea-bfa6-cfa15257d19a\") " pod="openstack/dnsmasq-dns-785d8bcb8c-jppm6" Dec 10 11:14:45 crc kubenswrapper[4780]: I1210 11:14:45.546410 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"d74fb1d1-0533-4202-9ee5-4a1c04ca6971","Type":"ContainerStarted","Data":"cc1c16c189e2cb86d4b630164daef210aa21060b378d34b6bd382d8952b48cfd"} Dec 10 11:14:45 crc kubenswrapper[4780]: I1210 11:14:45.619248 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-wqs8s" event={"ID":"f2a06360-9c37-4ae4-8148-73c37d2be5a4","Type":"ContainerStarted","Data":"d67bdf26450e143660c8fb95c65ab68434cf08bc7ec098ca71447cad3d131717"} Dec 10 11:14:45 crc kubenswrapper[4780]: I1210 11:14:45.623482 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-external-api-0"] Dec 10 11:14:45 crc kubenswrapper[4780]: I1210 11:14:45.636742 4780 util.go:30] "No sandbox for pod can be 
found. Need to start a new one" pod="openstack/glance-default-external-api-0" Dec 10 11:14:45 crc kubenswrapper[4780]: I1210 11:14:45.643978 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/4cf4f40c-d9e0-46ea-bfa6-cfa15257d19a-dns-swift-storage-0\") pod \"dnsmasq-dns-785d8bcb8c-jppm6\" (UID: \"4cf4f40c-d9e0-46ea-bfa6-cfa15257d19a\") " pod="openstack/dnsmasq-dns-785d8bcb8c-jppm6" Dec 10 11:14:45 crc kubenswrapper[4780]: I1210 11:14:45.644072 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/4cf4f40c-d9e0-46ea-bfa6-cfa15257d19a-ovsdbserver-nb\") pod \"dnsmasq-dns-785d8bcb8c-jppm6\" (UID: \"4cf4f40c-d9e0-46ea-bfa6-cfa15257d19a\") " pod="openstack/dnsmasq-dns-785d8bcb8c-jppm6" Dec 10 11:14:45 crc kubenswrapper[4780]: I1210 11:14:45.644195 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fvdh4\" (UniqueName: \"kubernetes.io/projected/4cf4f40c-d9e0-46ea-bfa6-cfa15257d19a-kube-api-access-fvdh4\") pod \"dnsmasq-dns-785d8bcb8c-jppm6\" (UID: \"4cf4f40c-d9e0-46ea-bfa6-cfa15257d19a\") " pod="openstack/dnsmasq-dns-785d8bcb8c-jppm6" Dec 10 11:14:45 crc kubenswrapper[4780]: I1210 11:14:45.647031 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-scripts" Dec 10 11:14:45 crc kubenswrapper[4780]: I1210 11:14:45.647422 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-glance-dockercfg-4dhzr" Dec 10 11:14:45 crc kubenswrapper[4780]: I1210 11:14:45.648089 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-external-config-data" Dec 10 11:14:45 crc kubenswrapper[4780]: I1210 11:14:45.649449 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/4cf4f40c-d9e0-46ea-bfa6-cfa15257d19a-ovsdbserver-nb\") pod \"dnsmasq-dns-785d8bcb8c-jppm6\" (UID: \"4cf4f40c-d9e0-46ea-bfa6-cfa15257d19a\") " pod="openstack/dnsmasq-dns-785d8bcb8c-jppm6" Dec 10 11:14:45 crc kubenswrapper[4780]: I1210 11:14:45.657613 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/4cf4f40c-d9e0-46ea-bfa6-cfa15257d19a-ovsdbserver-sb\") pod \"dnsmasq-dns-785d8bcb8c-jppm6\" (UID: \"4cf4f40c-d9e0-46ea-bfa6-cfa15257d19a\") " pod="openstack/dnsmasq-dns-785d8bcb8c-jppm6" Dec 10 11:14:45 crc kubenswrapper[4780]: I1210 11:14:45.658027 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4cf4f40c-d9e0-46ea-bfa6-cfa15257d19a-config\") pod \"dnsmasq-dns-785d8bcb8c-jppm6\" (UID: \"4cf4f40c-d9e0-46ea-bfa6-cfa15257d19a\") " pod="openstack/dnsmasq-dns-785d8bcb8c-jppm6" Dec 10 11:14:45 crc kubenswrapper[4780]: I1210 11:14:45.658372 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/4cf4f40c-d9e0-46ea-bfa6-cfa15257d19a-dns-svc\") pod \"dnsmasq-dns-785d8bcb8c-jppm6\" (UID: \"4cf4f40c-d9e0-46ea-bfa6-cfa15257d19a\") " pod="openstack/dnsmasq-dns-785d8bcb8c-jppm6" Dec 10 11:14:45 crc kubenswrapper[4780]: I1210 11:14:45.669701 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/4cf4f40c-d9e0-46ea-bfa6-cfa15257d19a-ovsdbserver-sb\") pod 
\"dnsmasq-dns-785d8bcb8c-jppm6\" (UID: \"4cf4f40c-d9e0-46ea-bfa6-cfa15257d19a\") " pod="openstack/dnsmasq-dns-785d8bcb8c-jppm6" Dec 10 11:14:45 crc kubenswrapper[4780]: I1210 11:14:45.673742 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4cf4f40c-d9e0-46ea-bfa6-cfa15257d19a-config\") pod \"dnsmasq-dns-785d8bcb8c-jppm6\" (UID: \"4cf4f40c-d9e0-46ea-bfa6-cfa15257d19a\") " pod="openstack/dnsmasq-dns-785d8bcb8c-jppm6" Dec 10 11:14:45 crc kubenswrapper[4780]: I1210 11:14:45.673411 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/4cf4f40c-d9e0-46ea-bfa6-cfa15257d19a-dns-swift-storage-0\") pod \"dnsmasq-dns-785d8bcb8c-jppm6\" (UID: \"4cf4f40c-d9e0-46ea-bfa6-cfa15257d19a\") " pod="openstack/dnsmasq-dns-785d8bcb8c-jppm6" Dec 10 11:14:45 crc kubenswrapper[4780]: I1210 11:14:45.686032 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-hzmqc" event={"ID":"61593545-0480-4729-b6d1-ba4089e68f7a","Type":"ContainerStarted","Data":"0c2e060d83aa95614722ce2025c0d9bae3ba6776344cb6fd2aa1ab4ecb40148f"} Dec 10 11:14:45 crc kubenswrapper[4780]: I1210 11:14:45.686812 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/4cf4f40c-d9e0-46ea-bfa6-cfa15257d19a-dns-svc\") pod \"dnsmasq-dns-785d8bcb8c-jppm6\" (UID: \"4cf4f40c-d9e0-46ea-bfa6-cfa15257d19a\") " pod="openstack/dnsmasq-dns-785d8bcb8c-jppm6" Dec 10 11:14:45 crc kubenswrapper[4780]: I1210 11:14:45.726416 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-58dd9ff6bc-v5dwx" event={"ID":"2d3f5d7b-dc9c-464c-ba00-fbe7212e9db1","Type":"ContainerStarted","Data":"ea2258f451e6db15fb06c2d72e04b97990c0acd75649cad46f34e76929d0bd42"} Dec 10 11:14:45 crc kubenswrapper[4780]: I1210 11:14:45.758187 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Dec 10 11:14:45 crc kubenswrapper[4780]: I1210 11:14:45.802969 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5959f8865f-vxt8c" event={"ID":"c36308e6-e280-49e1-820c-5649e6ef0077","Type":"ContainerStarted","Data":"58a92a33d3cf930b42f0766417173935c4837adaf9b40532c35788536acc56cb"} Dec 10 11:14:45 crc kubenswrapper[4780]: I1210 11:14:45.810733 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fvdh4\" (UniqueName: \"kubernetes.io/projected/4cf4f40c-d9e0-46ea-bfa6-cfa15257d19a-kube-api-access-fvdh4\") pod \"dnsmasq-dns-785d8bcb8c-jppm6\" (UID: \"4cf4f40c-d9e0-46ea-bfa6-cfa15257d19a\") " pod="openstack/dnsmasq-dns-785d8bcb8c-jppm6" Dec 10 11:14:45 crc kubenswrapper[4780]: I1210 11:14:45.818642 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-cwb2h" event={"ID":"0613207e-c071-4295-a536-f037ee6fe446","Type":"ContainerStarted","Data":"f6f5b6398f37368334b2a916953d352093a3fc5c9cc53ed1f266f23146d822fd"} Dec 10 11:14:45 crc kubenswrapper[4780]: I1210 11:14:45.837077 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-hn59m" event={"ID":"1af5d3ff-e54a-4a1a-89f4-b105fe6dbbd9","Type":"ContainerStarted","Data":"65a06acba54c7499c055d1499360406b15c55eeeecdff2bd7ec63e1abcad4deb"} Dec 10 11:14:45 crc kubenswrapper[4780]: I1210 11:14:45.888712 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" 
(UniqueName: \"kubernetes.io/secret/ebf6ddef-a612-46b0-bc78-ed1ab89c525f-scripts\") pod \"glance-default-external-api-0\" (UID: \"ebf6ddef-a612-46b0-bc78-ed1ab89c525f\") " pod="openstack/glance-default-external-api-0" Dec 10 11:14:45 crc kubenswrapper[4780]: I1210 11:14:45.890002 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"glance-default-external-api-0\" (UID: \"ebf6ddef-a612-46b0-bc78-ed1ab89c525f\") " pod="openstack/glance-default-external-api-0" Dec 10 11:14:45 crc kubenswrapper[4780]: I1210 11:14:45.890159 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wxmdl\" (UniqueName: \"kubernetes.io/projected/ebf6ddef-a612-46b0-bc78-ed1ab89c525f-kube-api-access-wxmdl\") pod \"glance-default-external-api-0\" (UID: \"ebf6ddef-a612-46b0-bc78-ed1ab89c525f\") " pod="openstack/glance-default-external-api-0" Dec 10 11:14:45 crc kubenswrapper[4780]: I1210 11:14:45.891104 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ebf6ddef-a612-46b0-bc78-ed1ab89c525f-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"ebf6ddef-a612-46b0-bc78-ed1ab89c525f\") " pod="openstack/glance-default-external-api-0" Dec 10 11:14:45 crc kubenswrapper[4780]: I1210 11:14:45.891246 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ebf6ddef-a612-46b0-bc78-ed1ab89c525f-logs\") pod \"glance-default-external-api-0\" (UID: \"ebf6ddef-a612-46b0-bc78-ed1ab89c525f\") " pod="openstack/glance-default-external-api-0" Dec 10 11:14:45 crc kubenswrapper[4780]: I1210 11:14:45.891332 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ebf6ddef-a612-46b0-bc78-ed1ab89c525f-config-data\") pod \"glance-default-external-api-0\" (UID: \"ebf6ddef-a612-46b0-bc78-ed1ab89c525f\") " pod="openstack/glance-default-external-api-0" Dec 10 11:14:45 crc kubenswrapper[4780]: I1210 11:14:45.891510 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/ebf6ddef-a612-46b0-bc78-ed1ab89c525f-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"ebf6ddef-a612-46b0-bc78-ed1ab89c525f\") " pod="openstack/glance-default-external-api-0" Dec 10 11:14:45 crc kubenswrapper[4780]: I1210 11:14:45.995072 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"glance-default-external-api-0\" (UID: \"ebf6ddef-a612-46b0-bc78-ed1ab89c525f\") " pod="openstack/glance-default-external-api-0" Dec 10 11:14:45 crc kubenswrapper[4780]: I1210 11:14:45.995256 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wxmdl\" (UniqueName: \"kubernetes.io/projected/ebf6ddef-a612-46b0-bc78-ed1ab89c525f-kube-api-access-wxmdl\") pod \"glance-default-external-api-0\" (UID: \"ebf6ddef-a612-46b0-bc78-ed1ab89c525f\") " pod="openstack/glance-default-external-api-0" Dec 10 11:14:45 crc kubenswrapper[4780]: I1210 11:14:45.995426 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ebf6ddef-a612-46b0-bc78-ed1ab89c525f-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"ebf6ddef-a612-46b0-bc78-ed1ab89c525f\") " pod="openstack/glance-default-external-api-0" Dec 10 11:14:45 crc kubenswrapper[4780]: I1210 11:14:45.995506 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ebf6ddef-a612-46b0-bc78-ed1ab89c525f-logs\") pod \"glance-default-external-api-0\" (UID: \"ebf6ddef-a612-46b0-bc78-ed1ab89c525f\") " pod="openstack/glance-default-external-api-0" Dec 10 11:14:45 crc kubenswrapper[4780]: I1210 11:14:45.995627 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ebf6ddef-a612-46b0-bc78-ed1ab89c525f-config-data\") pod \"glance-default-external-api-0\" (UID: \"ebf6ddef-a612-46b0-bc78-ed1ab89c525f\") " pod="openstack/glance-default-external-api-0" Dec 10 11:14:46 crc kubenswrapper[4780]: I1210 11:14:46.001024 4780 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"glance-default-external-api-0\" (UID: \"ebf6ddef-a612-46b0-bc78-ed1ab89c525f\") device mount path \"/mnt/openstack/pv08\"" pod="openstack/glance-default-external-api-0" Dec 10 11:14:46 crc kubenswrapper[4780]: I1210 11:14:45.995725 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/ebf6ddef-a612-46b0-bc78-ed1ab89c525f-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"ebf6ddef-a612-46b0-bc78-ed1ab89c525f\") " pod="openstack/glance-default-external-api-0" Dec 10 11:14:46 crc kubenswrapper[4780]: I1210 11:14:46.006360 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ebf6ddef-a612-46b0-bc78-ed1ab89c525f-scripts\") pod \"glance-default-external-api-0\" (UID: \"ebf6ddef-a612-46b0-bc78-ed1ab89c525f\") " pod="openstack/glance-default-external-api-0" Dec 10 11:14:46 crc kubenswrapper[4780]: I1210 11:14:46.003123 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/ebf6ddef-a612-46b0-bc78-ed1ab89c525f-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"ebf6ddef-a612-46b0-bc78-ed1ab89c525f\") " pod="openstack/glance-default-external-api-0" Dec 10 11:14:46 crc kubenswrapper[4780]: I1210 11:14:46.002724 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ebf6ddef-a612-46b0-bc78-ed1ab89c525f-logs\") pod \"glance-default-external-api-0\" (UID: \"ebf6ddef-a612-46b0-bc78-ed1ab89c525f\") " pod="openstack/glance-default-external-api-0" Dec 10 11:14:46 crc kubenswrapper[4780]: I1210 11:14:46.013415 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ebf6ddef-a612-46b0-bc78-ed1ab89c525f-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"ebf6ddef-a612-46b0-bc78-ed1ab89c525f\") " pod="openstack/glance-default-external-api-0" Dec 10 11:14:46 crc kubenswrapper[4780]: I1210 11:14:46.015840 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ebf6ddef-a612-46b0-bc78-ed1ab89c525f-scripts\") pod 
\"glance-default-external-api-0\" (UID: \"ebf6ddef-a612-46b0-bc78-ed1ab89c525f\") " pod="openstack/glance-default-external-api-0" Dec 10 11:14:46 crc kubenswrapper[4780]: I1210 11:14:46.038974 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ebf6ddef-a612-46b0-bc78-ed1ab89c525f-config-data\") pod \"glance-default-external-api-0\" (UID: \"ebf6ddef-a612-46b0-bc78-ed1ab89c525f\") " pod="openstack/glance-default-external-api-0" Dec 10 11:14:46 crc kubenswrapper[4780]: I1210 11:14:46.058018 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wxmdl\" (UniqueName: \"kubernetes.io/projected/ebf6ddef-a612-46b0-bc78-ed1ab89c525f-kube-api-access-wxmdl\") pod \"glance-default-external-api-0\" (UID: \"ebf6ddef-a612-46b0-bc78-ed1ab89c525f\") " pod="openstack/glance-default-external-api-0" Dec 10 11:14:46 crc kubenswrapper[4780]: I1210 11:14:46.115296 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"glance-default-external-api-0\" (UID: \"ebf6ddef-a612-46b0-bc78-ed1ab89c525f\") " pod="openstack/glance-default-external-api-0" Dec 10 11:14:46 crc kubenswrapper[4780]: I1210 11:14:46.202591 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-internal-api-0"] Dec 10 11:14:46 crc kubenswrapper[4780]: I1210 11:14:46.208904 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Dec 10 11:14:46 crc kubenswrapper[4780]: I1210 11:14:46.214115 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-internal-config-data" Dec 10 11:14:46 crc kubenswrapper[4780]: I1210 11:14:46.296423 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Dec 10 11:14:46 crc kubenswrapper[4780]: I1210 11:14:46.330364 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/c2ecbcdb-1f2b-4633-a3e4-fb8b058464df-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"c2ecbcdb-1f2b-4633-a3e4-fb8b058464df\") " pod="openstack/glance-default-internal-api-0" Dec 10 11:14:46 crc kubenswrapper[4780]: I1210 11:14:46.331196 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c2ecbcdb-1f2b-4633-a3e4-fb8b058464df-scripts\") pod \"glance-default-internal-api-0\" (UID: \"c2ecbcdb-1f2b-4633-a3e4-fb8b058464df\") " pod="openstack/glance-default-internal-api-0" Dec 10 11:14:46 crc kubenswrapper[4780]: I1210 11:14:46.331349 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9r26m\" (UniqueName: \"kubernetes.io/projected/c2ecbcdb-1f2b-4633-a3e4-fb8b058464df-kube-api-access-9r26m\") pod \"glance-default-internal-api-0\" (UID: \"c2ecbcdb-1f2b-4633-a3e4-fb8b058464df\") " pod="openstack/glance-default-internal-api-0" Dec 10 11:14:46 crc kubenswrapper[4780]: I1210 11:14:46.331684 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"glance-default-internal-api-0\" (UID: \"c2ecbcdb-1f2b-4633-a3e4-fb8b058464df\") " pod="openstack/glance-default-internal-api-0" 
Dec 10 11:14:46 crc kubenswrapper[4780]: I1210 11:14:46.331866 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c2ecbcdb-1f2b-4633-a3e4-fb8b058464df-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"c2ecbcdb-1f2b-4633-a3e4-fb8b058464df\") " pod="openstack/glance-default-internal-api-0" Dec 10 11:14:46 crc kubenswrapper[4780]: I1210 11:14:46.332125 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c2ecbcdb-1f2b-4633-a3e4-fb8b058464df-config-data\") pod \"glance-default-internal-api-0\" (UID: \"c2ecbcdb-1f2b-4633-a3e4-fb8b058464df\") " pod="openstack/glance-default-internal-api-0" Dec 10 11:14:46 crc kubenswrapper[4780]: I1210 11:14:46.332352 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c2ecbcdb-1f2b-4633-a3e4-fb8b058464df-logs\") pod \"glance-default-internal-api-0\" (UID: \"c2ecbcdb-1f2b-4633-a3e4-fb8b058464df\") " pod="openstack/glance-default-internal-api-0" Dec 10 11:14:46 crc kubenswrapper[4780]: I1210 11:14:46.574838 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"glance-default-internal-api-0\" (UID: \"c2ecbcdb-1f2b-4633-a3e4-fb8b058464df\") " pod="openstack/glance-default-internal-api-0" Dec 10 11:14:47 crc kubenswrapper[4780]: I1210 11:14:46.574963 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c2ecbcdb-1f2b-4633-a3e4-fb8b058464df-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"c2ecbcdb-1f2b-4633-a3e4-fb8b058464df\") " pod="openstack/glance-default-internal-api-0" Dec 10 11:14:47 crc kubenswrapper[4780]: I1210 11:14:46.575027 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c2ecbcdb-1f2b-4633-a3e4-fb8b058464df-config-data\") pod \"glance-default-internal-api-0\" (UID: \"c2ecbcdb-1f2b-4633-a3e4-fb8b058464df\") " pod="openstack/glance-default-internal-api-0" Dec 10 11:14:47 crc kubenswrapper[4780]: I1210 11:14:46.575084 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c2ecbcdb-1f2b-4633-a3e4-fb8b058464df-logs\") pod \"glance-default-internal-api-0\" (UID: \"c2ecbcdb-1f2b-4633-a3e4-fb8b058464df\") " pod="openstack/glance-default-internal-api-0" Dec 10 11:14:47 crc kubenswrapper[4780]: I1210 11:14:46.575154 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/c2ecbcdb-1f2b-4633-a3e4-fb8b058464df-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"c2ecbcdb-1f2b-4633-a3e4-fb8b058464df\") " pod="openstack/glance-default-internal-api-0" Dec 10 11:14:47 crc kubenswrapper[4780]: I1210 11:14:46.575180 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c2ecbcdb-1f2b-4633-a3e4-fb8b058464df-scripts\") pod \"glance-default-internal-api-0\" (UID: \"c2ecbcdb-1f2b-4633-a3e4-fb8b058464df\") " pod="openstack/glance-default-internal-api-0" Dec 10 11:14:47 crc kubenswrapper[4780]: I1210 11:14:46.575229 4780 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9r26m\" (UniqueName: \"kubernetes.io/projected/c2ecbcdb-1f2b-4633-a3e4-fb8b058464df-kube-api-access-9r26m\") pod \"glance-default-internal-api-0\" (UID: \"c2ecbcdb-1f2b-4633-a3e4-fb8b058464df\") " pod="openstack/glance-default-internal-api-0" Dec 10 11:14:47 crc kubenswrapper[4780]: I1210 11:14:46.583853 4780 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"glance-default-internal-api-0\" (UID: \"c2ecbcdb-1f2b-4633-a3e4-fb8b058464df\") device mount path \"/mnt/openstack/pv10\"" pod="openstack/glance-default-internal-api-0" Dec 10 11:14:47 crc kubenswrapper[4780]: I1210 11:14:46.586220 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/c2ecbcdb-1f2b-4633-a3e4-fb8b058464df-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"c2ecbcdb-1f2b-4633-a3e4-fb8b058464df\") " pod="openstack/glance-default-internal-api-0" Dec 10 11:14:47 crc kubenswrapper[4780]: I1210 11:14:46.586565 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c2ecbcdb-1f2b-4633-a3e4-fb8b058464df-logs\") pod \"glance-default-internal-api-0\" (UID: \"c2ecbcdb-1f2b-4633-a3e4-fb8b058464df\") " pod="openstack/glance-default-internal-api-0" Dec 10 11:14:47 crc kubenswrapper[4780]: I1210 11:14:46.592770 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c2ecbcdb-1f2b-4633-a3e4-fb8b058464df-config-data\") pod \"glance-default-internal-api-0\" (UID: \"c2ecbcdb-1f2b-4633-a3e4-fb8b058464df\") " pod="openstack/glance-default-internal-api-0" Dec 10 11:14:47 crc kubenswrapper[4780]: I1210 11:14:46.615953 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Dec 10 11:14:47 crc kubenswrapper[4780]: I1210 11:14:46.622429 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c2ecbcdb-1f2b-4633-a3e4-fb8b058464df-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"c2ecbcdb-1f2b-4633-a3e4-fb8b058464df\") " pod="openstack/glance-default-internal-api-0" Dec 10 11:14:47 crc kubenswrapper[4780]: I1210 11:14:46.629530 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9r26m\" (UniqueName: \"kubernetes.io/projected/c2ecbcdb-1f2b-4633-a3e4-fb8b058464df-kube-api-access-9r26m\") pod \"glance-default-internal-api-0\" (UID: \"c2ecbcdb-1f2b-4633-a3e4-fb8b058464df\") " pod="openstack/glance-default-internal-api-0" Dec 10 11:14:47 crc kubenswrapper[4780]: I1210 11:14:46.638436 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c2ecbcdb-1f2b-4633-a3e4-fb8b058464df-scripts\") pod \"glance-default-internal-api-0\" (UID: \"c2ecbcdb-1f2b-4633-a3e4-fb8b058464df\") " pod="openstack/glance-default-internal-api-0" Dec 10 11:14:47 crc kubenswrapper[4780]: I1210 11:14:46.756559 4780 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/openstack-galera-0" podUID="c31145f5-6188-4934-8ceb-a86ac4a0e997" containerName="galera" probeResult="failure" output="command timed out" Dec 10 11:14:47 crc kubenswrapper[4780]: I1210 11:14:46.922311 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for 
volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"glance-default-internal-api-0\" (UID: \"c2ecbcdb-1f2b-4633-a3e4-fb8b058464df\") " pod="openstack/glance-default-internal-api-0" Dec 10 11:14:47 crc kubenswrapper[4780]: I1210 11:14:46.971364 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Dec 10 11:14:47 crc kubenswrapper[4780]: I1210 11:14:47.315306 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-785d8bcb8c-jppm6" Dec 10 11:14:47 crc kubenswrapper[4780]: I1210 11:14:47.398371 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Dec 10 11:14:47 crc kubenswrapper[4780]: I1210 11:14:47.469747 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Dec 10 11:14:48 crc kubenswrapper[4780]: I1210 11:14:48.095291 4780 generic.go:334] "Generic (PLEG): container finished" podID="c36308e6-e280-49e1-820c-5649e6ef0077" containerID="9d54bee2525e4d495783477cfe5877847d0f63ea66c5aaf9cda0a964bb4a15bb" exitCode=0 Dec 10 11:14:48 crc kubenswrapper[4780]: I1210 11:14:48.095887 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5959f8865f-vxt8c" event={"ID":"c36308e6-e280-49e1-820c-5649e6ef0077","Type":"ContainerDied","Data":"9d54bee2525e4d495783477cfe5877847d0f63ea66c5aaf9cda0a964bb4a15bb"} Dec 10 11:14:48 crc kubenswrapper[4780]: I1210 11:14:48.141838 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-cwb2h" event={"ID":"0613207e-c071-4295-a536-f037ee6fe446","Type":"ContainerStarted","Data":"3502d3dd4df4120fbd326913b61950fb7220222a5f3c9dd175ae7dbd20b0f414"} Dec 10 11:14:48 crc kubenswrapper[4780]: I1210 11:14:48.204744 4780 generic.go:334] "Generic (PLEG): container finished" podID="2d3f5d7b-dc9c-464c-ba00-fbe7212e9db1" containerID="682cee52b2fd5e7415a9c68aabca9fef28dd69ca27f7e65f18888ea44fefae3c" exitCode=0 Dec 10 11:14:48 crc kubenswrapper[4780]: I1210 11:14:48.204823 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-58dd9ff6bc-v5dwx" event={"ID":"2d3f5d7b-dc9c-464c-ba00-fbe7212e9db1","Type":"ContainerDied","Data":"682cee52b2fd5e7415a9c68aabca9fef28dd69ca27f7e65f18888ea44fefae3c"} Dec 10 11:14:48 crc kubenswrapper[4780]: I1210 11:14:48.290297 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-db-sync-cwb2h" podStartSLOduration=10.290260853 podStartE2EDuration="10.290260853s" podCreationTimestamp="2025-12-10 11:14:38 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 11:14:48.266041725 +0000 UTC m=+1793.119435168" watchObservedRunningTime="2025-12-10 11:14:48.290260853 +0000 UTC m=+1793.143654296" Dec 10 11:14:48 crc kubenswrapper[4780]: I1210 11:14:48.965085 4780 scope.go:117] "RemoveContainer" containerID="90f38aa24e74e78a263a82b742bebf91991ff25f2212114904e33abbbd82de16" Dec 10 11:14:48 crc kubenswrapper[4780]: E1210 11:14:48.966477 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xhdr5_openshift-machine-config-operator(6bf1dca1-b191-4796-b326-baac53e84045)\"" 
pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" podUID="6bf1dca1-b191-4796-b326-baac53e84045" Dec 10 11:14:50 crc kubenswrapper[4780]: I1210 11:14:50.801127 4780 generic.go:334] "Generic (PLEG): container finished" podID="a75f82ba-b1e5-45cc-8e35-dd8c75c21247" containerID="4d7c345aeebdb291cff6d296f990d290230994fa4233b689d727d9d9f2d07bfe" exitCode=0 Dec 10 11:14:50 crc kubenswrapper[4780]: I1210 11:14:50.802036 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"a75f82ba-b1e5-45cc-8e35-dd8c75c21247","Type":"ContainerDied","Data":"4d7c345aeebdb291cff6d296f990d290230994fa4233b689d727d9d9f2d07bfe"} Dec 10 11:14:52 crc kubenswrapper[4780]: I1210 11:14:52.067871 4780 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-58dd9ff6bc-v5dwx" Dec 10 11:14:52 crc kubenswrapper[4780]: I1210 11:14:52.131203 4780 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5959f8865f-vxt8c" Dec 10 11:14:52 crc kubenswrapper[4780]: I1210 11:14:52.220833 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/2d3f5d7b-dc9c-464c-ba00-fbe7212e9db1-ovsdbserver-nb\") pod \"2d3f5d7b-dc9c-464c-ba00-fbe7212e9db1\" (UID: \"2d3f5d7b-dc9c-464c-ba00-fbe7212e9db1\") " Dec 10 11:14:52 crc kubenswrapper[4780]: I1210 11:14:52.221310 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/2d3f5d7b-dc9c-464c-ba00-fbe7212e9db1-dns-svc\") pod \"2d3f5d7b-dc9c-464c-ba00-fbe7212e9db1\" (UID: \"2d3f5d7b-dc9c-464c-ba00-fbe7212e9db1\") " Dec 10 11:14:52 crc kubenswrapper[4780]: I1210 11:14:52.221501 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-f9khd\" (UniqueName: \"kubernetes.io/projected/2d3f5d7b-dc9c-464c-ba00-fbe7212e9db1-kube-api-access-f9khd\") pod \"2d3f5d7b-dc9c-464c-ba00-fbe7212e9db1\" (UID: \"2d3f5d7b-dc9c-464c-ba00-fbe7212e9db1\") " Dec 10 11:14:52 crc kubenswrapper[4780]: I1210 11:14:52.221704 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2d3f5d7b-dc9c-464c-ba00-fbe7212e9db1-config\") pod \"2d3f5d7b-dc9c-464c-ba00-fbe7212e9db1\" (UID: \"2d3f5d7b-dc9c-464c-ba00-fbe7212e9db1\") " Dec 10 11:14:52 crc kubenswrapper[4780]: I1210 11:14:52.221887 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/2d3f5d7b-dc9c-464c-ba00-fbe7212e9db1-ovsdbserver-sb\") pod \"2d3f5d7b-dc9c-464c-ba00-fbe7212e9db1\" (UID: \"2d3f5d7b-dc9c-464c-ba00-fbe7212e9db1\") " Dec 10 11:14:52 crc kubenswrapper[4780]: I1210 11:14:52.221987 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/2d3f5d7b-dc9c-464c-ba00-fbe7212e9db1-dns-swift-storage-0\") pod \"2d3f5d7b-dc9c-464c-ba00-fbe7212e9db1\" (UID: \"2d3f5d7b-dc9c-464c-ba00-fbe7212e9db1\") " Dec 10 11:14:52 crc kubenswrapper[4780]: I1210 11:14:52.277885 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-58dd9ff6bc-v5dwx" event={"ID":"2d3f5d7b-dc9c-464c-ba00-fbe7212e9db1","Type":"ContainerDied","Data":"ea2258f451e6db15fb06c2d72e04b97990c0acd75649cad46f34e76929d0bd42"} Dec 10 11:14:52 crc kubenswrapper[4780]: I1210 11:14:52.334768 
4780 scope.go:117] "RemoveContainer" containerID="682cee52b2fd5e7415a9c68aabca9fef28dd69ca27f7e65f18888ea44fefae3c" Dec 10 11:14:52 crc kubenswrapper[4780]: I1210 11:14:52.343749 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/c36308e6-e280-49e1-820c-5649e6ef0077-dns-swift-storage-0\") pod \"c36308e6-e280-49e1-820c-5649e6ef0077\" (UID: \"c36308e6-e280-49e1-820c-5649e6ef0077\") " Dec 10 11:14:52 crc kubenswrapper[4780]: I1210 11:14:52.343842 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/c36308e6-e280-49e1-820c-5649e6ef0077-dns-svc\") pod \"c36308e6-e280-49e1-820c-5649e6ef0077\" (UID: \"c36308e6-e280-49e1-820c-5649e6ef0077\") " Dec 10 11:14:52 crc kubenswrapper[4780]: I1210 11:14:52.344027 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/c36308e6-e280-49e1-820c-5649e6ef0077-ovsdbserver-sb\") pod \"c36308e6-e280-49e1-820c-5649e6ef0077\" (UID: \"c36308e6-e280-49e1-820c-5649e6ef0077\") " Dec 10 11:14:52 crc kubenswrapper[4780]: I1210 11:14:52.344303 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6l2g5\" (UniqueName: \"kubernetes.io/projected/c36308e6-e280-49e1-820c-5649e6ef0077-kube-api-access-6l2g5\") pod \"c36308e6-e280-49e1-820c-5649e6ef0077\" (UID: \"c36308e6-e280-49e1-820c-5649e6ef0077\") " Dec 10 11:14:52 crc kubenswrapper[4780]: I1210 11:14:52.344390 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c36308e6-e280-49e1-820c-5649e6ef0077-config\") pod \"c36308e6-e280-49e1-820c-5649e6ef0077\" (UID: \"c36308e6-e280-49e1-820c-5649e6ef0077\") " Dec 10 11:14:52 crc kubenswrapper[4780]: I1210 11:14:52.344586 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/c36308e6-e280-49e1-820c-5649e6ef0077-ovsdbserver-nb\") pod \"c36308e6-e280-49e1-820c-5649e6ef0077\" (UID: \"c36308e6-e280-49e1-820c-5649e6ef0077\") " Dec 10 11:14:52 crc kubenswrapper[4780]: I1210 11:14:52.354856 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2d3f5d7b-dc9c-464c-ba00-fbe7212e9db1-kube-api-access-f9khd" (OuterVolumeSpecName: "kube-api-access-f9khd") pod "2d3f5d7b-dc9c-464c-ba00-fbe7212e9db1" (UID: "2d3f5d7b-dc9c-464c-ba00-fbe7212e9db1"). InnerVolumeSpecName "kube-api-access-f9khd". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:14:52 crc kubenswrapper[4780]: I1210 11:14:52.378601 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2d3f5d7b-dc9c-464c-ba00-fbe7212e9db1-config" (OuterVolumeSpecName: "config") pod "2d3f5d7b-dc9c-464c-ba00-fbe7212e9db1" (UID: "2d3f5d7b-dc9c-464c-ba00-fbe7212e9db1"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 11:14:52 crc kubenswrapper[4780]: I1210 11:14:52.434322 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c36308e6-e280-49e1-820c-5649e6ef0077-kube-api-access-6l2g5" (OuterVolumeSpecName: "kube-api-access-6l2g5") pod "c36308e6-e280-49e1-820c-5649e6ef0077" (UID: "c36308e6-e280-49e1-820c-5649e6ef0077"). InnerVolumeSpecName "kube-api-access-6l2g5". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:14:52 crc kubenswrapper[4780]: I1210 11:14:52.434776 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5959f8865f-vxt8c" event={"ID":"c36308e6-e280-49e1-820c-5649e6ef0077","Type":"ContainerDied","Data":"58a92a33d3cf930b42f0766417173935c4837adaf9b40532c35788536acc56cb"} Dec 10 11:14:52 crc kubenswrapper[4780]: I1210 11:14:52.434970 4780 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5959f8865f-vxt8c" Dec 10 11:14:52 crc kubenswrapper[4780]: I1210 11:14:52.449176 4780 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6l2g5\" (UniqueName: \"kubernetes.io/projected/c36308e6-e280-49e1-820c-5649e6ef0077-kube-api-access-6l2g5\") on node \"crc\" DevicePath \"\"" Dec 10 11:14:52 crc kubenswrapper[4780]: I1210 11:14:52.449292 4780 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-f9khd\" (UniqueName: \"kubernetes.io/projected/2d3f5d7b-dc9c-464c-ba00-fbe7212e9db1-kube-api-access-f9khd\") on node \"crc\" DevicePath \"\"" Dec 10 11:14:52 crc kubenswrapper[4780]: I1210 11:14:52.449337 4780 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2d3f5d7b-dc9c-464c-ba00-fbe7212e9db1-config\") on node \"crc\" DevicePath \"\"" Dec 10 11:14:52 crc kubenswrapper[4780]: I1210 11:14:52.487849 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2d3f5d7b-dc9c-464c-ba00-fbe7212e9db1-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "2d3f5d7b-dc9c-464c-ba00-fbe7212e9db1" (UID: "2d3f5d7b-dc9c-464c-ba00-fbe7212e9db1"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 11:14:52 crc kubenswrapper[4780]: I1210 11:14:52.499486 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Dec 10 11:14:52 crc kubenswrapper[4780]: I1210 11:14:52.544240 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-785d8bcb8c-jppm6"] Dec 10 11:14:52 crc kubenswrapper[4780]: I1210 11:14:52.583240 4780 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/2d3f5d7b-dc9c-464c-ba00-fbe7212e9db1-dns-svc\") on node \"crc\" DevicePath \"\"" Dec 10 11:14:52 crc kubenswrapper[4780]: I1210 11:14:52.623875 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2d3f5d7b-dc9c-464c-ba00-fbe7212e9db1-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "2d3f5d7b-dc9c-464c-ba00-fbe7212e9db1" (UID: "2d3f5d7b-dc9c-464c-ba00-fbe7212e9db1"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 11:14:52 crc kubenswrapper[4780]: I1210 11:14:52.663081 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2d3f5d7b-dc9c-464c-ba00-fbe7212e9db1-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "2d3f5d7b-dc9c-464c-ba00-fbe7212e9db1" (UID: "2d3f5d7b-dc9c-464c-ba00-fbe7212e9db1"). InnerVolumeSpecName "ovsdbserver-nb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 11:14:52 crc kubenswrapper[4780]: I1210 11:14:52.689525 4780 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/2d3f5d7b-dc9c-464c-ba00-fbe7212e9db1-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Dec 10 11:14:52 crc kubenswrapper[4780]: I1210 11:14:52.689568 4780 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/2d3f5d7b-dc9c-464c-ba00-fbe7212e9db1-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Dec 10 11:14:52 crc kubenswrapper[4780]: I1210 11:14:52.737280 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2d3f5d7b-dc9c-464c-ba00-fbe7212e9db1-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "2d3f5d7b-dc9c-464c-ba00-fbe7212e9db1" (UID: "2d3f5d7b-dc9c-464c-ba00-fbe7212e9db1"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 11:14:52 crc kubenswrapper[4780]: I1210 11:14:52.765106 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c36308e6-e280-49e1-820c-5649e6ef0077-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "c36308e6-e280-49e1-820c-5649e6ef0077" (UID: "c36308e6-e280-49e1-820c-5649e6ef0077"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 11:14:52 crc kubenswrapper[4780]: I1210 11:14:52.765772 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c36308e6-e280-49e1-820c-5649e6ef0077-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "c36308e6-e280-49e1-820c-5649e6ef0077" (UID: "c36308e6-e280-49e1-820c-5649e6ef0077"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 11:14:52 crc kubenswrapper[4780]: I1210 11:14:52.766516 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c36308e6-e280-49e1-820c-5649e6ef0077-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "c36308e6-e280-49e1-820c-5649e6ef0077" (UID: "c36308e6-e280-49e1-820c-5649e6ef0077"). InnerVolumeSpecName "ovsdbserver-nb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 11:14:52 crc kubenswrapper[4780]: W1210 11:14:52.784845 4780 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podebf6ddef_a612_46b0_bc78_ed1ab89c525f.slice/crio-750c5dff7f6d2b49339fce4c2c0fcc25b3b36e85d0a038f29f872b14746c76f0 WatchSource:0}: Error finding container 750c5dff7f6d2b49339fce4c2c0fcc25b3b36e85d0a038f29f872b14746c76f0: Status 404 returned error can't find the container with id 750c5dff7f6d2b49339fce4c2c0fcc25b3b36e85d0a038f29f872b14746c76f0 Dec 10 11:14:52 crc kubenswrapper[4780]: I1210 11:14:52.805283 4780 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/c36308e6-e280-49e1-820c-5649e6ef0077-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Dec 10 11:14:52 crc kubenswrapper[4780]: I1210 11:14:52.805339 4780 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/c36308e6-e280-49e1-820c-5649e6ef0077-dns-svc\") on node \"crc\" DevicePath \"\"" Dec 10 11:14:52 crc kubenswrapper[4780]: I1210 11:14:52.805354 4780 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/2d3f5d7b-dc9c-464c-ba00-fbe7212e9db1-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Dec 10 11:14:52 crc kubenswrapper[4780]: I1210 11:14:52.805372 4780 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/c36308e6-e280-49e1-820c-5649e6ef0077-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Dec 10 11:14:52 crc kubenswrapper[4780]: I1210 11:14:52.812364 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c36308e6-e280-49e1-820c-5649e6ef0077-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "c36308e6-e280-49e1-820c-5649e6ef0077" (UID: "c36308e6-e280-49e1-820c-5649e6ef0077"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 11:14:52 crc kubenswrapper[4780]: W1210 11:14:52.825224 4780 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod4cf4f40c_d9e0_46ea_bfa6_cfa15257d19a.slice/crio-8774574c49525d5aacd5ff747b8efa04e35ecaebebcd709854bb7b53445749f0 WatchSource:0}: Error finding container 8774574c49525d5aacd5ff747b8efa04e35ecaebebcd709854bb7b53445749f0: Status 404 returned error can't find the container with id 8774574c49525d5aacd5ff747b8efa04e35ecaebebcd709854bb7b53445749f0 Dec 10 11:14:52 crc kubenswrapper[4780]: I1210 11:14:52.839670 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c36308e6-e280-49e1-820c-5649e6ef0077-config" (OuterVolumeSpecName: "config") pod "c36308e6-e280-49e1-820c-5649e6ef0077" (UID: "c36308e6-e280-49e1-820c-5649e6ef0077"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 11:14:52 crc kubenswrapper[4780]: I1210 11:14:52.843939 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Dec 10 11:14:52 crc kubenswrapper[4780]: I1210 11:14:52.915519 4780 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c36308e6-e280-49e1-820c-5649e6ef0077-config\") on node \"crc\" DevicePath \"\"" Dec 10 11:14:52 crc kubenswrapper[4780]: I1210 11:14:52.915589 4780 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/c36308e6-e280-49e1-820c-5649e6ef0077-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Dec 10 11:14:52 crc kubenswrapper[4780]: I1210 11:14:52.917068 4780 scope.go:117] "RemoveContainer" containerID="9d54bee2525e4d495783477cfe5877847d0f63ea66c5aaf9cda0a964bb4a15bb" Dec 10 11:14:53 crc kubenswrapper[4780]: I1210 11:14:53.243247 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5959f8865f-vxt8c"] Dec 10 11:14:53 crc kubenswrapper[4780]: I1210 11:14:53.330200 4780 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-5959f8865f-vxt8c"] Dec 10 11:14:53 crc kubenswrapper[4780]: I1210 11:14:53.529864 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-785d8bcb8c-jppm6" event={"ID":"4cf4f40c-d9e0-46ea-bfa6-cfa15257d19a","Type":"ContainerStarted","Data":"8774574c49525d5aacd5ff747b8efa04e35ecaebebcd709854bb7b53445749f0"} Dec 10 11:14:53 crc kubenswrapper[4780]: I1210 11:14:53.575627 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"c2ecbcdb-1f2b-4633-a3e4-fb8b058464df","Type":"ContainerStarted","Data":"d7fac1b72216c396052dc20d5e8011c0269000cab2b0630abf5b5f7d5d4d3807"} Dec 10 11:14:53 crc kubenswrapper[4780]: I1210 11:14:53.651060 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"ebf6ddef-a612-46b0-bc78-ed1ab89c525f","Type":"ContainerStarted","Data":"750c5dff7f6d2b49339fce4c2c0fcc25b3b36e85d0a038f29f872b14746c76f0"} Dec 10 11:14:53 crc kubenswrapper[4780]: I1210 11:14:53.707322 4780 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-58dd9ff6bc-v5dwx" Dec 10 11:14:53 crc kubenswrapper[4780]: I1210 11:14:53.796109 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"a75f82ba-b1e5-45cc-8e35-dd8c75c21247","Type":"ContainerStarted","Data":"9209727e836dcaa4abc6c0f87389561441a6d44cb01427ba13322b24a8d7ef6d"} Dec 10 11:14:53 crc kubenswrapper[4780]: I1210 11:14:53.913092 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-58dd9ff6bc-v5dwx"] Dec 10 11:14:54 crc kubenswrapper[4780]: I1210 11:14:54.015557 4780 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c36308e6-e280-49e1-820c-5649e6ef0077" path="/var/lib/kubelet/pods/c36308e6-e280-49e1-820c-5649e6ef0077/volumes" Dec 10 11:14:54 crc kubenswrapper[4780]: I1210 11:14:54.016666 4780 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-58dd9ff6bc-v5dwx"] Dec 10 11:14:56 crc kubenswrapper[4780]: I1210 11:14:56.010088 4780 generic.go:334] "Generic (PLEG): container finished" podID="4cf4f40c-d9e0-46ea-bfa6-cfa15257d19a" containerID="88f1eb62beda2093547d81dd9a04f959fee0e9d6a5b4ddc6da0d6599b1d33956" exitCode=0 Dec 10 11:14:56 crc kubenswrapper[4780]: I1210 11:14:56.048529 4780 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2d3f5d7b-dc9c-464c-ba00-fbe7212e9db1" path="/var/lib/kubelet/pods/2d3f5d7b-dc9c-464c-ba00-fbe7212e9db1/volumes" Dec 10 11:14:56 crc kubenswrapper[4780]: I1210 11:14:56.049896 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-785d8bcb8c-jppm6" event={"ID":"4cf4f40c-d9e0-46ea-bfa6-cfa15257d19a","Type":"ContainerDied","Data":"88f1eb62beda2093547d81dd9a04f959fee0e9d6a5b4ddc6da0d6599b1d33956"} Dec 10 11:14:56 crc kubenswrapper[4780]: I1210 11:14:56.125838 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"c2ecbcdb-1f2b-4633-a3e4-fb8b058464df","Type":"ContainerStarted","Data":"6bbc21d9af610cb20a0ff6278ae968cb5f5d92b47b2a8fa149eff3e16e57a619"} Dec 10 11:14:56 crc kubenswrapper[4780]: I1210 11:14:56.167217 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"ebf6ddef-a612-46b0-bc78-ed1ab89c525f","Type":"ContainerStarted","Data":"045a6c11b33ce5f99c19819fa5372767b9adf2abef1b9e90d56a73f23055283a"} Dec 10 11:14:57 crc kubenswrapper[4780]: I1210 11:14:57.197907 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Dec 10 11:14:58 crc kubenswrapper[4780]: I1210 11:14:58.342333 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"c2ecbcdb-1f2b-4633-a3e4-fb8b058464df","Type":"ContainerStarted","Data":"b9e8ea8a07b3d91c836124c1a7faeedb3e809a759e52fdb1b9fbbaee199646ba"} Dec 10 11:14:58 crc kubenswrapper[4780]: I1210 11:14:58.343599 4780 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="c2ecbcdb-1f2b-4633-a3e4-fb8b058464df" containerName="glance-log" containerID="cri-o://6bbc21d9af610cb20a0ff6278ae968cb5f5d92b47b2a8fa149eff3e16e57a619" gracePeriod=30 Dec 10 11:14:58 crc kubenswrapper[4780]: I1210 11:14:58.343729 4780 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="c2ecbcdb-1f2b-4633-a3e4-fb8b058464df" containerName="glance-httpd" 
containerID="cri-o://b9e8ea8a07b3d91c836124c1a7faeedb3e809a759e52fdb1b9fbbaee199646ba" gracePeriod=30 Dec 10 11:14:58 crc kubenswrapper[4780]: I1210 11:14:58.368832 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-785d8bcb8c-jppm6" event={"ID":"4cf4f40c-d9e0-46ea-bfa6-cfa15257d19a","Type":"ContainerStarted","Data":"381eef57163efa671b512b67a68dd98bb6df98d41dec1d5227db4ca11e682322"} Dec 10 11:14:58 crc kubenswrapper[4780]: I1210 11:14:58.369935 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-785d8bcb8c-jppm6" Dec 10 11:14:58 crc kubenswrapper[4780]: I1210 11:14:58.394749 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-internal-api-0" podStartSLOduration=12.394709914 podStartE2EDuration="12.394709914s" podCreationTimestamp="2025-12-10 11:14:46 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 11:14:58.377073174 +0000 UTC m=+1803.230466617" watchObservedRunningTime="2025-12-10 11:14:58.394709914 +0000 UTC m=+1803.248103357" Dec 10 11:14:58 crc kubenswrapper[4780]: I1210 11:14:58.473243 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-785d8bcb8c-jppm6" podStartSLOduration=15.473208927 podStartE2EDuration="15.473208927s" podCreationTimestamp="2025-12-10 11:14:43 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 11:14:58.459513657 +0000 UTC m=+1803.312907110" watchObservedRunningTime="2025-12-10 11:14:58.473208927 +0000 UTC m=+1803.326602370" Dec 10 11:14:59 crc kubenswrapper[4780]: I1210 11:14:59.518147 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"ebf6ddef-a612-46b0-bc78-ed1ab89c525f","Type":"ContainerStarted","Data":"a2b9283618d6adf423c85f4734df6b09f5e1f99a93e918c3c1b7fde7a2db459c"} Dec 10 11:14:59 crc kubenswrapper[4780]: I1210 11:14:59.518681 4780 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="ebf6ddef-a612-46b0-bc78-ed1ab89c525f" containerName="glance-log" containerID="cri-o://045a6c11b33ce5f99c19819fa5372767b9adf2abef1b9e90d56a73f23055283a" gracePeriod=30 Dec 10 11:14:59 crc kubenswrapper[4780]: I1210 11:14:59.518941 4780 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="ebf6ddef-a612-46b0-bc78-ed1ab89c525f" containerName="glance-httpd" containerID="cri-o://a2b9283618d6adf423c85f4734df6b09f5e1f99a93e918c3c1b7fde7a2db459c" gracePeriod=30 Dec 10 11:14:59 crc kubenswrapper[4780]: I1210 11:14:59.567419 4780 generic.go:334] "Generic (PLEG): container finished" podID="c2ecbcdb-1f2b-4633-a3e4-fb8b058464df" containerID="b9e8ea8a07b3d91c836124c1a7faeedb3e809a759e52fdb1b9fbbaee199646ba" exitCode=143 Dec 10 11:14:59 crc kubenswrapper[4780]: I1210 11:14:59.567505 4780 generic.go:334] "Generic (PLEG): container finished" podID="c2ecbcdb-1f2b-4633-a3e4-fb8b058464df" containerID="6bbc21d9af610cb20a0ff6278ae968cb5f5d92b47b2a8fa149eff3e16e57a619" exitCode=143 Dec 10 11:14:59 crc kubenswrapper[4780]: I1210 11:14:59.567794 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" 
event={"ID":"c2ecbcdb-1f2b-4633-a3e4-fb8b058464df","Type":"ContainerDied","Data":"b9e8ea8a07b3d91c836124c1a7faeedb3e809a759e52fdb1b9fbbaee199646ba"} Dec 10 11:14:59 crc kubenswrapper[4780]: I1210 11:14:59.568416 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"c2ecbcdb-1f2b-4633-a3e4-fb8b058464df","Type":"ContainerDied","Data":"6bbc21d9af610cb20a0ff6278ae968cb5f5d92b47b2a8fa149eff3e16e57a619"} Dec 10 11:15:00 crc kubenswrapper[4780]: I1210 11:15:00.165787 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-external-api-0" podStartSLOduration=17.165745018 podStartE2EDuration="17.165745018s" podCreationTimestamp="2025-12-10 11:14:43 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 11:14:59.59340887 +0000 UTC m=+1804.446802333" watchObservedRunningTime="2025-12-10 11:15:00.165745018 +0000 UTC m=+1805.019138461" Dec 10 11:15:00 crc kubenswrapper[4780]: I1210 11:15:00.183359 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29422755-2ck6k"] Dec 10 11:15:00 crc kubenswrapper[4780]: E1210 11:15:00.184776 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2d3f5d7b-dc9c-464c-ba00-fbe7212e9db1" containerName="init" Dec 10 11:15:00 crc kubenswrapper[4780]: I1210 11:15:00.184805 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="2d3f5d7b-dc9c-464c-ba00-fbe7212e9db1" containerName="init" Dec 10 11:15:00 crc kubenswrapper[4780]: E1210 11:15:00.184894 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c36308e6-e280-49e1-820c-5649e6ef0077" containerName="init" Dec 10 11:15:00 crc kubenswrapper[4780]: I1210 11:15:00.184908 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="c36308e6-e280-49e1-820c-5649e6ef0077" containerName="init" Dec 10 11:15:00 crc kubenswrapper[4780]: I1210 11:15:00.185286 4780 memory_manager.go:354] "RemoveStaleState removing state" podUID="2d3f5d7b-dc9c-464c-ba00-fbe7212e9db1" containerName="init" Dec 10 11:15:00 crc kubenswrapper[4780]: I1210 11:15:00.185307 4780 memory_manager.go:354] "RemoveStaleState removing state" podUID="c36308e6-e280-49e1-820c-5649e6ef0077" containerName="init" Dec 10 11:15:00 crc kubenswrapper[4780]: I1210 11:15:00.186838 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29422755-2ck6k" Dec 10 11:15:00 crc kubenswrapper[4780]: I1210 11:15:00.196905 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Dec 10 11:15:00 crc kubenswrapper[4780]: I1210 11:15:00.197186 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Dec 10 11:15:00 crc kubenswrapper[4780]: I1210 11:15:00.246755 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29422755-2ck6k"] Dec 10 11:15:00 crc kubenswrapper[4780]: I1210 11:15:00.410906 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/9c026cff-71ab-4b14-87af-38c8f42ccac1-secret-volume\") pod \"collect-profiles-29422755-2ck6k\" (UID: \"9c026cff-71ab-4b14-87af-38c8f42ccac1\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29422755-2ck6k" Dec 10 11:15:00 crc kubenswrapper[4780]: I1210 11:15:00.411047 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/9c026cff-71ab-4b14-87af-38c8f42ccac1-config-volume\") pod \"collect-profiles-29422755-2ck6k\" (UID: \"9c026cff-71ab-4b14-87af-38c8f42ccac1\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29422755-2ck6k" Dec 10 11:15:00 crc kubenswrapper[4780]: I1210 11:15:00.411207 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6gv68\" (UniqueName: \"kubernetes.io/projected/9c026cff-71ab-4b14-87af-38c8f42ccac1-kube-api-access-6gv68\") pod \"collect-profiles-29422755-2ck6k\" (UID: \"9c026cff-71ab-4b14-87af-38c8f42ccac1\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29422755-2ck6k" Dec 10 11:15:00 crc kubenswrapper[4780]: I1210 11:15:00.514048 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/9c026cff-71ab-4b14-87af-38c8f42ccac1-secret-volume\") pod \"collect-profiles-29422755-2ck6k\" (UID: \"9c026cff-71ab-4b14-87af-38c8f42ccac1\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29422755-2ck6k" Dec 10 11:15:00 crc kubenswrapper[4780]: I1210 11:15:00.514194 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/9c026cff-71ab-4b14-87af-38c8f42ccac1-config-volume\") pod \"collect-profiles-29422755-2ck6k\" (UID: \"9c026cff-71ab-4b14-87af-38c8f42ccac1\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29422755-2ck6k" Dec 10 11:15:00 crc kubenswrapper[4780]: I1210 11:15:00.514421 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6gv68\" (UniqueName: \"kubernetes.io/projected/9c026cff-71ab-4b14-87af-38c8f42ccac1-kube-api-access-6gv68\") pod \"collect-profiles-29422755-2ck6k\" (UID: \"9c026cff-71ab-4b14-87af-38c8f42ccac1\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29422755-2ck6k" Dec 10 11:15:00 crc kubenswrapper[4780]: I1210 11:15:00.516218 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/9c026cff-71ab-4b14-87af-38c8f42ccac1-config-volume\") pod 
\"collect-profiles-29422755-2ck6k\" (UID: \"9c026cff-71ab-4b14-87af-38c8f42ccac1\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29422755-2ck6k" Dec 10 11:15:00 crc kubenswrapper[4780]: I1210 11:15:00.529629 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/9c026cff-71ab-4b14-87af-38c8f42ccac1-secret-volume\") pod \"collect-profiles-29422755-2ck6k\" (UID: \"9c026cff-71ab-4b14-87af-38c8f42ccac1\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29422755-2ck6k" Dec 10 11:15:00 crc kubenswrapper[4780]: I1210 11:15:00.540873 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6gv68\" (UniqueName: \"kubernetes.io/projected/9c026cff-71ab-4b14-87af-38c8f42ccac1-kube-api-access-6gv68\") pod \"collect-profiles-29422755-2ck6k\" (UID: \"9c026cff-71ab-4b14-87af-38c8f42ccac1\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29422755-2ck6k" Dec 10 11:15:01 crc kubenswrapper[4780]: I1210 11:15:00.597309 4780 generic.go:334] "Generic (PLEG): container finished" podID="544e0fa4-06de-4bd3-8793-303a741a1e53" containerID="2be4b9bdd746ab4e7cbde6d8d26a1dd11549ed2a7276bde6dfb93d5b25013263" exitCode=0 Dec 10 11:15:01 crc kubenswrapper[4780]: I1210 11:15:00.597423 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-jzgb2" event={"ID":"544e0fa4-06de-4bd3-8793-303a741a1e53","Type":"ContainerDied","Data":"2be4b9bdd746ab4e7cbde6d8d26a1dd11549ed2a7276bde6dfb93d5b25013263"} Dec 10 11:15:01 crc kubenswrapper[4780]: I1210 11:15:00.605139 4780 generic.go:334] "Generic (PLEG): container finished" podID="ebf6ddef-a612-46b0-bc78-ed1ab89c525f" containerID="a2b9283618d6adf423c85f4734df6b09f5e1f99a93e918c3c1b7fde7a2db459c" exitCode=0 Dec 10 11:15:01 crc kubenswrapper[4780]: I1210 11:15:00.605208 4780 generic.go:334] "Generic (PLEG): container finished" podID="ebf6ddef-a612-46b0-bc78-ed1ab89c525f" containerID="045a6c11b33ce5f99c19819fa5372767b9adf2abef1b9e90d56a73f23055283a" exitCode=143 Dec 10 11:15:01 crc kubenswrapper[4780]: I1210 11:15:00.605260 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"ebf6ddef-a612-46b0-bc78-ed1ab89c525f","Type":"ContainerDied","Data":"a2b9283618d6adf423c85f4734df6b09f5e1f99a93e918c3c1b7fde7a2db459c"} Dec 10 11:15:01 crc kubenswrapper[4780]: I1210 11:15:00.605318 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"ebf6ddef-a612-46b0-bc78-ed1ab89c525f","Type":"ContainerDied","Data":"045a6c11b33ce5f99c19819fa5372767b9adf2abef1b9e90d56a73f23055283a"} Dec 10 11:15:01 crc kubenswrapper[4780]: I1210 11:15:01.253331 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29422755-2ck6k" Dec 10 11:15:02 crc kubenswrapper[4780]: I1210 11:15:02.328509 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-785d8bcb8c-jppm6" Dec 10 11:15:02 crc kubenswrapper[4780]: I1210 11:15:02.484883 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-698758b865-86zlx"] Dec 10 11:15:02 crc kubenswrapper[4780]: I1210 11:15:02.495087 4780 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-698758b865-86zlx" podUID="0d1d0d57-d4fe-4b96-b59b-0dad3be263d7" containerName="dnsmasq-dns" containerID="cri-o://8af88366bfa447c6c69e5fab61649911eb8e5f373bb2dae0e0d88b7fae52f781" gracePeriod=10 Dec 10 11:15:02 crc kubenswrapper[4780]: I1210 11:15:02.682250 4780 generic.go:334] "Generic (PLEG): container finished" podID="0d1d0d57-d4fe-4b96-b59b-0dad3be263d7" containerID="8af88366bfa447c6c69e5fab61649911eb8e5f373bb2dae0e0d88b7fae52f781" exitCode=0 Dec 10 11:15:02 crc kubenswrapper[4780]: I1210 11:15:02.682800 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-698758b865-86zlx" event={"ID":"0d1d0d57-d4fe-4b96-b59b-0dad3be263d7","Type":"ContainerDied","Data":"8af88366bfa447c6c69e5fab61649911eb8e5f373bb2dae0e0d88b7fae52f781"} Dec 10 11:15:03 crc kubenswrapper[4780]: I1210 11:15:03.048347 4780 scope.go:117] "RemoveContainer" containerID="90f38aa24e74e78a263a82b742bebf91991ff25f2212114904e33abbbd82de16" Dec 10 11:15:03 crc kubenswrapper[4780]: E1210 11:15:03.048687 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xhdr5_openshift-machine-config-operator(6bf1dca1-b191-4796-b326-baac53e84045)\"" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" podUID="6bf1dca1-b191-4796-b326-baac53e84045" Dec 10 11:15:04 crc kubenswrapper[4780]: I1210 11:15:04.362649 4780 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-698758b865-86zlx" podUID="0d1d0d57-d4fe-4b96-b59b-0dad3be263d7" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.147:5353: connect: connection refused" Dec 10 11:15:04 crc kubenswrapper[4780]: I1210 11:15:04.733409 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"a75f82ba-b1e5-45cc-8e35-dd8c75c21247","Type":"ContainerStarted","Data":"1df453bc26cc06b2dfc5eaa22b844c0eb5612c56babbbd75dfb1858c2b70fdaf"} Dec 10 11:15:09 crc kubenswrapper[4780]: I1210 11:15:09.363518 4780 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-698758b865-86zlx" podUID="0d1d0d57-d4fe-4b96-b59b-0dad3be263d7" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.147:5353: connect: connection refused" Dec 10 11:15:14 crc kubenswrapper[4780]: I1210 11:15:14.362763 4780 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-698758b865-86zlx" podUID="0d1d0d57-d4fe-4b96-b59b-0dad3be263d7" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.147:5353: connect: connection refused" Dec 10 11:15:14 crc kubenswrapper[4780]: I1210 11:15:14.363604 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-698758b865-86zlx" Dec 10 11:15:15 crc kubenswrapper[4780]: 
E1210 11:15:15.504615 4780 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-ceilometer-central:current-podified" Dec 10 11:15:15 crc kubenswrapper[4780]: E1210 11:15:15.504928 4780 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:ceilometer-central-agent,Image:quay.io/podified-antelope-centos9/openstack-ceilometer-central:current-podified,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n5f6h8ch546h5bhc6h5fbh687h566h665h699h556h5dh67h98h697h67dh68chbch68hc5hd9hcch664h5bdh645h544h569h5d7h64dh599h58dh5f4q,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/var/lib/openstack/bin,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/openstack/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:ceilometer-central-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-994dj,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[/usr/bin/python3 /var/lib/openstack/bin/centralhealth.py],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:300,TimeoutSeconds:5,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ceilometer-0_openstack(d74fb1d1-0533-4202-9ee5-4a1c04ca6971): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Dec 10 11:15:15 crc kubenswrapper[4780]: I1210 11:15:15.983006 4780 scope.go:117] "RemoveContainer" containerID="90f38aa24e74e78a263a82b742bebf91991ff25f2212114904e33abbbd82de16" Dec 10 11:15:15 crc kubenswrapper[4780]: E1210 11:15:15.985009 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xhdr5_openshift-machine-config-operator(6bf1dca1-b191-4796-b326-baac53e84045)\"" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" 
podUID="6bf1dca1-b191-4796-b326-baac53e84045" Dec 10 11:15:17 crc kubenswrapper[4780]: I1210 11:15:17.399695 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0" Dec 10 11:15:17 crc kubenswrapper[4780]: I1210 11:15:17.400366 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0" Dec 10 11:15:17 crc kubenswrapper[4780]: I1210 11:15:17.470656 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Dec 10 11:15:17 crc kubenswrapper[4780]: I1210 11:15:17.470818 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Dec 10 11:15:18 crc kubenswrapper[4780]: E1210 11:15:18.060745 4780 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-placement-api:current-podified" Dec 10 11:15:18 crc kubenswrapper[4780]: E1210 11:15:18.061046 4780 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:placement-db-sync,Image:quay.io/podified-antelope-centos9/openstack-placement-api:current-podified,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:true,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/usr/local/bin/container-scripts,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:logs,ReadOnly:false,MountPath:/var/log/placement,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:false,MountPath:/var/lib/openstack/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:placement-dbsync-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-clw5l,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42482,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod placement-db-sync-hzmqc_openstack(61593545-0480-4729-b6d1-ba4089e68f7a): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Dec 10 11:15:18 crc kubenswrapper[4780]: E1210 11:15:18.062975 4780 pod_workers.go:1301] "Error syncing 
pod, skipping" err="failed to \"StartContainer\" for \"placement-db-sync\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/placement-db-sync-hzmqc" podUID="61593545-0480-4729-b6d1-ba4089e68f7a" Dec 10 11:15:18 crc kubenswrapper[4780]: I1210 11:15:18.256513 4780 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Dec 10 11:15:18 crc kubenswrapper[4780]: I1210 11:15:18.271891 4780 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-jzgb2" Dec 10 11:15:18 crc kubenswrapper[4780]: I1210 11:15:18.446702 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c2ecbcdb-1f2b-4633-a3e4-fb8b058464df-combined-ca-bundle\") pod \"c2ecbcdb-1f2b-4633-a3e4-fb8b058464df\" (UID: \"c2ecbcdb-1f2b-4633-a3e4-fb8b058464df\") " Dec 10 11:15:18 crc kubenswrapper[4780]: I1210 11:15:18.446780 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/544e0fa4-06de-4bd3-8793-303a741a1e53-credential-keys\") pod \"544e0fa4-06de-4bd3-8793-303a741a1e53\" (UID: \"544e0fa4-06de-4bd3-8793-303a741a1e53\") " Dec 10 11:15:18 crc kubenswrapper[4780]: I1210 11:15:18.446881 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/544e0fa4-06de-4bd3-8793-303a741a1e53-combined-ca-bundle\") pod \"544e0fa4-06de-4bd3-8793-303a741a1e53\" (UID: \"544e0fa4-06de-4bd3-8793-303a741a1e53\") " Dec 10 11:15:18 crc kubenswrapper[4780]: I1210 11:15:18.447024 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c2ecbcdb-1f2b-4633-a3e4-fb8b058464df-config-data\") pod \"c2ecbcdb-1f2b-4633-a3e4-fb8b058464df\" (UID: \"c2ecbcdb-1f2b-4633-a3e4-fb8b058464df\") " Dec 10 11:15:18 crc kubenswrapper[4780]: I1210 11:15:18.447073 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/544e0fa4-06de-4bd3-8793-303a741a1e53-config-data\") pod \"544e0fa4-06de-4bd3-8793-303a741a1e53\" (UID: \"544e0fa4-06de-4bd3-8793-303a741a1e53\") " Dec 10 11:15:18 crc kubenswrapper[4780]: I1210 11:15:18.447101 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9r26m\" (UniqueName: \"kubernetes.io/projected/c2ecbcdb-1f2b-4633-a3e4-fb8b058464df-kube-api-access-9r26m\") pod \"c2ecbcdb-1f2b-4633-a3e4-fb8b058464df\" (UID: \"c2ecbcdb-1f2b-4633-a3e4-fb8b058464df\") " Dec 10 11:15:18 crc kubenswrapper[4780]: I1210 11:15:18.447185 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c2ecbcdb-1f2b-4633-a3e4-fb8b058464df-logs\") pod \"c2ecbcdb-1f2b-4633-a3e4-fb8b058464df\" (UID: \"c2ecbcdb-1f2b-4633-a3e4-fb8b058464df\") " Dec 10 11:15:18 crc kubenswrapper[4780]: I1210 11:15:18.447243 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/544e0fa4-06de-4bd3-8793-303a741a1e53-scripts\") pod \"544e0fa4-06de-4bd3-8793-303a741a1e53\" (UID: \"544e0fa4-06de-4bd3-8793-303a741a1e53\") " Dec 10 11:15:18 crc kubenswrapper[4780]: I1210 11:15:18.447267 4780 reconciler_common.go:159] 
"operationExecutor.UnmountVolume started for volume \"kube-api-access-b5snk\" (UniqueName: \"kubernetes.io/projected/544e0fa4-06de-4bd3-8793-303a741a1e53-kube-api-access-b5snk\") pod \"544e0fa4-06de-4bd3-8793-303a741a1e53\" (UID: \"544e0fa4-06de-4bd3-8793-303a741a1e53\") " Dec 10 11:15:18 crc kubenswrapper[4780]: I1210 11:15:18.447330 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c2ecbcdb-1f2b-4633-a3e4-fb8b058464df-scripts\") pod \"c2ecbcdb-1f2b-4633-a3e4-fb8b058464df\" (UID: \"c2ecbcdb-1f2b-4633-a3e4-fb8b058464df\") " Dec 10 11:15:18 crc kubenswrapper[4780]: I1210 11:15:18.447367 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"c2ecbcdb-1f2b-4633-a3e4-fb8b058464df\" (UID: \"c2ecbcdb-1f2b-4633-a3e4-fb8b058464df\") " Dec 10 11:15:18 crc kubenswrapper[4780]: I1210 11:15:18.447425 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/544e0fa4-06de-4bd3-8793-303a741a1e53-fernet-keys\") pod \"544e0fa4-06de-4bd3-8793-303a741a1e53\" (UID: \"544e0fa4-06de-4bd3-8793-303a741a1e53\") " Dec 10 11:15:18 crc kubenswrapper[4780]: I1210 11:15:18.447497 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/c2ecbcdb-1f2b-4633-a3e4-fb8b058464df-httpd-run\") pod \"c2ecbcdb-1f2b-4633-a3e4-fb8b058464df\" (UID: \"c2ecbcdb-1f2b-4633-a3e4-fb8b058464df\") " Dec 10 11:15:18 crc kubenswrapper[4780]: I1210 11:15:18.449343 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c2ecbcdb-1f2b-4633-a3e4-fb8b058464df-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "c2ecbcdb-1f2b-4633-a3e4-fb8b058464df" (UID: "c2ecbcdb-1f2b-4633-a3e4-fb8b058464df"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 11:15:18 crc kubenswrapper[4780]: I1210 11:15:18.451436 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c2ecbcdb-1f2b-4633-a3e4-fb8b058464df-logs" (OuterVolumeSpecName: "logs") pod "c2ecbcdb-1f2b-4633-a3e4-fb8b058464df" (UID: "c2ecbcdb-1f2b-4633-a3e4-fb8b058464df"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 11:15:18 crc kubenswrapper[4780]: I1210 11:15:18.466250 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage10-crc" (OuterVolumeSpecName: "glance") pod "c2ecbcdb-1f2b-4633-a3e4-fb8b058464df" (UID: "c2ecbcdb-1f2b-4633-a3e4-fb8b058464df"). InnerVolumeSpecName "local-storage10-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Dec 10 11:15:18 crc kubenswrapper[4780]: I1210 11:15:18.467080 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c2ecbcdb-1f2b-4633-a3e4-fb8b058464df-kube-api-access-9r26m" (OuterVolumeSpecName: "kube-api-access-9r26m") pod "c2ecbcdb-1f2b-4633-a3e4-fb8b058464df" (UID: "c2ecbcdb-1f2b-4633-a3e4-fb8b058464df"). InnerVolumeSpecName "kube-api-access-9r26m". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:15:18 crc kubenswrapper[4780]: I1210 11:15:18.467206 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/544e0fa4-06de-4bd3-8793-303a741a1e53-kube-api-access-b5snk" (OuterVolumeSpecName: "kube-api-access-b5snk") pod "544e0fa4-06de-4bd3-8793-303a741a1e53" (UID: "544e0fa4-06de-4bd3-8793-303a741a1e53"). InnerVolumeSpecName "kube-api-access-b5snk". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:15:18 crc kubenswrapper[4780]: I1210 11:15:18.471486 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/544e0fa4-06de-4bd3-8793-303a741a1e53-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "544e0fa4-06de-4bd3-8793-303a741a1e53" (UID: "544e0fa4-06de-4bd3-8793-303a741a1e53"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:15:18 crc kubenswrapper[4780]: I1210 11:15:18.484858 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/544e0fa4-06de-4bd3-8793-303a741a1e53-scripts" (OuterVolumeSpecName: "scripts") pod "544e0fa4-06de-4bd3-8793-303a741a1e53" (UID: "544e0fa4-06de-4bd3-8793-303a741a1e53"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:15:18 crc kubenswrapper[4780]: I1210 11:15:18.485030 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c2ecbcdb-1f2b-4633-a3e4-fb8b058464df-scripts" (OuterVolumeSpecName: "scripts") pod "c2ecbcdb-1f2b-4633-a3e4-fb8b058464df" (UID: "c2ecbcdb-1f2b-4633-a3e4-fb8b058464df"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:15:18 crc kubenswrapper[4780]: I1210 11:15:18.485106 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/544e0fa4-06de-4bd3-8793-303a741a1e53-credential-keys" (OuterVolumeSpecName: "credential-keys") pod "544e0fa4-06de-4bd3-8793-303a741a1e53" (UID: "544e0fa4-06de-4bd3-8793-303a741a1e53"). InnerVolumeSpecName "credential-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:15:18 crc kubenswrapper[4780]: I1210 11:15:18.514368 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c2ecbcdb-1f2b-4633-a3e4-fb8b058464df-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "c2ecbcdb-1f2b-4633-a3e4-fb8b058464df" (UID: "c2ecbcdb-1f2b-4633-a3e4-fb8b058464df"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:15:18 crc kubenswrapper[4780]: I1210 11:15:18.527961 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/544e0fa4-06de-4bd3-8793-303a741a1e53-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "544e0fa4-06de-4bd3-8793-303a741a1e53" (UID: "544e0fa4-06de-4bd3-8793-303a741a1e53"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:15:18 crc kubenswrapper[4780]: I1210 11:15:18.552540 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/544e0fa4-06de-4bd3-8793-303a741a1e53-config-data" (OuterVolumeSpecName: "config-data") pod "544e0fa4-06de-4bd3-8793-303a741a1e53" (UID: "544e0fa4-06de-4bd3-8793-303a741a1e53"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:15:18 crc kubenswrapper[4780]: I1210 11:15:18.557237 4780 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c2ecbcdb-1f2b-4633-a3e4-fb8b058464df-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 10 11:15:18 crc kubenswrapper[4780]: I1210 11:15:18.557287 4780 reconciler_common.go:293] "Volume detached for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/544e0fa4-06de-4bd3-8793-303a741a1e53-credential-keys\") on node \"crc\" DevicePath \"\"" Dec 10 11:15:18 crc kubenswrapper[4780]: I1210 11:15:18.557302 4780 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/544e0fa4-06de-4bd3-8793-303a741a1e53-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 10 11:15:18 crc kubenswrapper[4780]: I1210 11:15:18.557316 4780 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/544e0fa4-06de-4bd3-8793-303a741a1e53-config-data\") on node \"crc\" DevicePath \"\"" Dec 10 11:15:18 crc kubenswrapper[4780]: I1210 11:15:18.557330 4780 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9r26m\" (UniqueName: \"kubernetes.io/projected/c2ecbcdb-1f2b-4633-a3e4-fb8b058464df-kube-api-access-9r26m\") on node \"crc\" DevicePath \"\"" Dec 10 11:15:18 crc kubenswrapper[4780]: I1210 11:15:18.557346 4780 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c2ecbcdb-1f2b-4633-a3e4-fb8b058464df-logs\") on node \"crc\" DevicePath \"\"" Dec 10 11:15:18 crc kubenswrapper[4780]: I1210 11:15:18.557358 4780 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/544e0fa4-06de-4bd3-8793-303a741a1e53-scripts\") on node \"crc\" DevicePath \"\"" Dec 10 11:15:18 crc kubenswrapper[4780]: I1210 11:15:18.557370 4780 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-b5snk\" (UniqueName: \"kubernetes.io/projected/544e0fa4-06de-4bd3-8793-303a741a1e53-kube-api-access-b5snk\") on node \"crc\" DevicePath \"\"" Dec 10 11:15:18 crc kubenswrapper[4780]: I1210 11:15:18.557393 4780 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c2ecbcdb-1f2b-4633-a3e4-fb8b058464df-scripts\") on node \"crc\" DevicePath \"\"" Dec 10 11:15:18 crc kubenswrapper[4780]: I1210 11:15:18.557432 4780 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") on node \"crc\" " Dec 10 11:15:18 crc kubenswrapper[4780]: I1210 11:15:18.557444 4780 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/544e0fa4-06de-4bd3-8793-303a741a1e53-fernet-keys\") on node \"crc\" DevicePath \"\"" Dec 10 11:15:18 crc kubenswrapper[4780]: I1210 11:15:18.557456 4780 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/c2ecbcdb-1f2b-4633-a3e4-fb8b058464df-httpd-run\") on node \"crc\" DevicePath \"\"" Dec 10 11:15:18 crc kubenswrapper[4780]: I1210 11:15:18.582164 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c2ecbcdb-1f2b-4633-a3e4-fb8b058464df-config-data" (OuterVolumeSpecName: "config-data") pod "c2ecbcdb-1f2b-4633-a3e4-fb8b058464df" (UID: "c2ecbcdb-1f2b-4633-a3e4-fb8b058464df"). 
InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:15:18 crc kubenswrapper[4780]: I1210 11:15:18.626554 4780 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage10-crc" (UniqueName: "kubernetes.io/local-volume/local-storage10-crc") on node "crc" Dec 10 11:15:18 crc kubenswrapper[4780]: I1210 11:15:18.665841 4780 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c2ecbcdb-1f2b-4633-a3e4-fb8b058464df-config-data\") on node \"crc\" DevicePath \"\"" Dec 10 11:15:18 crc kubenswrapper[4780]: I1210 11:15:18.667085 4780 reconciler_common.go:293] "Volume detached for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") on node \"crc\" DevicePath \"\"" Dec 10 11:15:19 crc kubenswrapper[4780]: I1210 11:15:19.065172 4780 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-jzgb2" Dec 10 11:15:19 crc kubenswrapper[4780]: I1210 11:15:19.065884 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-jzgb2" event={"ID":"544e0fa4-06de-4bd3-8793-303a741a1e53","Type":"ContainerDied","Data":"43a0a522813a45515e48379cbd4bcf6c15969d8884125cd44bfe7b95646d7fa4"} Dec 10 11:15:19 crc kubenswrapper[4780]: I1210 11:15:19.066008 4780 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="43a0a522813a45515e48379cbd4bcf6c15969d8884125cd44bfe7b95646d7fa4" Dec 10 11:15:19 crc kubenswrapper[4780]: I1210 11:15:19.071207 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"c2ecbcdb-1f2b-4633-a3e4-fb8b058464df","Type":"ContainerDied","Data":"d7fac1b72216c396052dc20d5e8011c0269000cab2b0630abf5b5f7d5d4d3807"} Dec 10 11:15:19 crc kubenswrapper[4780]: I1210 11:15:19.071300 4780 scope.go:117] "RemoveContainer" containerID="b9e8ea8a07b3d91c836124c1a7faeedb3e809a759e52fdb1b9fbbaee199646ba" Dec 10 11:15:19 crc kubenswrapper[4780]: I1210 11:15:19.071560 4780 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Dec 10 11:15:19 crc kubenswrapper[4780]: E1210 11:15:19.077207 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"placement-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-placement-api:current-podified\\\"\"" pod="openstack/placement-db-sync-hzmqc" podUID="61593545-0480-4729-b6d1-ba4089e68f7a" Dec 10 11:15:19 crc kubenswrapper[4780]: I1210 11:15:19.145112 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Dec 10 11:15:19 crc kubenswrapper[4780]: I1210 11:15:19.157643 4780 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-internal-api-0"] Dec 10 11:15:19 crc kubenswrapper[4780]: I1210 11:15:19.184487 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-internal-api-0"] Dec 10 11:15:19 crc kubenswrapper[4780]: E1210 11:15:19.185152 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c2ecbcdb-1f2b-4633-a3e4-fb8b058464df" containerName="glance-log" Dec 10 11:15:19 crc kubenswrapper[4780]: I1210 11:15:19.185168 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="c2ecbcdb-1f2b-4633-a3e4-fb8b058464df" containerName="glance-log" Dec 10 11:15:19 crc kubenswrapper[4780]: E1210 11:15:19.185201 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="544e0fa4-06de-4bd3-8793-303a741a1e53" containerName="keystone-bootstrap" Dec 10 11:15:19 crc kubenswrapper[4780]: I1210 11:15:19.185210 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="544e0fa4-06de-4bd3-8793-303a741a1e53" containerName="keystone-bootstrap" Dec 10 11:15:19 crc kubenswrapper[4780]: E1210 11:15:19.185233 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c2ecbcdb-1f2b-4633-a3e4-fb8b058464df" containerName="glance-httpd" Dec 10 11:15:19 crc kubenswrapper[4780]: I1210 11:15:19.185242 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="c2ecbcdb-1f2b-4633-a3e4-fb8b058464df" containerName="glance-httpd" Dec 10 11:15:19 crc kubenswrapper[4780]: I1210 11:15:19.185495 4780 memory_manager.go:354] "RemoveStaleState removing state" podUID="c2ecbcdb-1f2b-4633-a3e4-fb8b058464df" containerName="glance-httpd" Dec 10 11:15:19 crc kubenswrapper[4780]: I1210 11:15:19.185524 4780 memory_manager.go:354] "RemoveStaleState removing state" podUID="c2ecbcdb-1f2b-4633-a3e4-fb8b058464df" containerName="glance-log" Dec 10 11:15:19 crc kubenswrapper[4780]: I1210 11:15:19.185548 4780 memory_manager.go:354] "RemoveStaleState removing state" podUID="544e0fa4-06de-4bd3-8793-303a741a1e53" containerName="keystone-bootstrap" Dec 10 11:15:19 crc kubenswrapper[4780]: I1210 11:15:19.187290 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Dec 10 11:15:19 crc kubenswrapper[4780]: I1210 11:15:19.197220 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-internal-svc" Dec 10 11:15:19 crc kubenswrapper[4780]: I1210 11:15:19.197593 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-internal-config-data" Dec 10 11:15:19 crc kubenswrapper[4780]: I1210 11:15:19.215730 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Dec 10 11:15:19 crc kubenswrapper[4780]: I1210 11:15:19.287483 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ae84be8f-342b-4f21-9aea-4ebf423af61c-logs\") pod \"glance-default-internal-api-0\" (UID: \"ae84be8f-342b-4f21-9aea-4ebf423af61c\") " pod="openstack/glance-default-internal-api-0" Dec 10 11:15:19 crc kubenswrapper[4780]: I1210 11:15:19.287547 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ae84be8f-342b-4f21-9aea-4ebf423af61c-config-data\") pod \"glance-default-internal-api-0\" (UID: \"ae84be8f-342b-4f21-9aea-4ebf423af61c\") " pod="openstack/glance-default-internal-api-0" Dec 10 11:15:19 crc kubenswrapper[4780]: I1210 11:15:19.287586 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/ae84be8f-342b-4f21-9aea-4ebf423af61c-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"ae84be8f-342b-4f21-9aea-4ebf423af61c\") " pod="openstack/glance-default-internal-api-0" Dec 10 11:15:19 crc kubenswrapper[4780]: I1210 11:15:19.287638 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4kj6x\" (UniqueName: \"kubernetes.io/projected/ae84be8f-342b-4f21-9aea-4ebf423af61c-kube-api-access-4kj6x\") pod \"glance-default-internal-api-0\" (UID: \"ae84be8f-342b-4f21-9aea-4ebf423af61c\") " pod="openstack/glance-default-internal-api-0" Dec 10 11:15:19 crc kubenswrapper[4780]: I1210 11:15:19.287710 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"glance-default-internal-api-0\" (UID: \"ae84be8f-342b-4f21-9aea-4ebf423af61c\") " pod="openstack/glance-default-internal-api-0" Dec 10 11:15:19 crc kubenswrapper[4780]: I1210 11:15:19.287760 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ae84be8f-342b-4f21-9aea-4ebf423af61c-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"ae84be8f-342b-4f21-9aea-4ebf423af61c\") " pod="openstack/glance-default-internal-api-0" Dec 10 11:15:19 crc kubenswrapper[4780]: I1210 11:15:19.287824 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/ae84be8f-342b-4f21-9aea-4ebf423af61c-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"ae84be8f-342b-4f21-9aea-4ebf423af61c\") " pod="openstack/glance-default-internal-api-0" Dec 10 11:15:19 crc kubenswrapper[4780]: I1210 11:15:19.287898 4780 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ae84be8f-342b-4f21-9aea-4ebf423af61c-scripts\") pod \"glance-default-internal-api-0\" (UID: \"ae84be8f-342b-4f21-9aea-4ebf423af61c\") " pod="openstack/glance-default-internal-api-0" Dec 10 11:15:19 crc kubenswrapper[4780]: I1210 11:15:19.390034 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ae84be8f-342b-4f21-9aea-4ebf423af61c-scripts\") pod \"glance-default-internal-api-0\" (UID: \"ae84be8f-342b-4f21-9aea-4ebf423af61c\") " pod="openstack/glance-default-internal-api-0" Dec 10 11:15:19 crc kubenswrapper[4780]: I1210 11:15:19.390184 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ae84be8f-342b-4f21-9aea-4ebf423af61c-logs\") pod \"glance-default-internal-api-0\" (UID: \"ae84be8f-342b-4f21-9aea-4ebf423af61c\") " pod="openstack/glance-default-internal-api-0" Dec 10 11:15:19 crc kubenswrapper[4780]: I1210 11:15:19.390218 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ae84be8f-342b-4f21-9aea-4ebf423af61c-config-data\") pod \"glance-default-internal-api-0\" (UID: \"ae84be8f-342b-4f21-9aea-4ebf423af61c\") " pod="openstack/glance-default-internal-api-0" Dec 10 11:15:19 crc kubenswrapper[4780]: I1210 11:15:19.390261 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/ae84be8f-342b-4f21-9aea-4ebf423af61c-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"ae84be8f-342b-4f21-9aea-4ebf423af61c\") " pod="openstack/glance-default-internal-api-0" Dec 10 11:15:19 crc kubenswrapper[4780]: I1210 11:15:19.390317 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4kj6x\" (UniqueName: \"kubernetes.io/projected/ae84be8f-342b-4f21-9aea-4ebf423af61c-kube-api-access-4kj6x\") pod \"glance-default-internal-api-0\" (UID: \"ae84be8f-342b-4f21-9aea-4ebf423af61c\") " pod="openstack/glance-default-internal-api-0" Dec 10 11:15:19 crc kubenswrapper[4780]: I1210 11:15:19.390383 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"glance-default-internal-api-0\" (UID: \"ae84be8f-342b-4f21-9aea-4ebf423af61c\") " pod="openstack/glance-default-internal-api-0" Dec 10 11:15:19 crc kubenswrapper[4780]: I1210 11:15:19.390472 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ae84be8f-342b-4f21-9aea-4ebf423af61c-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"ae84be8f-342b-4f21-9aea-4ebf423af61c\") " pod="openstack/glance-default-internal-api-0" Dec 10 11:15:19 crc kubenswrapper[4780]: I1210 11:15:19.390528 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/ae84be8f-342b-4f21-9aea-4ebf423af61c-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"ae84be8f-342b-4f21-9aea-4ebf423af61c\") " pod="openstack/glance-default-internal-api-0" Dec 10 11:15:19 crc kubenswrapper[4780]: I1210 11:15:19.391107 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: 
\"kubernetes.io/empty-dir/ae84be8f-342b-4f21-9aea-4ebf423af61c-logs\") pod \"glance-default-internal-api-0\" (UID: \"ae84be8f-342b-4f21-9aea-4ebf423af61c\") " pod="openstack/glance-default-internal-api-0" Dec 10 11:15:19 crc kubenswrapper[4780]: I1210 11:15:19.391785 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/ae84be8f-342b-4f21-9aea-4ebf423af61c-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"ae84be8f-342b-4f21-9aea-4ebf423af61c\") " pod="openstack/glance-default-internal-api-0" Dec 10 11:15:19 crc kubenswrapper[4780]: I1210 11:15:19.392154 4780 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"glance-default-internal-api-0\" (UID: \"ae84be8f-342b-4f21-9aea-4ebf423af61c\") device mount path \"/mnt/openstack/pv10\"" pod="openstack/glance-default-internal-api-0" Dec 10 11:15:19 crc kubenswrapper[4780]: I1210 11:15:19.397233 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ae84be8f-342b-4f21-9aea-4ebf423af61c-config-data\") pod \"glance-default-internal-api-0\" (UID: \"ae84be8f-342b-4f21-9aea-4ebf423af61c\") " pod="openstack/glance-default-internal-api-0" Dec 10 11:15:19 crc kubenswrapper[4780]: I1210 11:15:19.415145 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/ae84be8f-342b-4f21-9aea-4ebf423af61c-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"ae84be8f-342b-4f21-9aea-4ebf423af61c\") " pod="openstack/glance-default-internal-api-0" Dec 10 11:15:19 crc kubenswrapper[4780]: I1210 11:15:19.424963 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ae84be8f-342b-4f21-9aea-4ebf423af61c-scripts\") pod \"glance-default-internal-api-0\" (UID: \"ae84be8f-342b-4f21-9aea-4ebf423af61c\") " pod="openstack/glance-default-internal-api-0" Dec 10 11:15:19 crc kubenswrapper[4780]: I1210 11:15:19.427341 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ae84be8f-342b-4f21-9aea-4ebf423af61c-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"ae84be8f-342b-4f21-9aea-4ebf423af61c\") " pod="openstack/glance-default-internal-api-0" Dec 10 11:15:19 crc kubenswrapper[4780]: I1210 11:15:19.438488 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4kj6x\" (UniqueName: \"kubernetes.io/projected/ae84be8f-342b-4f21-9aea-4ebf423af61c-kube-api-access-4kj6x\") pod \"glance-default-internal-api-0\" (UID: \"ae84be8f-342b-4f21-9aea-4ebf423af61c\") " pod="openstack/glance-default-internal-api-0" Dec 10 11:15:19 crc kubenswrapper[4780]: I1210 11:15:19.487421 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-bootstrap-jzgb2"] Dec 10 11:15:19 crc kubenswrapper[4780]: I1210 11:15:19.490520 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"glance-default-internal-api-0\" (UID: \"ae84be8f-342b-4f21-9aea-4ebf423af61c\") " pod="openstack/glance-default-internal-api-0" Dec 10 11:15:19 crc kubenswrapper[4780]: I1210 11:15:19.504949 4780 kubelet.go:2431] "SyncLoop REMOVE" source="api" 
pods=["openstack/keystone-bootstrap-jzgb2"] Dec 10 11:15:19 crc kubenswrapper[4780]: I1210 11:15:19.543799 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Dec 10 11:15:19 crc kubenswrapper[4780]: I1210 11:15:19.622575 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-bootstrap-x57qm"] Dec 10 11:15:19 crc kubenswrapper[4780]: I1210 11:15:19.625540 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-x57qm" Dec 10 11:15:19 crc kubenswrapper[4780]: I1210 11:15:19.630476 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Dec 10 11:15:19 crc kubenswrapper[4780]: I1210 11:15:19.630575 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Dec 10 11:15:19 crc kubenswrapper[4780]: I1210 11:15:19.630794 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"osp-secret" Dec 10 11:15:19 crc kubenswrapper[4780]: I1210 11:15:19.632287 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Dec 10 11:15:19 crc kubenswrapper[4780]: I1210 11:15:19.634867 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-86tfj" Dec 10 11:15:19 crc kubenswrapper[4780]: I1210 11:15:19.651556 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-x57qm"] Dec 10 11:15:19 crc kubenswrapper[4780]: I1210 11:15:19.706001 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9vggq\" (UniqueName: \"kubernetes.io/projected/79f8dd0a-b900-4a3b-a2ab-262861daedc6-kube-api-access-9vggq\") pod \"keystone-bootstrap-x57qm\" (UID: \"79f8dd0a-b900-4a3b-a2ab-262861daedc6\") " pod="openstack/keystone-bootstrap-x57qm" Dec 10 11:15:19 crc kubenswrapper[4780]: I1210 11:15:19.706122 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/79f8dd0a-b900-4a3b-a2ab-262861daedc6-scripts\") pod \"keystone-bootstrap-x57qm\" (UID: \"79f8dd0a-b900-4a3b-a2ab-262861daedc6\") " pod="openstack/keystone-bootstrap-x57qm" Dec 10 11:15:19 crc kubenswrapper[4780]: I1210 11:15:19.706214 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/79f8dd0a-b900-4a3b-a2ab-262861daedc6-config-data\") pod \"keystone-bootstrap-x57qm\" (UID: \"79f8dd0a-b900-4a3b-a2ab-262861daedc6\") " pod="openstack/keystone-bootstrap-x57qm" Dec 10 11:15:19 crc kubenswrapper[4780]: I1210 11:15:19.706610 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/79f8dd0a-b900-4a3b-a2ab-262861daedc6-fernet-keys\") pod \"keystone-bootstrap-x57qm\" (UID: \"79f8dd0a-b900-4a3b-a2ab-262861daedc6\") " pod="openstack/keystone-bootstrap-x57qm" Dec 10 11:15:19 crc kubenswrapper[4780]: I1210 11:15:19.706776 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/79f8dd0a-b900-4a3b-a2ab-262861daedc6-combined-ca-bundle\") pod \"keystone-bootstrap-x57qm\" (UID: \"79f8dd0a-b900-4a3b-a2ab-262861daedc6\") " pod="openstack/keystone-bootstrap-x57qm" Dec 10 11:15:19 crc 
kubenswrapper[4780]: I1210 11:15:19.707309 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/79f8dd0a-b900-4a3b-a2ab-262861daedc6-credential-keys\") pod \"keystone-bootstrap-x57qm\" (UID: \"79f8dd0a-b900-4a3b-a2ab-262861daedc6\") " pod="openstack/keystone-bootstrap-x57qm" Dec 10 11:15:19 crc kubenswrapper[4780]: I1210 11:15:19.809625 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/79f8dd0a-b900-4a3b-a2ab-262861daedc6-fernet-keys\") pod \"keystone-bootstrap-x57qm\" (UID: \"79f8dd0a-b900-4a3b-a2ab-262861daedc6\") " pod="openstack/keystone-bootstrap-x57qm" Dec 10 11:15:19 crc kubenswrapper[4780]: I1210 11:15:19.809715 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/79f8dd0a-b900-4a3b-a2ab-262861daedc6-combined-ca-bundle\") pod \"keystone-bootstrap-x57qm\" (UID: \"79f8dd0a-b900-4a3b-a2ab-262861daedc6\") " pod="openstack/keystone-bootstrap-x57qm" Dec 10 11:15:19 crc kubenswrapper[4780]: I1210 11:15:19.809832 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/79f8dd0a-b900-4a3b-a2ab-262861daedc6-credential-keys\") pod \"keystone-bootstrap-x57qm\" (UID: \"79f8dd0a-b900-4a3b-a2ab-262861daedc6\") " pod="openstack/keystone-bootstrap-x57qm" Dec 10 11:15:19 crc kubenswrapper[4780]: I1210 11:15:19.809902 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9vggq\" (UniqueName: \"kubernetes.io/projected/79f8dd0a-b900-4a3b-a2ab-262861daedc6-kube-api-access-9vggq\") pod \"keystone-bootstrap-x57qm\" (UID: \"79f8dd0a-b900-4a3b-a2ab-262861daedc6\") " pod="openstack/keystone-bootstrap-x57qm" Dec 10 11:15:19 crc kubenswrapper[4780]: I1210 11:15:19.809964 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/79f8dd0a-b900-4a3b-a2ab-262861daedc6-scripts\") pod \"keystone-bootstrap-x57qm\" (UID: \"79f8dd0a-b900-4a3b-a2ab-262861daedc6\") " pod="openstack/keystone-bootstrap-x57qm" Dec 10 11:15:19 crc kubenswrapper[4780]: I1210 11:15:19.810040 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/79f8dd0a-b900-4a3b-a2ab-262861daedc6-config-data\") pod \"keystone-bootstrap-x57qm\" (UID: \"79f8dd0a-b900-4a3b-a2ab-262861daedc6\") " pod="openstack/keystone-bootstrap-x57qm" Dec 10 11:15:19 crc kubenswrapper[4780]: I1210 11:15:19.818224 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/79f8dd0a-b900-4a3b-a2ab-262861daedc6-credential-keys\") pod \"keystone-bootstrap-x57qm\" (UID: \"79f8dd0a-b900-4a3b-a2ab-262861daedc6\") " pod="openstack/keystone-bootstrap-x57qm" Dec 10 11:15:19 crc kubenswrapper[4780]: I1210 11:15:19.818369 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/79f8dd0a-b900-4a3b-a2ab-262861daedc6-scripts\") pod \"keystone-bootstrap-x57qm\" (UID: \"79f8dd0a-b900-4a3b-a2ab-262861daedc6\") " pod="openstack/keystone-bootstrap-x57qm" Dec 10 11:15:19 crc kubenswrapper[4780]: I1210 11:15:19.818800 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: 
\"kubernetes.io/secret/79f8dd0a-b900-4a3b-a2ab-262861daedc6-fernet-keys\") pod \"keystone-bootstrap-x57qm\" (UID: \"79f8dd0a-b900-4a3b-a2ab-262861daedc6\") " pod="openstack/keystone-bootstrap-x57qm" Dec 10 11:15:19 crc kubenswrapper[4780]: I1210 11:15:19.821888 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/79f8dd0a-b900-4a3b-a2ab-262861daedc6-combined-ca-bundle\") pod \"keystone-bootstrap-x57qm\" (UID: \"79f8dd0a-b900-4a3b-a2ab-262861daedc6\") " pod="openstack/keystone-bootstrap-x57qm" Dec 10 11:15:19 crc kubenswrapper[4780]: I1210 11:15:19.829741 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/79f8dd0a-b900-4a3b-a2ab-262861daedc6-config-data\") pod \"keystone-bootstrap-x57qm\" (UID: \"79f8dd0a-b900-4a3b-a2ab-262861daedc6\") " pod="openstack/keystone-bootstrap-x57qm" Dec 10 11:15:19 crc kubenswrapper[4780]: I1210 11:15:19.832742 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9vggq\" (UniqueName: \"kubernetes.io/projected/79f8dd0a-b900-4a3b-a2ab-262861daedc6-kube-api-access-9vggq\") pod \"keystone-bootstrap-x57qm\" (UID: \"79f8dd0a-b900-4a3b-a2ab-262861daedc6\") " pod="openstack/keystone-bootstrap-x57qm" Dec 10 11:15:19 crc kubenswrapper[4780]: I1210 11:15:19.966297 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-x57qm" Dec 10 11:15:19 crc kubenswrapper[4780]: I1210 11:15:19.977429 4780 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="544e0fa4-06de-4bd3-8793-303a741a1e53" path="/var/lib/kubelet/pods/544e0fa4-06de-4bd3-8793-303a741a1e53/volumes" Dec 10 11:15:19 crc kubenswrapper[4780]: I1210 11:15:19.980339 4780 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c2ecbcdb-1f2b-4633-a3e4-fb8b058464df" path="/var/lib/kubelet/pods/c2ecbcdb-1f2b-4633-a3e4-fb8b058464df/volumes" Dec 10 11:15:24 crc kubenswrapper[4780]: I1210 11:15:24.372309 4780 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-698758b865-86zlx" podUID="0d1d0d57-d4fe-4b96-b59b-0dad3be263d7" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.147:5353: i/o timeout" Dec 10 11:15:28 crc kubenswrapper[4780]: I1210 11:15:28.960363 4780 scope.go:117] "RemoveContainer" containerID="90f38aa24e74e78a263a82b742bebf91991ff25f2212114904e33abbbd82de16" Dec 10 11:15:28 crc kubenswrapper[4780]: E1210 11:15:28.961754 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xhdr5_openshift-machine-config-operator(6bf1dca1-b191-4796-b326-baac53e84045)\"" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" podUID="6bf1dca1-b191-4796-b326-baac53e84045" Dec 10 11:15:29 crc kubenswrapper[4780]: I1210 11:15:29.374301 4780 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-698758b865-86zlx" podUID="0d1d0d57-d4fe-4b96-b59b-0dad3be263d7" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.147:5353: i/o timeout" Dec 10 11:15:32 crc kubenswrapper[4780]: E1210 11:15:32.392642 4780 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" 
image="quay.io/podified-antelope-centos9/openstack-barbican-api:current-podified" Dec 10 11:15:32 crc kubenswrapper[4780]: E1210 11:15:32.393293 4780 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:barbican-db-sync,Image:quay.io/podified-antelope-centos9/openstack-barbican-api:current-podified,Command:[/bin/bash],Args:[-c barbican-manage db upgrade],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:TRUE,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:db-sync-config-data,ReadOnly:true,MountPath:/etc/barbican/barbican.conf.d,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-gtvp7,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42403,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:*42403,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod barbican-db-sync-hn59m_openstack(1af5d3ff-e54a-4a1a-89f4-b105fe6dbbd9): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Dec 10 11:15:32 crc kubenswrapper[4780]: E1210 11:15:32.394559 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"barbican-db-sync\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/barbican-db-sync-hn59m" podUID="1af5d3ff-e54a-4a1a-89f4-b105fe6dbbd9" Dec 10 11:15:32 crc kubenswrapper[4780]: I1210 11:15:32.574178 4780 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-698758b865-86zlx" Dec 10 11:15:32 crc kubenswrapper[4780]: I1210 11:15:32.590320 4780 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Dec 10 11:15:32 crc kubenswrapper[4780]: I1210 11:15:32.672315 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wxmdl\" (UniqueName: \"kubernetes.io/projected/ebf6ddef-a612-46b0-bc78-ed1ab89c525f-kube-api-access-wxmdl\") pod \"ebf6ddef-a612-46b0-bc78-ed1ab89c525f\" (UID: \"ebf6ddef-a612-46b0-bc78-ed1ab89c525f\") " Dec 10 11:15:32 crc kubenswrapper[4780]: I1210 11:15:32.672457 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/0d1d0d57-d4fe-4b96-b59b-0dad3be263d7-dns-svc\") pod \"0d1d0d57-d4fe-4b96-b59b-0dad3be263d7\" (UID: \"0d1d0d57-d4fe-4b96-b59b-0dad3be263d7\") " Dec 10 11:15:32 crc kubenswrapper[4780]: I1210 11:15:32.672549 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ebf6ddef-a612-46b0-bc78-ed1ab89c525f-config-data\") pod \"ebf6ddef-a612-46b0-bc78-ed1ab89c525f\" (UID: \"ebf6ddef-a612-46b0-bc78-ed1ab89c525f\") " Dec 10 11:15:32 crc kubenswrapper[4780]: I1210 11:15:32.672630 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0d1d0d57-d4fe-4b96-b59b-0dad3be263d7-config\") pod \"0d1d0d57-d4fe-4b96-b59b-0dad3be263d7\" (UID: \"0d1d0d57-d4fe-4b96-b59b-0dad3be263d7\") " Dec 10 11:15:32 crc kubenswrapper[4780]: I1210 11:15:32.672665 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/0d1d0d57-d4fe-4b96-b59b-0dad3be263d7-ovsdbserver-sb\") pod \"0d1d0d57-d4fe-4b96-b59b-0dad3be263d7\" (UID: \"0d1d0d57-d4fe-4b96-b59b-0dad3be263d7\") " Dec 10 11:15:32 crc kubenswrapper[4780]: I1210 11:15:32.672784 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4zgn4\" (UniqueName: \"kubernetes.io/projected/0d1d0d57-d4fe-4b96-b59b-0dad3be263d7-kube-api-access-4zgn4\") pod \"0d1d0d57-d4fe-4b96-b59b-0dad3be263d7\" (UID: \"0d1d0d57-d4fe-4b96-b59b-0dad3be263d7\") " Dec 10 11:15:32 crc kubenswrapper[4780]: I1210 11:15:32.672816 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ebf6ddef-a612-46b0-bc78-ed1ab89c525f-logs\") pod \"ebf6ddef-a612-46b0-bc78-ed1ab89c525f\" (UID: \"ebf6ddef-a612-46b0-bc78-ed1ab89c525f\") " Dec 10 11:15:32 crc kubenswrapper[4780]: I1210 11:15:32.672883 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/ebf6ddef-a612-46b0-bc78-ed1ab89c525f-httpd-run\") pod \"ebf6ddef-a612-46b0-bc78-ed1ab89c525f\" (UID: \"ebf6ddef-a612-46b0-bc78-ed1ab89c525f\") " Dec 10 11:15:32 crc kubenswrapper[4780]: I1210 11:15:32.672952 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"ebf6ddef-a612-46b0-bc78-ed1ab89c525f\" (UID: \"ebf6ddef-a612-46b0-bc78-ed1ab89c525f\") " Dec 10 11:15:32 crc kubenswrapper[4780]: I1210 11:15:32.673065 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/0d1d0d57-d4fe-4b96-b59b-0dad3be263d7-ovsdbserver-nb\") pod \"0d1d0d57-d4fe-4b96-b59b-0dad3be263d7\" (UID: \"0d1d0d57-d4fe-4b96-b59b-0dad3be263d7\") " Dec 10 11:15:32 crc 
kubenswrapper[4780]: I1210 11:15:32.673099 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ebf6ddef-a612-46b0-bc78-ed1ab89c525f-scripts\") pod \"ebf6ddef-a612-46b0-bc78-ed1ab89c525f\" (UID: \"ebf6ddef-a612-46b0-bc78-ed1ab89c525f\") " Dec 10 11:15:32 crc kubenswrapper[4780]: I1210 11:15:32.673144 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ebf6ddef-a612-46b0-bc78-ed1ab89c525f-combined-ca-bundle\") pod \"ebf6ddef-a612-46b0-bc78-ed1ab89c525f\" (UID: \"ebf6ddef-a612-46b0-bc78-ed1ab89c525f\") " Dec 10 11:15:32 crc kubenswrapper[4780]: I1210 11:15:32.674002 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ebf6ddef-a612-46b0-bc78-ed1ab89c525f-logs" (OuterVolumeSpecName: "logs") pod "ebf6ddef-a612-46b0-bc78-ed1ab89c525f" (UID: "ebf6ddef-a612-46b0-bc78-ed1ab89c525f"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 11:15:32 crc kubenswrapper[4780]: I1210 11:15:32.684113 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ebf6ddef-a612-46b0-bc78-ed1ab89c525f-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "ebf6ddef-a612-46b0-bc78-ed1ab89c525f" (UID: "ebf6ddef-a612-46b0-bc78-ed1ab89c525f"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 11:15:32 crc kubenswrapper[4780]: I1210 11:15:32.709953 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ebf6ddef-a612-46b0-bc78-ed1ab89c525f-scripts" (OuterVolumeSpecName: "scripts") pod "ebf6ddef-a612-46b0-bc78-ed1ab89c525f" (UID: "ebf6ddef-a612-46b0-bc78-ed1ab89c525f"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:15:32 crc kubenswrapper[4780]: I1210 11:15:32.725315 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ebf6ddef-a612-46b0-bc78-ed1ab89c525f-kube-api-access-wxmdl" (OuterVolumeSpecName: "kube-api-access-wxmdl") pod "ebf6ddef-a612-46b0-bc78-ed1ab89c525f" (UID: "ebf6ddef-a612-46b0-bc78-ed1ab89c525f"). InnerVolumeSpecName "kube-api-access-wxmdl". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:15:32 crc kubenswrapper[4780]: I1210 11:15:32.730186 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage08-crc" (OuterVolumeSpecName: "glance") pod "ebf6ddef-a612-46b0-bc78-ed1ab89c525f" (UID: "ebf6ddef-a612-46b0-bc78-ed1ab89c525f"). InnerVolumeSpecName "local-storage08-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Dec 10 11:15:32 crc kubenswrapper[4780]: I1210 11:15:32.730701 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ebf6ddef-a612-46b0-bc78-ed1ab89c525f-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "ebf6ddef-a612-46b0-bc78-ed1ab89c525f" (UID: "ebf6ddef-a612-46b0-bc78-ed1ab89c525f"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:15:32 crc kubenswrapper[4780]: I1210 11:15:32.731405 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0d1d0d57-d4fe-4b96-b59b-0dad3be263d7-kube-api-access-4zgn4" (OuterVolumeSpecName: "kube-api-access-4zgn4") pod "0d1d0d57-d4fe-4b96-b59b-0dad3be263d7" (UID: "0d1d0d57-d4fe-4b96-b59b-0dad3be263d7"). InnerVolumeSpecName "kube-api-access-4zgn4". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:15:32 crc kubenswrapper[4780]: I1210 11:15:32.766966 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0d1d0d57-d4fe-4b96-b59b-0dad3be263d7-config" (OuterVolumeSpecName: "config") pod "0d1d0d57-d4fe-4b96-b59b-0dad3be263d7" (UID: "0d1d0d57-d4fe-4b96-b59b-0dad3be263d7"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 11:15:32 crc kubenswrapper[4780]: I1210 11:15:32.769015 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0d1d0d57-d4fe-4b96-b59b-0dad3be263d7-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "0d1d0d57-d4fe-4b96-b59b-0dad3be263d7" (UID: "0d1d0d57-d4fe-4b96-b59b-0dad3be263d7"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 11:15:32 crc kubenswrapper[4780]: I1210 11:15:32.776905 4780 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0d1d0d57-d4fe-4b96-b59b-0dad3be263d7-config\") on node \"crc\" DevicePath \"\"" Dec 10 11:15:32 crc kubenswrapper[4780]: I1210 11:15:32.777521 4780 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4zgn4\" (UniqueName: \"kubernetes.io/projected/0d1d0d57-d4fe-4b96-b59b-0dad3be263d7-kube-api-access-4zgn4\") on node \"crc\" DevicePath \"\"" Dec 10 11:15:32 crc kubenswrapper[4780]: I1210 11:15:32.777772 4780 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ebf6ddef-a612-46b0-bc78-ed1ab89c525f-logs\") on node \"crc\" DevicePath \"\"" Dec 10 11:15:32 crc kubenswrapper[4780]: I1210 11:15:32.777860 4780 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/ebf6ddef-a612-46b0-bc78-ed1ab89c525f-httpd-run\") on node \"crc\" DevicePath \"\"" Dec 10 11:15:32 crc kubenswrapper[4780]: I1210 11:15:32.778009 4780 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") on node \"crc\" " Dec 10 11:15:32 crc kubenswrapper[4780]: I1210 11:15:32.778122 4780 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ebf6ddef-a612-46b0-bc78-ed1ab89c525f-scripts\") on node \"crc\" DevicePath \"\"" Dec 10 11:15:32 crc kubenswrapper[4780]: I1210 11:15:32.778222 4780 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ebf6ddef-a612-46b0-bc78-ed1ab89c525f-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 10 11:15:32 crc kubenswrapper[4780]: I1210 11:15:32.778320 4780 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wxmdl\" (UniqueName: \"kubernetes.io/projected/ebf6ddef-a612-46b0-bc78-ed1ab89c525f-kube-api-access-wxmdl\") on node \"crc\" DevicePath \"\"" Dec 10 11:15:32 crc kubenswrapper[4780]: I1210 11:15:32.778406 4780 reconciler_common.go:293] 
"Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/0d1d0d57-d4fe-4b96-b59b-0dad3be263d7-dns-svc\") on node \"crc\" DevicePath \"\"" Dec 10 11:15:32 crc kubenswrapper[4780]: I1210 11:15:32.779807 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0d1d0d57-d4fe-4b96-b59b-0dad3be263d7-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "0d1d0d57-d4fe-4b96-b59b-0dad3be263d7" (UID: "0d1d0d57-d4fe-4b96-b59b-0dad3be263d7"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 11:15:32 crc kubenswrapper[4780]: I1210 11:15:32.796825 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0d1d0d57-d4fe-4b96-b59b-0dad3be263d7-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "0d1d0d57-d4fe-4b96-b59b-0dad3be263d7" (UID: "0d1d0d57-d4fe-4b96-b59b-0dad3be263d7"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 11:15:32 crc kubenswrapper[4780]: I1210 11:15:32.797481 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ebf6ddef-a612-46b0-bc78-ed1ab89c525f-config-data" (OuterVolumeSpecName: "config-data") pod "ebf6ddef-a612-46b0-bc78-ed1ab89c525f" (UID: "ebf6ddef-a612-46b0-bc78-ed1ab89c525f"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:15:32 crc kubenswrapper[4780]: I1210 11:15:32.810768 4780 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage08-crc" (UniqueName: "kubernetes.io/local-volume/local-storage08-crc") on node "crc" Dec 10 11:15:32 crc kubenswrapper[4780]: I1210 11:15:32.881519 4780 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ebf6ddef-a612-46b0-bc78-ed1ab89c525f-config-data\") on node \"crc\" DevicePath \"\"" Dec 10 11:15:32 crc kubenswrapper[4780]: I1210 11:15:32.881580 4780 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/0d1d0d57-d4fe-4b96-b59b-0dad3be263d7-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Dec 10 11:15:32 crc kubenswrapper[4780]: I1210 11:15:32.881609 4780 reconciler_common.go:293] "Volume detached for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") on node \"crc\" DevicePath \"\"" Dec 10 11:15:32 crc kubenswrapper[4780]: I1210 11:15:32.881626 4780 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/0d1d0d57-d4fe-4b96-b59b-0dad3be263d7-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Dec 10 11:15:33 crc kubenswrapper[4780]: I1210 11:15:33.331373 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-698758b865-86zlx" event={"ID":"0d1d0d57-d4fe-4b96-b59b-0dad3be263d7","Type":"ContainerDied","Data":"40fa5c6569ed39e3e81dedab39ff65d972c47dc75f6ce8743187876723c0cd46"} Dec 10 11:15:33 crc kubenswrapper[4780]: I1210 11:15:33.331406 4780 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-698758b865-86zlx" Dec 10 11:15:33 crc kubenswrapper[4780]: I1210 11:15:33.334082 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"ebf6ddef-a612-46b0-bc78-ed1ab89c525f","Type":"ContainerDied","Data":"750c5dff7f6d2b49339fce4c2c0fcc25b3b36e85d0a038f29f872b14746c76f0"} Dec 10 11:15:33 crc kubenswrapper[4780]: I1210 11:15:33.334117 4780 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Dec 10 11:15:33 crc kubenswrapper[4780]: E1210 11:15:33.337383 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"barbican-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-barbican-api:current-podified\\\"\"" pod="openstack/barbican-db-sync-hn59m" podUID="1af5d3ff-e54a-4a1a-89f4-b105fe6dbbd9" Dec 10 11:15:33 crc kubenswrapper[4780]: I1210 11:15:33.390070 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-698758b865-86zlx"] Dec 10 11:15:33 crc kubenswrapper[4780]: I1210 11:15:33.459278 4780 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-698758b865-86zlx"] Dec 10 11:15:33 crc kubenswrapper[4780]: I1210 11:15:33.481755 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Dec 10 11:15:33 crc kubenswrapper[4780]: I1210 11:15:33.517662 4780 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-external-api-0"] Dec 10 11:15:33 crc kubenswrapper[4780]: I1210 11:15:33.534177 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-external-api-0"] Dec 10 11:15:33 crc kubenswrapper[4780]: E1210 11:15:33.535007 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ebf6ddef-a612-46b0-bc78-ed1ab89c525f" containerName="glance-log" Dec 10 11:15:33 crc kubenswrapper[4780]: I1210 11:15:33.535032 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="ebf6ddef-a612-46b0-bc78-ed1ab89c525f" containerName="glance-log" Dec 10 11:15:33 crc kubenswrapper[4780]: E1210 11:15:33.535048 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ebf6ddef-a612-46b0-bc78-ed1ab89c525f" containerName="glance-httpd" Dec 10 11:15:33 crc kubenswrapper[4780]: I1210 11:15:33.535055 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="ebf6ddef-a612-46b0-bc78-ed1ab89c525f" containerName="glance-httpd" Dec 10 11:15:33 crc kubenswrapper[4780]: E1210 11:15:33.535086 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0d1d0d57-d4fe-4b96-b59b-0dad3be263d7" containerName="dnsmasq-dns" Dec 10 11:15:33 crc kubenswrapper[4780]: I1210 11:15:33.535094 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="0d1d0d57-d4fe-4b96-b59b-0dad3be263d7" containerName="dnsmasq-dns" Dec 10 11:15:33 crc kubenswrapper[4780]: E1210 11:15:33.535110 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0d1d0d57-d4fe-4b96-b59b-0dad3be263d7" containerName="init" Dec 10 11:15:33 crc kubenswrapper[4780]: I1210 11:15:33.535117 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="0d1d0d57-d4fe-4b96-b59b-0dad3be263d7" containerName="init" Dec 10 11:15:33 crc kubenswrapper[4780]: I1210 11:15:33.535442 4780 memory_manager.go:354] "RemoveStaleState removing state" podUID="ebf6ddef-a612-46b0-bc78-ed1ab89c525f" containerName="glance-log" Dec 10 11:15:33 crc 
kubenswrapper[4780]: I1210 11:15:33.535462 4780 memory_manager.go:354] "RemoveStaleState removing state" podUID="0d1d0d57-d4fe-4b96-b59b-0dad3be263d7" containerName="dnsmasq-dns" Dec 10 11:15:33 crc kubenswrapper[4780]: I1210 11:15:33.535471 4780 memory_manager.go:354] "RemoveStaleState removing state" podUID="ebf6ddef-a612-46b0-bc78-ed1ab89c525f" containerName="glance-httpd" Dec 10 11:15:33 crc kubenswrapper[4780]: I1210 11:15:33.537176 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Dec 10 11:15:33 crc kubenswrapper[4780]: I1210 11:15:33.540627 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-external-config-data" Dec 10 11:15:33 crc kubenswrapper[4780]: I1210 11:15:33.541086 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-public-svc" Dec 10 11:15:33 crc kubenswrapper[4780]: I1210 11:15:33.549067 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Dec 10 11:15:33 crc kubenswrapper[4780]: I1210 11:15:33.602213 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/9fd7774f-b900-4586-a38d-d7fb0e4991f7-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"9fd7774f-b900-4586-a38d-d7fb0e4991f7\") " pod="openstack/glance-default-external-api-0" Dec 10 11:15:33 crc kubenswrapper[4780]: I1210 11:15:33.602769 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9fd7774f-b900-4586-a38d-d7fb0e4991f7-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"9fd7774f-b900-4586-a38d-d7fb0e4991f7\") " pod="openstack/glance-default-external-api-0" Dec 10 11:15:33 crc kubenswrapper[4780]: I1210 11:15:33.603185 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9fd7774f-b900-4586-a38d-d7fb0e4991f7-scripts\") pod \"glance-default-external-api-0\" (UID: \"9fd7774f-b900-4586-a38d-d7fb0e4991f7\") " pod="openstack/glance-default-external-api-0" Dec 10 11:15:33 crc kubenswrapper[4780]: I1210 11:15:33.603320 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9fd7774f-b900-4586-a38d-d7fb0e4991f7-config-data\") pod \"glance-default-external-api-0\" (UID: \"9fd7774f-b900-4586-a38d-d7fb0e4991f7\") " pod="openstack/glance-default-external-api-0" Dec 10 11:15:33 crc kubenswrapper[4780]: I1210 11:15:33.603496 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7b98m\" (UniqueName: \"kubernetes.io/projected/9fd7774f-b900-4586-a38d-d7fb0e4991f7-kube-api-access-7b98m\") pod \"glance-default-external-api-0\" (UID: \"9fd7774f-b900-4586-a38d-d7fb0e4991f7\") " pod="openstack/glance-default-external-api-0" Dec 10 11:15:33 crc kubenswrapper[4780]: I1210 11:15:33.603679 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/9fd7774f-b900-4586-a38d-d7fb0e4991f7-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"9fd7774f-b900-4586-a38d-d7fb0e4991f7\") " pod="openstack/glance-default-external-api-0" Dec 10 11:15:33 crc 
kubenswrapper[4780]: I1210 11:15:33.603864 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9fd7774f-b900-4586-a38d-d7fb0e4991f7-logs\") pod \"glance-default-external-api-0\" (UID: \"9fd7774f-b900-4586-a38d-d7fb0e4991f7\") " pod="openstack/glance-default-external-api-0" Dec 10 11:15:33 crc kubenswrapper[4780]: I1210 11:15:33.604416 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"glance-default-external-api-0\" (UID: \"9fd7774f-b900-4586-a38d-d7fb0e4991f7\") " pod="openstack/glance-default-external-api-0" Dec 10 11:15:33 crc kubenswrapper[4780]: I1210 11:15:33.712858 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9fd7774f-b900-4586-a38d-d7fb0e4991f7-logs\") pod \"glance-default-external-api-0\" (UID: \"9fd7774f-b900-4586-a38d-d7fb0e4991f7\") " pod="openstack/glance-default-external-api-0" Dec 10 11:15:33 crc kubenswrapper[4780]: I1210 11:15:33.713285 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"glance-default-external-api-0\" (UID: \"9fd7774f-b900-4586-a38d-d7fb0e4991f7\") " pod="openstack/glance-default-external-api-0" Dec 10 11:15:33 crc kubenswrapper[4780]: I1210 11:15:33.713480 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/9fd7774f-b900-4586-a38d-d7fb0e4991f7-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"9fd7774f-b900-4586-a38d-d7fb0e4991f7\") " pod="openstack/glance-default-external-api-0" Dec 10 11:15:33 crc kubenswrapper[4780]: I1210 11:15:33.713553 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9fd7774f-b900-4586-a38d-d7fb0e4991f7-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"9fd7774f-b900-4586-a38d-d7fb0e4991f7\") " pod="openstack/glance-default-external-api-0" Dec 10 11:15:33 crc kubenswrapper[4780]: I1210 11:15:33.713554 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9fd7774f-b900-4586-a38d-d7fb0e4991f7-logs\") pod \"glance-default-external-api-0\" (UID: \"9fd7774f-b900-4586-a38d-d7fb0e4991f7\") " pod="openstack/glance-default-external-api-0" Dec 10 11:15:33 crc kubenswrapper[4780]: I1210 11:15:33.713619 4780 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"glance-default-external-api-0\" (UID: \"9fd7774f-b900-4586-a38d-d7fb0e4991f7\") device mount path \"/mnt/openstack/pv08\"" pod="openstack/glance-default-external-api-0" Dec 10 11:15:33 crc kubenswrapper[4780]: I1210 11:15:33.713894 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/9fd7774f-b900-4586-a38d-d7fb0e4991f7-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"9fd7774f-b900-4586-a38d-d7fb0e4991f7\") " pod="openstack/glance-default-external-api-0" Dec 10 11:15:33 crc kubenswrapper[4780]: I1210 11:15:33.714038 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for 
volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9fd7774f-b900-4586-a38d-d7fb0e4991f7-scripts\") pod \"glance-default-external-api-0\" (UID: \"9fd7774f-b900-4586-a38d-d7fb0e4991f7\") " pod="openstack/glance-default-external-api-0" Dec 10 11:15:33 crc kubenswrapper[4780]: I1210 11:15:33.714115 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9fd7774f-b900-4586-a38d-d7fb0e4991f7-config-data\") pod \"glance-default-external-api-0\" (UID: \"9fd7774f-b900-4586-a38d-d7fb0e4991f7\") " pod="openstack/glance-default-external-api-0" Dec 10 11:15:33 crc kubenswrapper[4780]: I1210 11:15:33.714208 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7b98m\" (UniqueName: \"kubernetes.io/projected/9fd7774f-b900-4586-a38d-d7fb0e4991f7-kube-api-access-7b98m\") pod \"glance-default-external-api-0\" (UID: \"9fd7774f-b900-4586-a38d-d7fb0e4991f7\") " pod="openstack/glance-default-external-api-0" Dec 10 11:15:33 crc kubenswrapper[4780]: I1210 11:15:33.714308 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/9fd7774f-b900-4586-a38d-d7fb0e4991f7-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"9fd7774f-b900-4586-a38d-d7fb0e4991f7\") " pod="openstack/glance-default-external-api-0" Dec 10 11:15:33 crc kubenswrapper[4780]: I1210 11:15:33.722906 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9fd7774f-b900-4586-a38d-d7fb0e4991f7-scripts\") pod \"glance-default-external-api-0\" (UID: \"9fd7774f-b900-4586-a38d-d7fb0e4991f7\") " pod="openstack/glance-default-external-api-0" Dec 10 11:15:33 crc kubenswrapper[4780]: I1210 11:15:33.723045 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9fd7774f-b900-4586-a38d-d7fb0e4991f7-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"9fd7774f-b900-4586-a38d-d7fb0e4991f7\") " pod="openstack/glance-default-external-api-0" Dec 10 11:15:33 crc kubenswrapper[4780]: I1210 11:15:33.723816 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9fd7774f-b900-4586-a38d-d7fb0e4991f7-config-data\") pod \"glance-default-external-api-0\" (UID: \"9fd7774f-b900-4586-a38d-d7fb0e4991f7\") " pod="openstack/glance-default-external-api-0" Dec 10 11:15:33 crc kubenswrapper[4780]: I1210 11:15:33.731965 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/9fd7774f-b900-4586-a38d-d7fb0e4991f7-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"9fd7774f-b900-4586-a38d-d7fb0e4991f7\") " pod="openstack/glance-default-external-api-0" Dec 10 11:15:33 crc kubenswrapper[4780]: I1210 11:15:33.738110 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7b98m\" (UniqueName: \"kubernetes.io/projected/9fd7774f-b900-4586-a38d-d7fb0e4991f7-kube-api-access-7b98m\") pod \"glance-default-external-api-0\" (UID: \"9fd7774f-b900-4586-a38d-d7fb0e4991f7\") " pod="openstack/glance-default-external-api-0" Dec 10 11:15:33 crc kubenswrapper[4780]: I1210 11:15:33.757820 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod 
\"glance-default-external-api-0\" (UID: \"9fd7774f-b900-4586-a38d-d7fb0e4991f7\") " pod="openstack/glance-default-external-api-0" Dec 10 11:15:33 crc kubenswrapper[4780]: I1210 11:15:33.875897 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Dec 10 11:15:33 crc kubenswrapper[4780]: I1210 11:15:33.979791 4780 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0d1d0d57-d4fe-4b96-b59b-0dad3be263d7" path="/var/lib/kubelet/pods/0d1d0d57-d4fe-4b96-b59b-0dad3be263d7/volumes" Dec 10 11:15:33 crc kubenswrapper[4780]: I1210 11:15:33.982869 4780 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ebf6ddef-a612-46b0-bc78-ed1ab89c525f" path="/var/lib/kubelet/pods/ebf6ddef-a612-46b0-bc78-ed1ab89c525f/volumes" Dec 10 11:15:34 crc kubenswrapper[4780]: I1210 11:15:34.375376 4780 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-698758b865-86zlx" podUID="0d1d0d57-d4fe-4b96-b59b-0dad3be263d7" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.147:5353: i/o timeout" Dec 10 11:15:34 crc kubenswrapper[4780]: E1210 11:15:34.434160 4780 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-cinder-api:current-podified" Dec 10 11:15:34 crc kubenswrapper[4780]: E1210 11:15:34.434469 4780 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:cinder-db-sync,Image:quay.io/podified-antelope-centos9/openstack-cinder-api:current-podified,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_set_configs && /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:TRUE,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:etc-machine-id,ReadOnly:true,MountPath:/etc/machine-id,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:scripts,ReadOnly:true,MountPath:/usr/local/bin/container-scripts,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/config-data/merged,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/my.cnf,SubPath:my.cnf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:db-sync-config-data,ReadOnly:true,MountPath:/etc/cinder/cinder.conf.d,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:db-sync-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-nhq8m,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:*0,RunAsNonRoot:nil,ReadOn
lyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod cinder-db-sync-wqs8s_openstack(f2a06360-9c37-4ae4-8148-73c37d2be5a4): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Dec 10 11:15:34 crc kubenswrapper[4780]: E1210 11:15:34.436114 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cinder-db-sync\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/cinder-db-sync-wqs8s" podUID="f2a06360-9c37-4ae4-8148-73c37d2be5a4" Dec 10 11:15:34 crc kubenswrapper[4780]: I1210 11:15:34.682368 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29422755-2ck6k"] Dec 10 11:15:34 crc kubenswrapper[4780]: E1210 11:15:34.826420 4780 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-ceilometer-notification:current-podified" Dec 10 11:15:34 crc kubenswrapper[4780]: E1210 11:15:34.826655 4780 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:ceilometer-notification-agent,Image:quay.io/podified-antelope-centos9/openstack-ceilometer-notification:current-podified,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n5f6h8ch546h5bhc6h5fbh687h566h665h699h556h5dh67h98h697h67dh68chbch68hc5hd9hcch664h5bdh645h544h569h5d7h64dh599h58dh5f4q,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/var/lib/openstack/bin,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/openstack/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:ceilometer-notification-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-994dj,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[/usr/bin/python3 
/var/lib/openstack/bin/notificationhealth.py],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:300,TimeoutSeconds:5,PeriodSeconds:30,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ceilometer-0_openstack(d74fb1d1-0533-4202-9ee5-4a1c04ca6971): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Dec 10 11:15:34 crc kubenswrapper[4780]: I1210 11:15:34.840042 4780 scope.go:117] "RemoveContainer" containerID="6bbc21d9af610cb20a0ff6278ae968cb5f5d92b47b2a8fa149eff3e16e57a619" Dec 10 11:15:34 crc kubenswrapper[4780]: I1210 11:15:34.908170 4780 scope.go:117] "RemoveContainer" containerID="8af88366bfa447c6c69e5fab61649911eb8e5f373bb2dae0e0d88b7fae52f781" Dec 10 11:15:35 crc kubenswrapper[4780]: I1210 11:15:35.378957 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29422755-2ck6k" event={"ID":"9c026cff-71ab-4b14-87af-38c8f42ccac1","Type":"ContainerStarted","Data":"ad4a41f10b4ff32086109cc89aeeab7cc1334f37a9739cdbfcc9ab1dc934d7ef"} Dec 10 11:15:35 crc kubenswrapper[4780]: E1210 11:15:35.441366 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cinder-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-cinder-api:current-podified\\\"\"" pod="openstack/cinder-db-sync-wqs8s" podUID="f2a06360-9c37-4ae4-8148-73c37d2be5a4" Dec 10 11:15:35 crc kubenswrapper[4780]: I1210 11:15:35.535323 4780 scope.go:117] "RemoveContainer" containerID="78e41bb1a9c33cf73cab83ff2b6b11248eaa897021cc13de028de30c3b31c485" Dec 10 11:15:35 crc kubenswrapper[4780]: I1210 11:15:35.574346 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-x57qm"] Dec 10 11:15:35 crc kubenswrapper[4780]: I1210 11:15:35.752599 4780 scope.go:117] "RemoveContainer" containerID="a2b9283618d6adf423c85f4734df6b09f5e1f99a93e918c3c1b7fde7a2db459c" Dec 10 11:15:35 crc kubenswrapper[4780]: I1210 11:15:35.834425 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Dec 10 11:15:35 crc kubenswrapper[4780]: I1210 11:15:35.844685 4780 scope.go:117] "RemoveContainer" containerID="045a6c11b33ce5f99c19819fa5372767b9adf2abef1b9e90d56a73f23055283a" Dec 10 11:15:35 crc kubenswrapper[4780]: W1210 11:15:35.850608 4780 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podae84be8f_342b_4f21_9aea_4ebf423af61c.slice/crio-1b58f30407051b39005b3a345a6724dd693d48fdcf08affd8fe8c411f53f9b0d WatchSource:0}: Error finding container 1b58f30407051b39005b3a345a6724dd693d48fdcf08affd8fe8c411f53f9b0d: Status 404 returned error can't find the container with id 1b58f30407051b39005b3a345a6724dd693d48fdcf08affd8fe8c411f53f9b0d Dec 10 11:15:36 crc kubenswrapper[4780]: I1210 
11:15:36.402328 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Dec 10 11:15:36 crc kubenswrapper[4780]: W1210 11:15:36.402633 4780 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod9fd7774f_b900_4586_a38d_d7fb0e4991f7.slice/crio-8279fe947159ba9cef1744101f0f5695758c5c9a055587e636bb351ac846c30b WatchSource:0}: Error finding container 8279fe947159ba9cef1744101f0f5695758c5c9a055587e636bb351ac846c30b: Status 404 returned error can't find the container with id 8279fe947159ba9cef1744101f0f5695758c5c9a055587e636bb351ac846c30b Dec 10 11:15:36 crc kubenswrapper[4780]: I1210 11:15:36.429066 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"a75f82ba-b1e5-45cc-8e35-dd8c75c21247","Type":"ContainerStarted","Data":"f978028cd8bad9545db881374fe90a6b7c2ed0f5d3b2b494871ca01066ed2e48"} Dec 10 11:15:36 crc kubenswrapper[4780]: I1210 11:15:36.436450 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-db-sync-vcml6" event={"ID":"253b0c60-6211-4e23-921c-b8c34ccc4e25","Type":"ContainerStarted","Data":"4b80868b847dad59daacf4883d411e8b6a1644f6aacfd5b357c1061b720450e0"} Dec 10 11:15:36 crc kubenswrapper[4780]: I1210 11:15:36.438772 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-x57qm" event={"ID":"79f8dd0a-b900-4a3b-a2ab-262861daedc6","Type":"ContainerStarted","Data":"e2fc06a448897cd2dd3290871eb73e021c1c1efa6cf451de6f18c0b389a26fc5"} Dec 10 11:15:36 crc kubenswrapper[4780]: I1210 11:15:36.442804 4780 generic.go:334] "Generic (PLEG): container finished" podID="9c026cff-71ab-4b14-87af-38c8f42ccac1" containerID="c2bb03a116004b6fb136a2cfb449d8105933373c20832a5e610ff0f957121103" exitCode=0 Dec 10 11:15:36 crc kubenswrapper[4780]: I1210 11:15:36.442982 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29422755-2ck6k" event={"ID":"9c026cff-71ab-4b14-87af-38c8f42ccac1","Type":"ContainerDied","Data":"c2bb03a116004b6fb136a2cfb449d8105933373c20832a5e610ff0f957121103"} Dec 10 11:15:36 crc kubenswrapper[4780]: I1210 11:15:36.446210 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"ae84be8f-342b-4f21-9aea-4ebf423af61c","Type":"ContainerStarted","Data":"1b58f30407051b39005b3a345a6724dd693d48fdcf08affd8fe8c411f53f9b0d"} Dec 10 11:15:36 crc kubenswrapper[4780]: I1210 11:15:36.467954 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/prometheus-metric-storage-0" podStartSLOduration=65.467905639 podStartE2EDuration="1m5.467905639s" podCreationTimestamp="2025-12-10 11:14:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 11:15:36.463661721 +0000 UTC m=+1841.317055164" watchObservedRunningTime="2025-12-10 11:15:36.467905639 +0000 UTC m=+1841.321299082" Dec 10 11:15:36 crc kubenswrapper[4780]: I1210 11:15:36.524794 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/heat-db-sync-vcml6" podStartSLOduration=8.294673555 podStartE2EDuration="59.524769649s" podCreationTimestamp="2025-12-10 11:14:37 +0000 UTC" firstStartedPulling="2025-12-10 11:14:41.187092854 +0000 UTC m=+1786.040486297" lastFinishedPulling="2025-12-10 11:15:32.417188948 +0000 UTC m=+1837.270582391" 
observedRunningTime="2025-12-10 11:15:36.521123806 +0000 UTC m=+1841.374517269" watchObservedRunningTime="2025-12-10 11:15:36.524769649 +0000 UTC m=+1841.378163092" Dec 10 11:15:37 crc kubenswrapper[4780]: I1210 11:15:37.273801 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/prometheus-metric-storage-0" Dec 10 11:15:37 crc kubenswrapper[4780]: I1210 11:15:37.512644 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-hzmqc" event={"ID":"61593545-0480-4729-b6d1-ba4089e68f7a","Type":"ContainerStarted","Data":"a903948cf1b99b4467725000fe94ec620a61470054b3a7bcbacc76574cc99abc"} Dec 10 11:15:37 crc kubenswrapper[4780]: I1210 11:15:37.526256 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"9fd7774f-b900-4586-a38d-d7fb0e4991f7","Type":"ContainerStarted","Data":"1b74e2259d47df651ee8f4292b49232970777bddcdd72773fc974ebf09088744"} Dec 10 11:15:37 crc kubenswrapper[4780]: I1210 11:15:37.526411 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"9fd7774f-b900-4586-a38d-d7fb0e4991f7","Type":"ContainerStarted","Data":"8279fe947159ba9cef1744101f0f5695758c5c9a055587e636bb351ac846c30b"} Dec 10 11:15:37 crc kubenswrapper[4780]: I1210 11:15:37.553147 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/placement-db-sync-hzmqc" podStartSLOduration=7.668301826 podStartE2EDuration="59.553118279s" podCreationTimestamp="2025-12-10 11:14:38 +0000 UTC" firstStartedPulling="2025-12-10 11:14:43.717096397 +0000 UTC m=+1788.570489830" lastFinishedPulling="2025-12-10 11:15:35.60191284 +0000 UTC m=+1840.455306283" observedRunningTime="2025-12-10 11:15:37.547275579 +0000 UTC m=+1842.400669022" watchObservedRunningTime="2025-12-10 11:15:37.553118279 +0000 UTC m=+1842.406511722" Dec 10 11:15:37 crc kubenswrapper[4780]: I1210 11:15:37.577046 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-x57qm" event={"ID":"79f8dd0a-b900-4a3b-a2ab-262861daedc6","Type":"ContainerStarted","Data":"1d5fb6b09dace4e96b104c1de5d8b4873d0327254dc4f21ad87b3a00f55bdb0d"} Dec 10 11:15:37 crc kubenswrapper[4780]: I1210 11:15:37.604319 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"ae84be8f-342b-4f21-9aea-4ebf423af61c","Type":"ContainerStarted","Data":"b029d12eb9344ff501ff60ef04a516ec7c1d43f95113d256f618ca731a444aeb"} Dec 10 11:15:37 crc kubenswrapper[4780]: I1210 11:15:37.610653 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-bootstrap-x57qm" podStartSLOduration=18.610626765 podStartE2EDuration="18.610626765s" podCreationTimestamp="2025-12-10 11:15:19 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 11:15:37.605016532 +0000 UTC m=+1842.458409975" watchObservedRunningTime="2025-12-10 11:15:37.610626765 +0000 UTC m=+1842.464020208" Dec 10 11:15:38 crc kubenswrapper[4780]: I1210 11:15:38.121297 4780 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29422755-2ck6k" Dec 10 11:15:38 crc kubenswrapper[4780]: I1210 11:15:38.310566 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6gv68\" (UniqueName: \"kubernetes.io/projected/9c026cff-71ab-4b14-87af-38c8f42ccac1-kube-api-access-6gv68\") pod \"9c026cff-71ab-4b14-87af-38c8f42ccac1\" (UID: \"9c026cff-71ab-4b14-87af-38c8f42ccac1\") " Dec 10 11:15:38 crc kubenswrapper[4780]: I1210 11:15:38.310718 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/9c026cff-71ab-4b14-87af-38c8f42ccac1-config-volume\") pod \"9c026cff-71ab-4b14-87af-38c8f42ccac1\" (UID: \"9c026cff-71ab-4b14-87af-38c8f42ccac1\") " Dec 10 11:15:38 crc kubenswrapper[4780]: I1210 11:15:38.311329 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/9c026cff-71ab-4b14-87af-38c8f42ccac1-secret-volume\") pod \"9c026cff-71ab-4b14-87af-38c8f42ccac1\" (UID: \"9c026cff-71ab-4b14-87af-38c8f42ccac1\") " Dec 10 11:15:38 crc kubenswrapper[4780]: I1210 11:15:38.658522 4780 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29422755-2ck6k" Dec 10 11:15:38 crc kubenswrapper[4780]: I1210 11:15:38.918122 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29422755-2ck6k" event={"ID":"9c026cff-71ab-4b14-87af-38c8f42ccac1","Type":"ContainerDied","Data":"ad4a41f10b4ff32086109cc89aeeab7cc1334f37a9739cdbfcc9ab1dc934d7ef"} Dec 10 11:15:38 crc kubenswrapper[4780]: I1210 11:15:38.918218 4780 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ad4a41f10b4ff32086109cc89aeeab7cc1334f37a9739cdbfcc9ab1dc934d7ef" Dec 10 11:15:38 crc kubenswrapper[4780]: I1210 11:15:38.918672 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9c026cff-71ab-4b14-87af-38c8f42ccac1-config-volume" (OuterVolumeSpecName: "config-volume") pod "9c026cff-71ab-4b14-87af-38c8f42ccac1" (UID: "9c026cff-71ab-4b14-87af-38c8f42ccac1"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 11:15:38 crc kubenswrapper[4780]: I1210 11:15:38.932116 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9c026cff-71ab-4b14-87af-38c8f42ccac1-kube-api-access-6gv68" (OuterVolumeSpecName: "kube-api-access-6gv68") pod "9c026cff-71ab-4b14-87af-38c8f42ccac1" (UID: "9c026cff-71ab-4b14-87af-38c8f42ccac1"). InnerVolumeSpecName "kube-api-access-6gv68". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:15:38 crc kubenswrapper[4780]: I1210 11:15:38.933109 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6gv68\" (UniqueName: \"kubernetes.io/projected/9c026cff-71ab-4b14-87af-38c8f42ccac1-kube-api-access-6gv68\") pod \"9c026cff-71ab-4b14-87af-38c8f42ccac1\" (UID: \"9c026cff-71ab-4b14-87af-38c8f42ccac1\") " Dec 10 11:15:38 crc kubenswrapper[4780]: W1210 11:15:38.933849 4780 empty_dir.go:500] Warning: Unmount skipped because path does not exist: /var/lib/kubelet/pods/9c026cff-71ab-4b14-87af-38c8f42ccac1/volumes/kubernetes.io~projected/kube-api-access-6gv68 Dec 10 11:15:38 crc kubenswrapper[4780]: I1210 11:15:38.934057 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9c026cff-71ab-4b14-87af-38c8f42ccac1-kube-api-access-6gv68" (OuterVolumeSpecName: "kube-api-access-6gv68") pod "9c026cff-71ab-4b14-87af-38c8f42ccac1" (UID: "9c026cff-71ab-4b14-87af-38c8f42ccac1"). InnerVolumeSpecName "kube-api-access-6gv68". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:15:38 crc kubenswrapper[4780]: I1210 11:15:38.934275 4780 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6gv68\" (UniqueName: \"kubernetes.io/projected/9c026cff-71ab-4b14-87af-38c8f42ccac1-kube-api-access-6gv68\") on node \"crc\" DevicePath \"\"" Dec 10 11:15:38 crc kubenswrapper[4780]: I1210 11:15:38.934302 4780 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/9c026cff-71ab-4b14-87af-38c8f42ccac1-config-volume\") on node \"crc\" DevicePath \"\"" Dec 10 11:15:38 crc kubenswrapper[4780]: I1210 11:15:38.950769 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9c026cff-71ab-4b14-87af-38c8f42ccac1-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "9c026cff-71ab-4b14-87af-38c8f42ccac1" (UID: "9c026cff-71ab-4b14-87af-38c8f42ccac1"). InnerVolumeSpecName "secret-volume". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:15:39 crc kubenswrapper[4780]: I1210 11:15:39.037219 4780 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/9c026cff-71ab-4b14-87af-38c8f42ccac1-secret-volume\") on node \"crc\" DevicePath \"\"" Dec 10 11:15:39 crc kubenswrapper[4780]: I1210 11:15:39.677346 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"ae84be8f-342b-4f21-9aea-4ebf423af61c","Type":"ContainerStarted","Data":"d105d3edbf26886ffbbc006bf2dc1b26d894a59541adaae7e0c1609bc379c05b"} Dec 10 11:15:39 crc kubenswrapper[4780]: I1210 11:15:39.682094 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"9fd7774f-b900-4586-a38d-d7fb0e4991f7","Type":"ContainerStarted","Data":"b18c757728afae5e081c02b751aec4d477c1c4d1abe695f931c7a8eced470499"} Dec 10 11:15:39 crc kubenswrapper[4780]: I1210 11:15:39.733327 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-internal-api-0" podStartSLOduration=20.733286678 podStartE2EDuration="20.733286678s" podCreationTimestamp="2025-12-10 11:15:19 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 11:15:39.718538172 +0000 UTC m=+1844.571931615" watchObservedRunningTime="2025-12-10 11:15:39.733286678 +0000 UTC m=+1844.586680121" Dec 10 11:15:39 crc kubenswrapper[4780]: I1210 11:15:39.770683 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-external-api-0" podStartSLOduration=6.770622961 podStartE2EDuration="6.770622961s" podCreationTimestamp="2025-12-10 11:15:33 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 11:15:39.752374475 +0000 UTC m=+1844.605767918" watchObservedRunningTime="2025-12-10 11:15:39.770622961 +0000 UTC m=+1844.624016394" Dec 10 11:15:41 crc kubenswrapper[4780]: I1210 11:15:41.960616 4780 scope.go:117] "RemoveContainer" containerID="90f38aa24e74e78a263a82b742bebf91991ff25f2212114904e33abbbd82de16" Dec 10 11:15:41 crc kubenswrapper[4780]: E1210 11:15:41.961008 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xhdr5_openshift-machine-config-operator(6bf1dca1-b191-4796-b326-baac53e84045)\"" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" podUID="6bf1dca1-b191-4796-b326-baac53e84045" Dec 10 11:15:43 crc kubenswrapper[4780]: I1210 11:15:43.876418 4780 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0" Dec 10 11:15:43 crc kubenswrapper[4780]: I1210 11:15:43.877113 4780 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0" Dec 10 11:15:43 crc kubenswrapper[4780]: I1210 11:15:43.975829 4780 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0" Dec 10 11:15:43 crc kubenswrapper[4780]: I1210 11:15:43.975976 4780 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0" Dec 10 11:15:44 crc kubenswrapper[4780]: I1210 
11:15:44.760502 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0" Dec 10 11:15:44 crc kubenswrapper[4780]: I1210 11:15:44.761036 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0" Dec 10 11:15:47 crc kubenswrapper[4780]: I1210 11:15:47.269535 4780 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/prometheus-metric-storage-0" Dec 10 11:15:47 crc kubenswrapper[4780]: I1210 11:15:47.287384 4780 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/prometheus-metric-storage-0" Dec 10 11:15:47 crc kubenswrapper[4780]: I1210 11:15:47.817856 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"d74fb1d1-0533-4202-9ee5-4a1c04ca6971","Type":"ContainerStarted","Data":"d3d3e89a852b03628da7d97fa6fe05c67e72e39b605215264e2e5401ccd2fe0b"} Dec 10 11:15:47 crc kubenswrapper[4780]: I1210 11:15:47.826109 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/prometheus-metric-storage-0" Dec 10 11:15:48 crc kubenswrapper[4780]: I1210 11:15:48.857753 4780 generic.go:334] "Generic (PLEG): container finished" podID="61593545-0480-4729-b6d1-ba4089e68f7a" containerID="a903948cf1b99b4467725000fe94ec620a61470054b3a7bcbacc76574cc99abc" exitCode=0 Dec 10 11:15:48 crc kubenswrapper[4780]: I1210 11:15:48.857838 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-hzmqc" event={"ID":"61593545-0480-4729-b6d1-ba4089e68f7a","Type":"ContainerDied","Data":"a903948cf1b99b4467725000fe94ec620a61470054b3a7bcbacc76574cc99abc"} Dec 10 11:15:48 crc kubenswrapper[4780]: I1210 11:15:48.870410 4780 generic.go:334] "Generic (PLEG): container finished" podID="79f8dd0a-b900-4a3b-a2ab-262861daedc6" containerID="1d5fb6b09dace4e96b104c1de5d8b4873d0327254dc4f21ad87b3a00f55bdb0d" exitCode=0 Dec 10 11:15:48 crc kubenswrapper[4780]: I1210 11:15:48.871565 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-x57qm" event={"ID":"79f8dd0a-b900-4a3b-a2ab-262861daedc6","Type":"ContainerDied","Data":"1d5fb6b09dace4e96b104c1de5d8b4873d0327254dc4f21ad87b3a00f55bdb0d"} Dec 10 11:15:49 crc kubenswrapper[4780]: I1210 11:15:49.544477 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Dec 10 11:15:49 crc kubenswrapper[4780]: I1210 11:15:49.545135 4780 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0" Dec 10 11:15:49 crc kubenswrapper[4780]: I1210 11:15:49.545153 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Dec 10 11:15:49 crc kubenswrapper[4780]: I1210 11:15:49.545165 4780 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0" Dec 10 11:15:49 crc kubenswrapper[4780]: I1210 11:15:49.614332 4780 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0" Dec 10 11:15:49 crc kubenswrapper[4780]: I1210 11:15:49.643433 4780 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0" Dec 10 11:15:49 crc kubenswrapper[4780]: I1210 11:15:49.896410 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-hn59m" 
event={"ID":"1af5d3ff-e54a-4a1a-89f4-b105fe6dbbd9","Type":"ContainerStarted","Data":"3d15fd52a10848d5989279b9af385fb02e6adedff6cbb19540bcc05ea3be56ee"} Dec 10 11:15:49 crc kubenswrapper[4780]: I1210 11:15:49.935527 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-db-sync-hn59m" podStartSLOduration=7.50107236 podStartE2EDuration="1m11.935498763s" podCreationTimestamp="2025-12-10 11:14:38 +0000 UTC" firstStartedPulling="2025-12-10 11:14:44.110280725 +0000 UTC m=+1788.963674168" lastFinishedPulling="2025-12-10 11:15:48.544707128 +0000 UTC m=+1853.398100571" observedRunningTime="2025-12-10 11:15:49.919812593 +0000 UTC m=+1854.773206046" watchObservedRunningTime="2025-12-10 11:15:49.935498763 +0000 UTC m=+1854.788892206" Dec 10 11:15:50 crc kubenswrapper[4780]: I1210 11:15:50.637823 4780 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-sync-hzmqc" Dec 10 11:15:50 crc kubenswrapper[4780]: I1210 11:15:50.651048 4780 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-x57qm" Dec 10 11:15:50 crc kubenswrapper[4780]: I1210 11:15:50.760130 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9vggq\" (UniqueName: \"kubernetes.io/projected/79f8dd0a-b900-4a3b-a2ab-262861daedc6-kube-api-access-9vggq\") pod \"79f8dd0a-b900-4a3b-a2ab-262861daedc6\" (UID: \"79f8dd0a-b900-4a3b-a2ab-262861daedc6\") " Dec 10 11:15:50 crc kubenswrapper[4780]: I1210 11:15:50.760256 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-clw5l\" (UniqueName: \"kubernetes.io/projected/61593545-0480-4729-b6d1-ba4089e68f7a-kube-api-access-clw5l\") pod \"61593545-0480-4729-b6d1-ba4089e68f7a\" (UID: \"61593545-0480-4729-b6d1-ba4089e68f7a\") " Dec 10 11:15:50 crc kubenswrapper[4780]: I1210 11:15:50.760298 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/61593545-0480-4729-b6d1-ba4089e68f7a-config-data\") pod \"61593545-0480-4729-b6d1-ba4089e68f7a\" (UID: \"61593545-0480-4729-b6d1-ba4089e68f7a\") " Dec 10 11:15:50 crc kubenswrapper[4780]: I1210 11:15:50.760349 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/79f8dd0a-b900-4a3b-a2ab-262861daedc6-combined-ca-bundle\") pod \"79f8dd0a-b900-4a3b-a2ab-262861daedc6\" (UID: \"79f8dd0a-b900-4a3b-a2ab-262861daedc6\") " Dec 10 11:15:50 crc kubenswrapper[4780]: I1210 11:15:50.760390 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/61593545-0480-4729-b6d1-ba4089e68f7a-combined-ca-bundle\") pod \"61593545-0480-4729-b6d1-ba4089e68f7a\" (UID: \"61593545-0480-4729-b6d1-ba4089e68f7a\") " Dec 10 11:15:50 crc kubenswrapper[4780]: I1210 11:15:50.760522 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/79f8dd0a-b900-4a3b-a2ab-262861daedc6-config-data\") pod \"79f8dd0a-b900-4a3b-a2ab-262861daedc6\" (UID: \"79f8dd0a-b900-4a3b-a2ab-262861daedc6\") " Dec 10 11:15:50 crc kubenswrapper[4780]: I1210 11:15:50.760627 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/61593545-0480-4729-b6d1-ba4089e68f7a-logs\") pod 
\"61593545-0480-4729-b6d1-ba4089e68f7a\" (UID: \"61593545-0480-4729-b6d1-ba4089e68f7a\") " Dec 10 11:15:50 crc kubenswrapper[4780]: I1210 11:15:50.760809 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/79f8dd0a-b900-4a3b-a2ab-262861daedc6-scripts\") pod \"79f8dd0a-b900-4a3b-a2ab-262861daedc6\" (UID: \"79f8dd0a-b900-4a3b-a2ab-262861daedc6\") " Dec 10 11:15:50 crc kubenswrapper[4780]: I1210 11:15:50.760886 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/79f8dd0a-b900-4a3b-a2ab-262861daedc6-fernet-keys\") pod \"79f8dd0a-b900-4a3b-a2ab-262861daedc6\" (UID: \"79f8dd0a-b900-4a3b-a2ab-262861daedc6\") " Dec 10 11:15:50 crc kubenswrapper[4780]: I1210 11:15:50.760956 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/79f8dd0a-b900-4a3b-a2ab-262861daedc6-credential-keys\") pod \"79f8dd0a-b900-4a3b-a2ab-262861daedc6\" (UID: \"79f8dd0a-b900-4a3b-a2ab-262861daedc6\") " Dec 10 11:15:50 crc kubenswrapper[4780]: I1210 11:15:50.761050 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/61593545-0480-4729-b6d1-ba4089e68f7a-scripts\") pod \"61593545-0480-4729-b6d1-ba4089e68f7a\" (UID: \"61593545-0480-4729-b6d1-ba4089e68f7a\") " Dec 10 11:15:50 crc kubenswrapper[4780]: I1210 11:15:50.762196 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/61593545-0480-4729-b6d1-ba4089e68f7a-logs" (OuterVolumeSpecName: "logs") pod "61593545-0480-4729-b6d1-ba4089e68f7a" (UID: "61593545-0480-4729-b6d1-ba4089e68f7a"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 11:15:50 crc kubenswrapper[4780]: I1210 11:15:50.762762 4780 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/61593545-0480-4729-b6d1-ba4089e68f7a-logs\") on node \"crc\" DevicePath \"\"" Dec 10 11:15:50 crc kubenswrapper[4780]: I1210 11:15:50.780122 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/79f8dd0a-b900-4a3b-a2ab-262861daedc6-scripts" (OuterVolumeSpecName: "scripts") pod "79f8dd0a-b900-4a3b-a2ab-262861daedc6" (UID: "79f8dd0a-b900-4a3b-a2ab-262861daedc6"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:15:50 crc kubenswrapper[4780]: I1210 11:15:50.801030 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/79f8dd0a-b900-4a3b-a2ab-262861daedc6-credential-keys" (OuterVolumeSpecName: "credential-keys") pod "79f8dd0a-b900-4a3b-a2ab-262861daedc6" (UID: "79f8dd0a-b900-4a3b-a2ab-262861daedc6"). InnerVolumeSpecName "credential-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:15:50 crc kubenswrapper[4780]: I1210 11:15:50.801310 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/79f8dd0a-b900-4a3b-a2ab-262861daedc6-kube-api-access-9vggq" (OuterVolumeSpecName: "kube-api-access-9vggq") pod "79f8dd0a-b900-4a3b-a2ab-262861daedc6" (UID: "79f8dd0a-b900-4a3b-a2ab-262861daedc6"). InnerVolumeSpecName "kube-api-access-9vggq". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:15:50 crc kubenswrapper[4780]: I1210 11:15:50.804568 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/61593545-0480-4729-b6d1-ba4089e68f7a-kube-api-access-clw5l" (OuterVolumeSpecName: "kube-api-access-clw5l") pod "61593545-0480-4729-b6d1-ba4089e68f7a" (UID: "61593545-0480-4729-b6d1-ba4089e68f7a"). InnerVolumeSpecName "kube-api-access-clw5l". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:15:50 crc kubenswrapper[4780]: I1210 11:15:50.806685 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/61593545-0480-4729-b6d1-ba4089e68f7a-scripts" (OuterVolumeSpecName: "scripts") pod "61593545-0480-4729-b6d1-ba4089e68f7a" (UID: "61593545-0480-4729-b6d1-ba4089e68f7a"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:15:50 crc kubenswrapper[4780]: I1210 11:15:50.812395 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/79f8dd0a-b900-4a3b-a2ab-262861daedc6-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "79f8dd0a-b900-4a3b-a2ab-262861daedc6" (UID: "79f8dd0a-b900-4a3b-a2ab-262861daedc6"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:15:50 crc kubenswrapper[4780]: I1210 11:15:50.844463 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/79f8dd0a-b900-4a3b-a2ab-262861daedc6-config-data" (OuterVolumeSpecName: "config-data") pod "79f8dd0a-b900-4a3b-a2ab-262861daedc6" (UID: "79f8dd0a-b900-4a3b-a2ab-262861daedc6"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:15:50 crc kubenswrapper[4780]: I1210 11:15:50.845738 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/61593545-0480-4729-b6d1-ba4089e68f7a-config-data" (OuterVolumeSpecName: "config-data") pod "61593545-0480-4729-b6d1-ba4089e68f7a" (UID: "61593545-0480-4729-b6d1-ba4089e68f7a"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:15:50 crc kubenswrapper[4780]: I1210 11:15:50.849253 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/61593545-0480-4729-b6d1-ba4089e68f7a-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "61593545-0480-4729-b6d1-ba4089e68f7a" (UID: "61593545-0480-4729-b6d1-ba4089e68f7a"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:15:50 crc kubenswrapper[4780]: I1210 11:15:50.868791 4780 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/79f8dd0a-b900-4a3b-a2ab-262861daedc6-config-data\") on node \"crc\" DevicePath \"\"" Dec 10 11:15:50 crc kubenswrapper[4780]: I1210 11:15:50.868848 4780 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/79f8dd0a-b900-4a3b-a2ab-262861daedc6-scripts\") on node \"crc\" DevicePath \"\"" Dec 10 11:15:50 crc kubenswrapper[4780]: I1210 11:15:50.868858 4780 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/79f8dd0a-b900-4a3b-a2ab-262861daedc6-fernet-keys\") on node \"crc\" DevicePath \"\"" Dec 10 11:15:50 crc kubenswrapper[4780]: I1210 11:15:50.868873 4780 reconciler_common.go:293] "Volume detached for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/79f8dd0a-b900-4a3b-a2ab-262861daedc6-credential-keys\") on node \"crc\" DevicePath \"\"" Dec 10 11:15:50 crc kubenswrapper[4780]: I1210 11:15:50.868885 4780 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/61593545-0480-4729-b6d1-ba4089e68f7a-scripts\") on node \"crc\" DevicePath \"\"" Dec 10 11:15:50 crc kubenswrapper[4780]: I1210 11:15:50.868894 4780 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9vggq\" (UniqueName: \"kubernetes.io/projected/79f8dd0a-b900-4a3b-a2ab-262861daedc6-kube-api-access-9vggq\") on node \"crc\" DevicePath \"\"" Dec 10 11:15:50 crc kubenswrapper[4780]: I1210 11:15:50.869503 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/79f8dd0a-b900-4a3b-a2ab-262861daedc6-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "79f8dd0a-b900-4a3b-a2ab-262861daedc6" (UID: "79f8dd0a-b900-4a3b-a2ab-262861daedc6"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:15:50 crc kubenswrapper[4780]: I1210 11:15:50.869770 4780 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-clw5l\" (UniqueName: \"kubernetes.io/projected/61593545-0480-4729-b6d1-ba4089e68f7a-kube-api-access-clw5l\") on node \"crc\" DevicePath \"\"" Dec 10 11:15:50 crc kubenswrapper[4780]: I1210 11:15:50.869800 4780 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/61593545-0480-4729-b6d1-ba4089e68f7a-config-data\") on node \"crc\" DevicePath \"\"" Dec 10 11:15:50 crc kubenswrapper[4780]: I1210 11:15:50.869811 4780 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/61593545-0480-4729-b6d1-ba4089e68f7a-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 10 11:15:50 crc kubenswrapper[4780]: I1210 11:15:50.923565 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-x57qm" event={"ID":"79f8dd0a-b900-4a3b-a2ab-262861daedc6","Type":"ContainerDied","Data":"e2fc06a448897cd2dd3290871eb73e021c1c1efa6cf451de6f18c0b389a26fc5"} Dec 10 11:15:50 crc kubenswrapper[4780]: I1210 11:15:50.923659 4780 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e2fc06a448897cd2dd3290871eb73e021c1c1efa6cf451de6f18c0b389a26fc5" Dec 10 11:15:50 crc kubenswrapper[4780]: I1210 11:15:50.923783 4780 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bootstrap-x57qm" Dec 10 11:15:50 crc kubenswrapper[4780]: I1210 11:15:50.960498 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-wqs8s" event={"ID":"f2a06360-9c37-4ae4-8148-73c37d2be5a4","Type":"ContainerStarted","Data":"48344f280bdf19a5857712446887c552fa4f17973c1275cd703846f6e6ceb87f"} Dec 10 11:15:50 crc kubenswrapper[4780]: I1210 11:15:50.975420 4780 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/79f8dd0a-b900-4a3b-a2ab-262861daedc6-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 10 11:15:50 crc kubenswrapper[4780]: I1210 11:15:50.998506 4780 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-sync-hzmqc" Dec 10 11:15:51 crc kubenswrapper[4780]: I1210 11:15:51.005623 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-hzmqc" event={"ID":"61593545-0480-4729-b6d1-ba4089e68f7a","Type":"ContainerDied","Data":"0c2e060d83aa95614722ce2025c0d9bae3ba6776344cb6fd2aa1ab4ecb40148f"} Dec 10 11:15:51 crc kubenswrapper[4780]: I1210 11:15:51.015689 4780 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="0c2e060d83aa95614722ce2025c0d9bae3ba6776344cb6fd2aa1ab4ecb40148f" Dec 10 11:15:51 crc kubenswrapper[4780]: I1210 11:15:51.015876 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0" Dec 10 11:15:51 crc kubenswrapper[4780]: I1210 11:15:51.015955 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0" Dec 10 11:15:51 crc kubenswrapper[4780]: I1210 11:15:51.028982 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-db-sync-wqs8s" podStartSLOduration=9.614899098 podStartE2EDuration="1m13.028945754s" podCreationTimestamp="2025-12-10 11:14:38 +0000 UTC" firstStartedPulling="2025-12-10 11:14:45.32815919 +0000 UTC m=+1790.181552633" lastFinishedPulling="2025-12-10 11:15:48.742205846 +0000 UTC m=+1853.595599289" observedRunningTime="2025-12-10 11:15:51.0076039 +0000 UTC m=+1855.860997373" watchObservedRunningTime="2025-12-10 11:15:51.028945754 +0000 UTC m=+1855.882339197" Dec 10 11:15:51 crc kubenswrapper[4780]: I1210 11:15:51.155395 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-75499b8cb8-8226n"] Dec 10 11:15:51 crc kubenswrapper[4780]: E1210 11:15:51.156378 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="61593545-0480-4729-b6d1-ba4089e68f7a" containerName="placement-db-sync" Dec 10 11:15:51 crc kubenswrapper[4780]: I1210 11:15:51.156411 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="61593545-0480-4729-b6d1-ba4089e68f7a" containerName="placement-db-sync" Dec 10 11:15:51 crc kubenswrapper[4780]: E1210 11:15:51.156469 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="79f8dd0a-b900-4a3b-a2ab-262861daedc6" containerName="keystone-bootstrap" Dec 10 11:15:51 crc kubenswrapper[4780]: I1210 11:15:51.156480 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="79f8dd0a-b900-4a3b-a2ab-262861daedc6" containerName="keystone-bootstrap" Dec 10 11:15:51 crc kubenswrapper[4780]: E1210 11:15:51.156501 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9c026cff-71ab-4b14-87af-38c8f42ccac1" containerName="collect-profiles" Dec 10 11:15:51 crc kubenswrapper[4780]: 
I1210 11:15:51.156511 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="9c026cff-71ab-4b14-87af-38c8f42ccac1" containerName="collect-profiles" Dec 10 11:15:51 crc kubenswrapper[4780]: I1210 11:15:51.156843 4780 memory_manager.go:354] "RemoveStaleState removing state" podUID="9c026cff-71ab-4b14-87af-38c8f42ccac1" containerName="collect-profiles" Dec 10 11:15:51 crc kubenswrapper[4780]: I1210 11:15:51.156948 4780 memory_manager.go:354] "RemoveStaleState removing state" podUID="79f8dd0a-b900-4a3b-a2ab-262861daedc6" containerName="keystone-bootstrap" Dec 10 11:15:51 crc kubenswrapper[4780]: I1210 11:15:51.156967 4780 memory_manager.go:354] "RemoveStaleState removing state" podUID="61593545-0480-4729-b6d1-ba4089e68f7a" containerName="placement-db-sync" Dec 10 11:15:51 crc kubenswrapper[4780]: I1210 11:15:51.159900 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-75499b8cb8-8226n" Dec 10 11:15:51 crc kubenswrapper[4780]: I1210 11:15:51.171405 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-config-data" Dec 10 11:15:51 crc kubenswrapper[4780]: I1210 11:15:51.179105 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-scripts" Dec 10 11:15:51 crc kubenswrapper[4780]: I1210 11:15:51.179599 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-placement-public-svc" Dec 10 11:15:51 crc kubenswrapper[4780]: I1210 11:15:51.191877 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-placement-dockercfg-shhvb" Dec 10 11:15:51 crc kubenswrapper[4780]: I1210 11:15:51.193292 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-placement-internal-svc" Dec 10 11:15:51 crc kubenswrapper[4780]: I1210 11:15:51.208880 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-75499b8cb8-8226n"] Dec 10 11:15:51 crc kubenswrapper[4780]: I1210 11:15:51.307490 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nmsrs\" (UniqueName: \"kubernetes.io/projected/2489cfe4-c4b5-4c9b-9977-02d103de7937-kube-api-access-nmsrs\") pod \"placement-75499b8cb8-8226n\" (UID: \"2489cfe4-c4b5-4c9b-9977-02d103de7937\") " pod="openstack/placement-75499b8cb8-8226n" Dec 10 11:15:51 crc kubenswrapper[4780]: I1210 11:15:51.307650 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2489cfe4-c4b5-4c9b-9977-02d103de7937-config-data\") pod \"placement-75499b8cb8-8226n\" (UID: \"2489cfe4-c4b5-4c9b-9977-02d103de7937\") " pod="openstack/placement-75499b8cb8-8226n" Dec 10 11:15:51 crc kubenswrapper[4780]: I1210 11:15:51.308120 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/2489cfe4-c4b5-4c9b-9977-02d103de7937-public-tls-certs\") pod \"placement-75499b8cb8-8226n\" (UID: \"2489cfe4-c4b5-4c9b-9977-02d103de7937\") " pod="openstack/placement-75499b8cb8-8226n" Dec 10 11:15:51 crc kubenswrapper[4780]: I1210 11:15:51.308264 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/2489cfe4-c4b5-4c9b-9977-02d103de7937-internal-tls-certs\") pod \"placement-75499b8cb8-8226n\" (UID: \"2489cfe4-c4b5-4c9b-9977-02d103de7937\") " 
pod="openstack/placement-75499b8cb8-8226n" Dec 10 11:15:51 crc kubenswrapper[4780]: I1210 11:15:51.308412 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2489cfe4-c4b5-4c9b-9977-02d103de7937-logs\") pod \"placement-75499b8cb8-8226n\" (UID: \"2489cfe4-c4b5-4c9b-9977-02d103de7937\") " pod="openstack/placement-75499b8cb8-8226n" Dec 10 11:15:51 crc kubenswrapper[4780]: I1210 11:15:51.308668 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2489cfe4-c4b5-4c9b-9977-02d103de7937-combined-ca-bundle\") pod \"placement-75499b8cb8-8226n\" (UID: \"2489cfe4-c4b5-4c9b-9977-02d103de7937\") " pod="openstack/placement-75499b8cb8-8226n" Dec 10 11:15:51 crc kubenswrapper[4780]: I1210 11:15:51.308902 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2489cfe4-c4b5-4c9b-9977-02d103de7937-scripts\") pod \"placement-75499b8cb8-8226n\" (UID: \"2489cfe4-c4b5-4c9b-9977-02d103de7937\") " pod="openstack/placement-75499b8cb8-8226n" Dec 10 11:15:51 crc kubenswrapper[4780]: I1210 11:15:51.340132 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-8579f5f4d5-t2zl7"] Dec 10 11:15:51 crc kubenswrapper[4780]: I1210 11:15:51.343300 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-8579f5f4d5-t2zl7" Dec 10 11:15:51 crc kubenswrapper[4780]: I1210 11:15:51.354547 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Dec 10 11:15:51 crc kubenswrapper[4780]: I1210 11:15:51.355576 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-keystone-public-svc" Dec 10 11:15:51 crc kubenswrapper[4780]: I1210 11:15:51.356307 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-keystone-internal-svc" Dec 10 11:15:51 crc kubenswrapper[4780]: I1210 11:15:51.356625 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Dec 10 11:15:51 crc kubenswrapper[4780]: I1210 11:15:51.356898 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-86tfj" Dec 10 11:15:51 crc kubenswrapper[4780]: I1210 11:15:51.357413 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Dec 10 11:15:51 crc kubenswrapper[4780]: I1210 11:15:51.369161 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-8579f5f4d5-t2zl7"] Dec 10 11:15:51 crc kubenswrapper[4780]: I1210 11:15:51.414406 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/2489cfe4-c4b5-4c9b-9977-02d103de7937-public-tls-certs\") pod \"placement-75499b8cb8-8226n\" (UID: \"2489cfe4-c4b5-4c9b-9977-02d103de7937\") " pod="openstack/placement-75499b8cb8-8226n" Dec 10 11:15:51 crc kubenswrapper[4780]: I1210 11:15:51.414523 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/2489cfe4-c4b5-4c9b-9977-02d103de7937-internal-tls-certs\") pod \"placement-75499b8cb8-8226n\" (UID: \"2489cfe4-c4b5-4c9b-9977-02d103de7937\") " pod="openstack/placement-75499b8cb8-8226n" Dec 10 11:15:51 crc kubenswrapper[4780]: I1210 
11:15:51.414630 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2489cfe4-c4b5-4c9b-9977-02d103de7937-logs\") pod \"placement-75499b8cb8-8226n\" (UID: \"2489cfe4-c4b5-4c9b-9977-02d103de7937\") " pod="openstack/placement-75499b8cb8-8226n" Dec 10 11:15:51 crc kubenswrapper[4780]: I1210 11:15:51.414808 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2489cfe4-c4b5-4c9b-9977-02d103de7937-combined-ca-bundle\") pod \"placement-75499b8cb8-8226n\" (UID: \"2489cfe4-c4b5-4c9b-9977-02d103de7937\") " pod="openstack/placement-75499b8cb8-8226n" Dec 10 11:15:51 crc kubenswrapper[4780]: I1210 11:15:51.415341 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2489cfe4-c4b5-4c9b-9977-02d103de7937-scripts\") pod \"placement-75499b8cb8-8226n\" (UID: \"2489cfe4-c4b5-4c9b-9977-02d103de7937\") " pod="openstack/placement-75499b8cb8-8226n" Dec 10 11:15:51 crc kubenswrapper[4780]: I1210 11:15:51.415496 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nmsrs\" (UniqueName: \"kubernetes.io/projected/2489cfe4-c4b5-4c9b-9977-02d103de7937-kube-api-access-nmsrs\") pod \"placement-75499b8cb8-8226n\" (UID: \"2489cfe4-c4b5-4c9b-9977-02d103de7937\") " pod="openstack/placement-75499b8cb8-8226n" Dec 10 11:15:51 crc kubenswrapper[4780]: I1210 11:15:51.415587 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2489cfe4-c4b5-4c9b-9977-02d103de7937-logs\") pod \"placement-75499b8cb8-8226n\" (UID: \"2489cfe4-c4b5-4c9b-9977-02d103de7937\") " pod="openstack/placement-75499b8cb8-8226n" Dec 10 11:15:51 crc kubenswrapper[4780]: I1210 11:15:51.415680 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2489cfe4-c4b5-4c9b-9977-02d103de7937-config-data\") pod \"placement-75499b8cb8-8226n\" (UID: \"2489cfe4-c4b5-4c9b-9977-02d103de7937\") " pod="openstack/placement-75499b8cb8-8226n" Dec 10 11:15:51 crc kubenswrapper[4780]: I1210 11:15:51.422981 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/2489cfe4-c4b5-4c9b-9977-02d103de7937-internal-tls-certs\") pod \"placement-75499b8cb8-8226n\" (UID: \"2489cfe4-c4b5-4c9b-9977-02d103de7937\") " pod="openstack/placement-75499b8cb8-8226n" Dec 10 11:15:51 crc kubenswrapper[4780]: I1210 11:15:51.425471 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2489cfe4-c4b5-4c9b-9977-02d103de7937-config-data\") pod \"placement-75499b8cb8-8226n\" (UID: \"2489cfe4-c4b5-4c9b-9977-02d103de7937\") " pod="openstack/placement-75499b8cb8-8226n" Dec 10 11:15:51 crc kubenswrapper[4780]: I1210 11:15:51.430635 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2489cfe4-c4b5-4c9b-9977-02d103de7937-scripts\") pod \"placement-75499b8cb8-8226n\" (UID: \"2489cfe4-c4b5-4c9b-9977-02d103de7937\") " pod="openstack/placement-75499b8cb8-8226n" Dec 10 11:15:51 crc kubenswrapper[4780]: I1210 11:15:51.449712 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nmsrs\" (UniqueName: 
\"kubernetes.io/projected/2489cfe4-c4b5-4c9b-9977-02d103de7937-kube-api-access-nmsrs\") pod \"placement-75499b8cb8-8226n\" (UID: \"2489cfe4-c4b5-4c9b-9977-02d103de7937\") " pod="openstack/placement-75499b8cb8-8226n" Dec 10 11:15:51 crc kubenswrapper[4780]: I1210 11:15:51.453115 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2489cfe4-c4b5-4c9b-9977-02d103de7937-combined-ca-bundle\") pod \"placement-75499b8cb8-8226n\" (UID: \"2489cfe4-c4b5-4c9b-9977-02d103de7937\") " pod="openstack/placement-75499b8cb8-8226n" Dec 10 11:15:51 crc kubenswrapper[4780]: I1210 11:15:51.479989 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/2489cfe4-c4b5-4c9b-9977-02d103de7937-public-tls-certs\") pod \"placement-75499b8cb8-8226n\" (UID: \"2489cfe4-c4b5-4c9b-9977-02d103de7937\") " pod="openstack/placement-75499b8cb8-8226n" Dec 10 11:15:51 crc kubenswrapper[4780]: I1210 11:15:51.519878 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5e7c227f-4a69-4a34-a847-b9bf3e4ed937-scripts\") pod \"keystone-8579f5f4d5-t2zl7\" (UID: \"5e7c227f-4a69-4a34-a847-b9bf3e4ed937\") " pod="openstack/keystone-8579f5f4d5-t2zl7" Dec 10 11:15:51 crc kubenswrapper[4780]: I1210 11:15:51.519963 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6wd4b\" (UniqueName: \"kubernetes.io/projected/5e7c227f-4a69-4a34-a847-b9bf3e4ed937-kube-api-access-6wd4b\") pod \"keystone-8579f5f4d5-t2zl7\" (UID: \"5e7c227f-4a69-4a34-a847-b9bf3e4ed937\") " pod="openstack/keystone-8579f5f4d5-t2zl7" Dec 10 11:15:51 crc kubenswrapper[4780]: I1210 11:15:51.520031 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/5e7c227f-4a69-4a34-a847-b9bf3e4ed937-internal-tls-certs\") pod \"keystone-8579f5f4d5-t2zl7\" (UID: \"5e7c227f-4a69-4a34-a847-b9bf3e4ed937\") " pod="openstack/keystone-8579f5f4d5-t2zl7" Dec 10 11:15:51 crc kubenswrapper[4780]: I1210 11:15:51.520058 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/5e7c227f-4a69-4a34-a847-b9bf3e4ed937-credential-keys\") pod \"keystone-8579f5f4d5-t2zl7\" (UID: \"5e7c227f-4a69-4a34-a847-b9bf3e4ed937\") " pod="openstack/keystone-8579f5f4d5-t2zl7" Dec 10 11:15:51 crc kubenswrapper[4780]: I1210 11:15:51.520116 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5e7c227f-4a69-4a34-a847-b9bf3e4ed937-config-data\") pod \"keystone-8579f5f4d5-t2zl7\" (UID: \"5e7c227f-4a69-4a34-a847-b9bf3e4ed937\") " pod="openstack/keystone-8579f5f4d5-t2zl7" Dec 10 11:15:51 crc kubenswrapper[4780]: I1210 11:15:51.520278 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5e7c227f-4a69-4a34-a847-b9bf3e4ed937-combined-ca-bundle\") pod \"keystone-8579f5f4d5-t2zl7\" (UID: \"5e7c227f-4a69-4a34-a847-b9bf3e4ed937\") " pod="openstack/keystone-8579f5f4d5-t2zl7" Dec 10 11:15:51 crc kubenswrapper[4780]: I1210 11:15:51.520344 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"fernet-keys\" (UniqueName: \"kubernetes.io/secret/5e7c227f-4a69-4a34-a847-b9bf3e4ed937-fernet-keys\") pod \"keystone-8579f5f4d5-t2zl7\" (UID: \"5e7c227f-4a69-4a34-a847-b9bf3e4ed937\") " pod="openstack/keystone-8579f5f4d5-t2zl7" Dec 10 11:15:51 crc kubenswrapper[4780]: I1210 11:15:51.520376 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/5e7c227f-4a69-4a34-a847-b9bf3e4ed937-public-tls-certs\") pod \"keystone-8579f5f4d5-t2zl7\" (UID: \"5e7c227f-4a69-4a34-a847-b9bf3e4ed937\") " pod="openstack/keystone-8579f5f4d5-t2zl7" Dec 10 11:15:51 crc kubenswrapper[4780]: I1210 11:15:51.536015 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-75499b8cb8-8226n" Dec 10 11:15:51 crc kubenswrapper[4780]: I1210 11:15:51.634694 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5e7c227f-4a69-4a34-a847-b9bf3e4ed937-combined-ca-bundle\") pod \"keystone-8579f5f4d5-t2zl7\" (UID: \"5e7c227f-4a69-4a34-a847-b9bf3e4ed937\") " pod="openstack/keystone-8579f5f4d5-t2zl7" Dec 10 11:15:51 crc kubenswrapper[4780]: I1210 11:15:51.638167 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/5e7c227f-4a69-4a34-a847-b9bf3e4ed937-fernet-keys\") pod \"keystone-8579f5f4d5-t2zl7\" (UID: \"5e7c227f-4a69-4a34-a847-b9bf3e4ed937\") " pod="openstack/keystone-8579f5f4d5-t2zl7" Dec 10 11:15:51 crc kubenswrapper[4780]: I1210 11:15:51.639313 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/5e7c227f-4a69-4a34-a847-b9bf3e4ed937-public-tls-certs\") pod \"keystone-8579f5f4d5-t2zl7\" (UID: \"5e7c227f-4a69-4a34-a847-b9bf3e4ed937\") " pod="openstack/keystone-8579f5f4d5-t2zl7" Dec 10 11:15:51 crc kubenswrapper[4780]: I1210 11:15:51.639651 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5e7c227f-4a69-4a34-a847-b9bf3e4ed937-scripts\") pod \"keystone-8579f5f4d5-t2zl7\" (UID: \"5e7c227f-4a69-4a34-a847-b9bf3e4ed937\") " pod="openstack/keystone-8579f5f4d5-t2zl7" Dec 10 11:15:51 crc kubenswrapper[4780]: I1210 11:15:51.639784 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6wd4b\" (UniqueName: \"kubernetes.io/projected/5e7c227f-4a69-4a34-a847-b9bf3e4ed937-kube-api-access-6wd4b\") pod \"keystone-8579f5f4d5-t2zl7\" (UID: \"5e7c227f-4a69-4a34-a847-b9bf3e4ed937\") " pod="openstack/keystone-8579f5f4d5-t2zl7" Dec 10 11:15:51 crc kubenswrapper[4780]: I1210 11:15:51.640373 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/5e7c227f-4a69-4a34-a847-b9bf3e4ed937-internal-tls-certs\") pod \"keystone-8579f5f4d5-t2zl7\" (UID: \"5e7c227f-4a69-4a34-a847-b9bf3e4ed937\") " pod="openstack/keystone-8579f5f4d5-t2zl7" Dec 10 11:15:51 crc kubenswrapper[4780]: I1210 11:15:51.640518 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/5e7c227f-4a69-4a34-a847-b9bf3e4ed937-credential-keys\") pod \"keystone-8579f5f4d5-t2zl7\" (UID: \"5e7c227f-4a69-4a34-a847-b9bf3e4ed937\") " pod="openstack/keystone-8579f5f4d5-t2zl7" Dec 10 11:15:51 crc kubenswrapper[4780]: I1210 
11:15:51.640720 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5e7c227f-4a69-4a34-a847-b9bf3e4ed937-config-data\") pod \"keystone-8579f5f4d5-t2zl7\" (UID: \"5e7c227f-4a69-4a34-a847-b9bf3e4ed937\") " pod="openstack/keystone-8579f5f4d5-t2zl7" Dec 10 11:15:51 crc kubenswrapper[4780]: I1210 11:15:51.648793 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/5e7c227f-4a69-4a34-a847-b9bf3e4ed937-public-tls-certs\") pod \"keystone-8579f5f4d5-t2zl7\" (UID: \"5e7c227f-4a69-4a34-a847-b9bf3e4ed937\") " pod="openstack/keystone-8579f5f4d5-t2zl7" Dec 10 11:15:51 crc kubenswrapper[4780]: I1210 11:15:51.650365 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5e7c227f-4a69-4a34-a847-b9bf3e4ed937-config-data\") pod \"keystone-8579f5f4d5-t2zl7\" (UID: \"5e7c227f-4a69-4a34-a847-b9bf3e4ed937\") " pod="openstack/keystone-8579f5f4d5-t2zl7" Dec 10 11:15:51 crc kubenswrapper[4780]: I1210 11:15:51.651639 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5e7c227f-4a69-4a34-a847-b9bf3e4ed937-scripts\") pod \"keystone-8579f5f4d5-t2zl7\" (UID: \"5e7c227f-4a69-4a34-a847-b9bf3e4ed937\") " pod="openstack/keystone-8579f5f4d5-t2zl7" Dec 10 11:15:51 crc kubenswrapper[4780]: I1210 11:15:51.654296 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5e7c227f-4a69-4a34-a847-b9bf3e4ed937-combined-ca-bundle\") pod \"keystone-8579f5f4d5-t2zl7\" (UID: \"5e7c227f-4a69-4a34-a847-b9bf3e4ed937\") " pod="openstack/keystone-8579f5f4d5-t2zl7" Dec 10 11:15:51 crc kubenswrapper[4780]: I1210 11:15:51.655051 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/5e7c227f-4a69-4a34-a847-b9bf3e4ed937-internal-tls-certs\") pod \"keystone-8579f5f4d5-t2zl7\" (UID: \"5e7c227f-4a69-4a34-a847-b9bf3e4ed937\") " pod="openstack/keystone-8579f5f4d5-t2zl7" Dec 10 11:15:51 crc kubenswrapper[4780]: I1210 11:15:51.656436 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/5e7c227f-4a69-4a34-a847-b9bf3e4ed937-credential-keys\") pod \"keystone-8579f5f4d5-t2zl7\" (UID: \"5e7c227f-4a69-4a34-a847-b9bf3e4ed937\") " pod="openstack/keystone-8579f5f4d5-t2zl7" Dec 10 11:15:51 crc kubenswrapper[4780]: I1210 11:15:51.667788 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6wd4b\" (UniqueName: \"kubernetes.io/projected/5e7c227f-4a69-4a34-a847-b9bf3e4ed937-kube-api-access-6wd4b\") pod \"keystone-8579f5f4d5-t2zl7\" (UID: \"5e7c227f-4a69-4a34-a847-b9bf3e4ed937\") " pod="openstack/keystone-8579f5f4d5-t2zl7" Dec 10 11:15:51 crc kubenswrapper[4780]: I1210 11:15:51.695106 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/5e7c227f-4a69-4a34-a847-b9bf3e4ed937-fernet-keys\") pod \"keystone-8579f5f4d5-t2zl7\" (UID: \"5e7c227f-4a69-4a34-a847-b9bf3e4ed937\") " pod="openstack/keystone-8579f5f4d5-t2zl7" Dec 10 11:15:51 crc kubenswrapper[4780]: I1210 11:15:51.993485 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-8579f5f4d5-t2zl7" Dec 10 11:15:52 crc kubenswrapper[4780]: I1210 11:15:52.451342 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-75499b8cb8-8226n"] Dec 10 11:15:52 crc kubenswrapper[4780]: W1210 11:15:52.487066 4780 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod2489cfe4_c4b5_4c9b_9977_02d103de7937.slice/crio-3c7fbe02682c0c461c855077d044f9259325271b4689568e4ddf4d4fb3d77dfc WatchSource:0}: Error finding container 3c7fbe02682c0c461c855077d044f9259325271b4689568e4ddf4d4fb3d77dfc: Status 404 returned error can't find the container with id 3c7fbe02682c0c461c855077d044f9259325271b4689568e4ddf4d4fb3d77dfc Dec 10 11:15:52 crc kubenswrapper[4780]: W1210 11:15:52.678453 4780 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod5e7c227f_4a69_4a34_a847_b9bf3e4ed937.slice/crio-4a964f364815c5c16c288a49ee383a94eeeccca7af998bbcf44ee1c37ffc763f WatchSource:0}: Error finding container 4a964f364815c5c16c288a49ee383a94eeeccca7af998bbcf44ee1c37ffc763f: Status 404 returned error can't find the container with id 4a964f364815c5c16c288a49ee383a94eeeccca7af998bbcf44ee1c37ffc763f Dec 10 11:15:52 crc kubenswrapper[4780]: I1210 11:15:52.687499 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-8579f5f4d5-t2zl7"] Dec 10 11:15:52 crc kubenswrapper[4780]: I1210 11:15:52.960965 4780 scope.go:117] "RemoveContainer" containerID="90f38aa24e74e78a263a82b742bebf91991ff25f2212114904e33abbbd82de16" Dec 10 11:15:52 crc kubenswrapper[4780]: E1210 11:15:52.961441 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xhdr5_openshift-machine-config-operator(6bf1dca1-b191-4796-b326-baac53e84045)\"" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" podUID="6bf1dca1-b191-4796-b326-baac53e84045" Dec 10 11:15:53 crc kubenswrapper[4780]: I1210 11:15:53.072422 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-75499b8cb8-8226n" event={"ID":"2489cfe4-c4b5-4c9b-9977-02d103de7937","Type":"ContainerStarted","Data":"3c7fbe02682c0c461c855077d044f9259325271b4689568e4ddf4d4fb3d77dfc"} Dec 10 11:15:53 crc kubenswrapper[4780]: I1210 11:15:53.080552 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-8579f5f4d5-t2zl7" event={"ID":"5e7c227f-4a69-4a34-a847-b9bf3e4ed937","Type":"ContainerStarted","Data":"4a964f364815c5c16c288a49ee383a94eeeccca7af998bbcf44ee1c37ffc763f"} Dec 10 11:15:53 crc kubenswrapper[4780]: I1210 11:15:53.786079 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0" Dec 10 11:15:53 crc kubenswrapper[4780]: I1210 11:15:53.789329 4780 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Dec 10 11:15:53 crc kubenswrapper[4780]: I1210 11:15:53.794041 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0" Dec 10 11:15:54 crc kubenswrapper[4780]: I1210 11:15:54.111626 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-8579f5f4d5-t2zl7" 
event={"ID":"5e7c227f-4a69-4a34-a847-b9bf3e4ed937","Type":"ContainerStarted","Data":"13233533b2f80dda5425841c6bc242add5fc758ea13155f48f47edbed034d988"} Dec 10 11:15:54 crc kubenswrapper[4780]: I1210 11:15:54.114459 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/keystone-8579f5f4d5-t2zl7" Dec 10 11:15:54 crc kubenswrapper[4780]: I1210 11:15:54.146673 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-75499b8cb8-8226n" event={"ID":"2489cfe4-c4b5-4c9b-9977-02d103de7937","Type":"ContainerStarted","Data":"41af12d03d1018a7f77bac5a99fe47ab362afe5f94632185f4b192a626c93678"} Dec 10 11:15:54 crc kubenswrapper[4780]: I1210 11:15:54.146743 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-75499b8cb8-8226n" event={"ID":"2489cfe4-c4b5-4c9b-9977-02d103de7937","Type":"ContainerStarted","Data":"0e1e3cdb00ed355d16ea75a605bba14c62c662f979ffe2b1a8f4b189b0e99667"} Dec 10 11:15:54 crc kubenswrapper[4780]: I1210 11:15:54.146768 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/placement-75499b8cb8-8226n" Dec 10 11:15:54 crc kubenswrapper[4780]: I1210 11:15:54.146802 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/placement-75499b8cb8-8226n" Dec 10 11:15:54 crc kubenswrapper[4780]: I1210 11:15:54.187556 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-8579f5f4d5-t2zl7" podStartSLOduration=3.187411576 podStartE2EDuration="3.187411576s" podCreationTimestamp="2025-12-10 11:15:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 11:15:54.16756785 +0000 UTC m=+1859.020961313" watchObservedRunningTime="2025-12-10 11:15:54.187411576 +0000 UTC m=+1859.040805019" Dec 10 11:15:54 crc kubenswrapper[4780]: I1210 11:15:54.233520 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/placement-75499b8cb8-8226n" podStartSLOduration=3.2334809509999998 podStartE2EDuration="3.233480951s" podCreationTimestamp="2025-12-10 11:15:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 11:15:54.2252064 +0000 UTC m=+1859.078599843" watchObservedRunningTime="2025-12-10 11:15:54.233480951 +0000 UTC m=+1859.086874394" Dec 10 11:15:56 crc kubenswrapper[4780]: I1210 11:15:56.188456 4780 generic.go:334] "Generic (PLEG): container finished" podID="253b0c60-6211-4e23-921c-b8c34ccc4e25" containerID="4b80868b847dad59daacf4883d411e8b6a1644f6aacfd5b357c1061b720450e0" exitCode=0 Dec 10 11:15:56 crc kubenswrapper[4780]: I1210 11:15:56.188590 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-db-sync-vcml6" event={"ID":"253b0c60-6211-4e23-921c-b8c34ccc4e25","Type":"ContainerDied","Data":"4b80868b847dad59daacf4883d411e8b6a1644f6aacfd5b357c1061b720450e0"} Dec 10 11:16:02 crc kubenswrapper[4780]: I1210 11:16:02.709747 4780 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-db-sync-vcml6" Dec 10 11:16:02 crc kubenswrapper[4780]: I1210 11:16:02.742639 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2hhlf\" (UniqueName: \"kubernetes.io/projected/253b0c60-6211-4e23-921c-b8c34ccc4e25-kube-api-access-2hhlf\") pod \"253b0c60-6211-4e23-921c-b8c34ccc4e25\" (UID: \"253b0c60-6211-4e23-921c-b8c34ccc4e25\") " Dec 10 11:16:02 crc kubenswrapper[4780]: I1210 11:16:02.742733 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/253b0c60-6211-4e23-921c-b8c34ccc4e25-config-data\") pod \"253b0c60-6211-4e23-921c-b8c34ccc4e25\" (UID: \"253b0c60-6211-4e23-921c-b8c34ccc4e25\") " Dec 10 11:16:02 crc kubenswrapper[4780]: I1210 11:16:02.743013 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/253b0c60-6211-4e23-921c-b8c34ccc4e25-combined-ca-bundle\") pod \"253b0c60-6211-4e23-921c-b8c34ccc4e25\" (UID: \"253b0c60-6211-4e23-921c-b8c34ccc4e25\") " Dec 10 11:16:02 crc kubenswrapper[4780]: I1210 11:16:02.751005 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/253b0c60-6211-4e23-921c-b8c34ccc4e25-kube-api-access-2hhlf" (OuterVolumeSpecName: "kube-api-access-2hhlf") pod "253b0c60-6211-4e23-921c-b8c34ccc4e25" (UID: "253b0c60-6211-4e23-921c-b8c34ccc4e25"). InnerVolumeSpecName "kube-api-access-2hhlf". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:16:02 crc kubenswrapper[4780]: I1210 11:16:02.796158 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/253b0c60-6211-4e23-921c-b8c34ccc4e25-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "253b0c60-6211-4e23-921c-b8c34ccc4e25" (UID: "253b0c60-6211-4e23-921c-b8c34ccc4e25"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:16:02 crc kubenswrapper[4780]: I1210 11:16:02.850307 4780 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2hhlf\" (UniqueName: \"kubernetes.io/projected/253b0c60-6211-4e23-921c-b8c34ccc4e25-kube-api-access-2hhlf\") on node \"crc\" DevicePath \"\"" Dec 10 11:16:02 crc kubenswrapper[4780]: I1210 11:16:02.850358 4780 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/253b0c60-6211-4e23-921c-b8c34ccc4e25-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 10 11:16:02 crc kubenswrapper[4780]: I1210 11:16:02.894273 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/253b0c60-6211-4e23-921c-b8c34ccc4e25-config-data" (OuterVolumeSpecName: "config-data") pod "253b0c60-6211-4e23-921c-b8c34ccc4e25" (UID: "253b0c60-6211-4e23-921c-b8c34ccc4e25"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:16:02 crc kubenswrapper[4780]: I1210 11:16:02.953299 4780 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/253b0c60-6211-4e23-921c-b8c34ccc4e25-config-data\") on node \"crc\" DevicePath \"\"" Dec 10 11:16:03 crc kubenswrapper[4780]: I1210 11:16:03.314829 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-db-sync-vcml6" event={"ID":"253b0c60-6211-4e23-921c-b8c34ccc4e25","Type":"ContainerDied","Data":"da679a3b98a99b4b54e1bea3049e6c712f4b19373ec12fe4f824552c7caa52cb"} Dec 10 11:16:03 crc kubenswrapper[4780]: I1210 11:16:03.314917 4780 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="da679a3b98a99b4b54e1bea3049e6c712f4b19373ec12fe4f824552c7caa52cb" Dec 10 11:16:03 crc kubenswrapper[4780]: I1210 11:16:03.315090 4780 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-db-sync-vcml6" Dec 10 11:16:06 crc kubenswrapper[4780]: I1210 11:16:06.961219 4780 scope.go:117] "RemoveContainer" containerID="90f38aa24e74e78a263a82b742bebf91991ff25f2212114904e33abbbd82de16" Dec 10 11:16:06 crc kubenswrapper[4780]: E1210 11:16:06.962121 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xhdr5_openshift-machine-config-operator(6bf1dca1-b191-4796-b326-baac53e84045)\"" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" podUID="6bf1dca1-b191-4796-b326-baac53e84045" Dec 10 11:16:07 crc kubenswrapper[4780]: E1210 11:16:07.483084 4780 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/ubi9/httpd-24:latest" Dec 10 11:16:07 crc kubenswrapper[4780]: E1210 11:16:07.483379 4780 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:proxy-httpd,Image:registry.redhat.io/ubi9/httpd-24:latest,Command:[/usr/sbin/httpd],Args:[-DFOREGROUND],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:proxy-httpd,HostPort:0,ContainerPort:3000,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/httpd/conf/httpd.conf,SubPath:httpd.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/httpd/conf.d/ssl.conf,SubPath:ssl.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:run-httpd,ReadOnly:false,MountPath:/run/httpd,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:log-httpd,ReadOnly:false,MountPath:/var/log/httpd,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-994dj,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/,Port:{0 3000 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:300,TimeoutSeconds:30,PeriodSeconds:30,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/,Port:{0 3000 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:10,TimeoutSeconds:30,PeriodSeconds:30,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ceilometer-0_openstack(d74fb1d1-0533-4202-9ee5-4a1c04ca6971): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Dec 10 11:16:07 crc kubenswrapper[4780]: E1210 11:16:07.484868 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"ceilometer-central-agent\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\", failed to \"StartContainer\" for \"ceilometer-notification-agent\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\", failed to \"StartContainer\" for \"proxy-httpd\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"]" pod="openstack/ceilometer-0" podUID="d74fb1d1-0533-4202-9ee5-4a1c04ca6971" Dec 10 11:16:08 crc kubenswrapper[4780]: I1210 11:16:08.388681 4780 generic.go:334] "Generic (PLEG): container finished" podID="0613207e-c071-4295-a536-f037ee6fe446" containerID="3502d3dd4df4120fbd326913b61950fb7220222a5f3c9dd175ae7dbd20b0f414" exitCode=0 Dec 10 11:16:08 crc kubenswrapper[4780]: I1210 11:16:08.389131 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-cwb2h" event={"ID":"0613207e-c071-4295-a536-f037ee6fe446","Type":"ContainerDied","Data":"3502d3dd4df4120fbd326913b61950fb7220222a5f3c9dd175ae7dbd20b0f414"} Dec 10 11:16:08 crc kubenswrapper[4780]: I1210 11:16:08.392184 4780 generic.go:334] "Generic (PLEG): container finished" podID="1af5d3ff-e54a-4a1a-89f4-b105fe6dbbd9" containerID="3d15fd52a10848d5989279b9af385fb02e6adedff6cbb19540bcc05ea3be56ee" exitCode=0 Dec 10 11:16:08 crc kubenswrapper[4780]: I1210 11:16:08.392376 4780 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="d74fb1d1-0533-4202-9ee5-4a1c04ca6971" containerName="sg-core" containerID="cri-o://d3d3e89a852b03628da7d97fa6fe05c67e72e39b605215264e2e5401ccd2fe0b" gracePeriod=30 Dec 10 11:16:08 crc kubenswrapper[4780]: I1210 11:16:08.392468 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-hn59m" event={"ID":"1af5d3ff-e54a-4a1a-89f4-b105fe6dbbd9","Type":"ContainerDied","Data":"3d15fd52a10848d5989279b9af385fb02e6adedff6cbb19540bcc05ea3be56ee"} Dec 10 11:16:09 crc kubenswrapper[4780]: I1210 11:16:09.051326 4780 
util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Dec 10 11:16:09 crc kubenswrapper[4780]: I1210 11:16:09.237045 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d74fb1d1-0533-4202-9ee5-4a1c04ca6971-combined-ca-bundle\") pod \"d74fb1d1-0533-4202-9ee5-4a1c04ca6971\" (UID: \"d74fb1d1-0533-4202-9ee5-4a1c04ca6971\") " Dec 10 11:16:09 crc kubenswrapper[4780]: I1210 11:16:09.237191 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d74fb1d1-0533-4202-9ee5-4a1c04ca6971-config-data\") pod \"d74fb1d1-0533-4202-9ee5-4a1c04ca6971\" (UID: \"d74fb1d1-0533-4202-9ee5-4a1c04ca6971\") " Dec 10 11:16:09 crc kubenswrapper[4780]: I1210 11:16:09.237223 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d74fb1d1-0533-4202-9ee5-4a1c04ca6971-log-httpd\") pod \"d74fb1d1-0533-4202-9ee5-4a1c04ca6971\" (UID: \"d74fb1d1-0533-4202-9ee5-4a1c04ca6971\") " Dec 10 11:16:09 crc kubenswrapper[4780]: I1210 11:16:09.237333 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d74fb1d1-0533-4202-9ee5-4a1c04ca6971-scripts\") pod \"d74fb1d1-0533-4202-9ee5-4a1c04ca6971\" (UID: \"d74fb1d1-0533-4202-9ee5-4a1c04ca6971\") " Dec 10 11:16:09 crc kubenswrapper[4780]: I1210 11:16:09.237583 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d74fb1d1-0533-4202-9ee5-4a1c04ca6971-run-httpd\") pod \"d74fb1d1-0533-4202-9ee5-4a1c04ca6971\" (UID: \"d74fb1d1-0533-4202-9ee5-4a1c04ca6971\") " Dec 10 11:16:09 crc kubenswrapper[4780]: I1210 11:16:09.237630 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/d74fb1d1-0533-4202-9ee5-4a1c04ca6971-sg-core-conf-yaml\") pod \"d74fb1d1-0533-4202-9ee5-4a1c04ca6971\" (UID: \"d74fb1d1-0533-4202-9ee5-4a1c04ca6971\") " Dec 10 11:16:09 crc kubenswrapper[4780]: I1210 11:16:09.237768 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-994dj\" (UniqueName: \"kubernetes.io/projected/d74fb1d1-0533-4202-9ee5-4a1c04ca6971-kube-api-access-994dj\") pod \"d74fb1d1-0533-4202-9ee5-4a1c04ca6971\" (UID: \"d74fb1d1-0533-4202-9ee5-4a1c04ca6971\") " Dec 10 11:16:09 crc kubenswrapper[4780]: I1210 11:16:09.238209 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d74fb1d1-0533-4202-9ee5-4a1c04ca6971-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "d74fb1d1-0533-4202-9ee5-4a1c04ca6971" (UID: "d74fb1d1-0533-4202-9ee5-4a1c04ca6971"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 11:16:09 crc kubenswrapper[4780]: I1210 11:16:09.238266 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d74fb1d1-0533-4202-9ee5-4a1c04ca6971-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "d74fb1d1-0533-4202-9ee5-4a1c04ca6971" (UID: "d74fb1d1-0533-4202-9ee5-4a1c04ca6971"). InnerVolumeSpecName "log-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 11:16:09 crc kubenswrapper[4780]: I1210 11:16:09.239176 4780 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d74fb1d1-0533-4202-9ee5-4a1c04ca6971-run-httpd\") on node \"crc\" DevicePath \"\"" Dec 10 11:16:09 crc kubenswrapper[4780]: I1210 11:16:09.239204 4780 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d74fb1d1-0533-4202-9ee5-4a1c04ca6971-log-httpd\") on node \"crc\" DevicePath \"\"" Dec 10 11:16:09 crc kubenswrapper[4780]: I1210 11:16:09.246551 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d74fb1d1-0533-4202-9ee5-4a1c04ca6971-kube-api-access-994dj" (OuterVolumeSpecName: "kube-api-access-994dj") pod "d74fb1d1-0533-4202-9ee5-4a1c04ca6971" (UID: "d74fb1d1-0533-4202-9ee5-4a1c04ca6971"). InnerVolumeSpecName "kube-api-access-994dj". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:16:09 crc kubenswrapper[4780]: I1210 11:16:09.247717 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d74fb1d1-0533-4202-9ee5-4a1c04ca6971-config-data" (OuterVolumeSpecName: "config-data") pod "d74fb1d1-0533-4202-9ee5-4a1c04ca6971" (UID: "d74fb1d1-0533-4202-9ee5-4a1c04ca6971"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:16:09 crc kubenswrapper[4780]: I1210 11:16:09.251152 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d74fb1d1-0533-4202-9ee5-4a1c04ca6971-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "d74fb1d1-0533-4202-9ee5-4a1c04ca6971" (UID: "d74fb1d1-0533-4202-9ee5-4a1c04ca6971"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:16:09 crc kubenswrapper[4780]: I1210 11:16:09.251870 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d74fb1d1-0533-4202-9ee5-4a1c04ca6971-scripts" (OuterVolumeSpecName: "scripts") pod "d74fb1d1-0533-4202-9ee5-4a1c04ca6971" (UID: "d74fb1d1-0533-4202-9ee5-4a1c04ca6971"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:16:09 crc kubenswrapper[4780]: I1210 11:16:09.276604 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d74fb1d1-0533-4202-9ee5-4a1c04ca6971-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "d74fb1d1-0533-4202-9ee5-4a1c04ca6971" (UID: "d74fb1d1-0533-4202-9ee5-4a1c04ca6971"). InnerVolumeSpecName "sg-core-conf-yaml". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:16:09 crc kubenswrapper[4780]: I1210 11:16:09.342552 4780 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/d74fb1d1-0533-4202-9ee5-4a1c04ca6971-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Dec 10 11:16:09 crc kubenswrapper[4780]: I1210 11:16:09.342626 4780 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-994dj\" (UniqueName: \"kubernetes.io/projected/d74fb1d1-0533-4202-9ee5-4a1c04ca6971-kube-api-access-994dj\") on node \"crc\" DevicePath \"\"" Dec 10 11:16:09 crc kubenswrapper[4780]: I1210 11:16:09.342641 4780 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d74fb1d1-0533-4202-9ee5-4a1c04ca6971-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 10 11:16:09 crc kubenswrapper[4780]: I1210 11:16:09.342653 4780 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d74fb1d1-0533-4202-9ee5-4a1c04ca6971-config-data\") on node \"crc\" DevicePath \"\"" Dec 10 11:16:09 crc kubenswrapper[4780]: I1210 11:16:09.342662 4780 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d74fb1d1-0533-4202-9ee5-4a1c04ca6971-scripts\") on node \"crc\" DevicePath \"\"" Dec 10 11:16:09 crc kubenswrapper[4780]: I1210 11:16:09.408329 4780 generic.go:334] "Generic (PLEG): container finished" podID="d74fb1d1-0533-4202-9ee5-4a1c04ca6971" containerID="d3d3e89a852b03628da7d97fa6fe05c67e72e39b605215264e2e5401ccd2fe0b" exitCode=2 Dec 10 11:16:09 crc kubenswrapper[4780]: I1210 11:16:09.408447 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"d74fb1d1-0533-4202-9ee5-4a1c04ca6971","Type":"ContainerDied","Data":"d3d3e89a852b03628da7d97fa6fe05c67e72e39b605215264e2e5401ccd2fe0b"} Dec 10 11:16:09 crc kubenswrapper[4780]: I1210 11:16:09.408904 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"d74fb1d1-0533-4202-9ee5-4a1c04ca6971","Type":"ContainerDied","Data":"cc1c16c189e2cb86d4b630164daef210aa21060b378d34b6bd382d8952b48cfd"} Dec 10 11:16:09 crc kubenswrapper[4780]: I1210 11:16:09.408497 4780 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Dec 10 11:16:09 crc kubenswrapper[4780]: I1210 11:16:09.408977 4780 scope.go:117] "RemoveContainer" containerID="d3d3e89a852b03628da7d97fa6fe05c67e72e39b605215264e2e5401ccd2fe0b" Dec 10 11:16:09 crc kubenswrapper[4780]: I1210 11:16:09.463944 4780 scope.go:117] "RemoveContainer" containerID="d3d3e89a852b03628da7d97fa6fe05c67e72e39b605215264e2e5401ccd2fe0b" Dec 10 11:16:09 crc kubenswrapper[4780]: E1210 11:16:09.465051 4780 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d3d3e89a852b03628da7d97fa6fe05c67e72e39b605215264e2e5401ccd2fe0b\": container with ID starting with d3d3e89a852b03628da7d97fa6fe05c67e72e39b605215264e2e5401ccd2fe0b not found: ID does not exist" containerID="d3d3e89a852b03628da7d97fa6fe05c67e72e39b605215264e2e5401ccd2fe0b" Dec 10 11:16:09 crc kubenswrapper[4780]: I1210 11:16:09.465102 4780 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d3d3e89a852b03628da7d97fa6fe05c67e72e39b605215264e2e5401ccd2fe0b"} err="failed to get container status \"d3d3e89a852b03628da7d97fa6fe05c67e72e39b605215264e2e5401ccd2fe0b\": rpc error: code = NotFound desc = could not find container \"d3d3e89a852b03628da7d97fa6fe05c67e72e39b605215264e2e5401ccd2fe0b\": container with ID starting with d3d3e89a852b03628da7d97fa6fe05c67e72e39b605215264e2e5401ccd2fe0b not found: ID does not exist" Dec 10 11:16:09 crc kubenswrapper[4780]: I1210 11:16:09.528028 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Dec 10 11:16:09 crc kubenswrapper[4780]: I1210 11:16:09.553613 4780 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Dec 10 11:16:09 crc kubenswrapper[4780]: I1210 11:16:09.578060 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Dec 10 11:16:09 crc kubenswrapper[4780]: E1210 11:16:09.578892 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="253b0c60-6211-4e23-921c-b8c34ccc4e25" containerName="heat-db-sync" Dec 10 11:16:09 crc kubenswrapper[4780]: I1210 11:16:09.579078 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="253b0c60-6211-4e23-921c-b8c34ccc4e25" containerName="heat-db-sync" Dec 10 11:16:09 crc kubenswrapper[4780]: E1210 11:16:09.579135 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d74fb1d1-0533-4202-9ee5-4a1c04ca6971" containerName="sg-core" Dec 10 11:16:09 crc kubenswrapper[4780]: I1210 11:16:09.579148 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="d74fb1d1-0533-4202-9ee5-4a1c04ca6971" containerName="sg-core" Dec 10 11:16:09 crc kubenswrapper[4780]: I1210 11:16:09.579506 4780 memory_manager.go:354] "RemoveStaleState removing state" podUID="d74fb1d1-0533-4202-9ee5-4a1c04ca6971" containerName="sg-core" Dec 10 11:16:09 crc kubenswrapper[4780]: I1210 11:16:09.579539 4780 memory_manager.go:354] "RemoveStaleState removing state" podUID="253b0c60-6211-4e23-921c-b8c34ccc4e25" containerName="heat-db-sync" Dec 10 11:16:09 crc kubenswrapper[4780]: I1210 11:16:09.582548 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Dec 10 11:16:09 crc kubenswrapper[4780]: I1210 11:16:09.585688 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Dec 10 11:16:09 crc kubenswrapper[4780]: I1210 11:16:09.587376 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Dec 10 11:16:09 crc kubenswrapper[4780]: I1210 11:16:09.597430 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Dec 10 11:16:09 crc kubenswrapper[4780]: I1210 11:16:09.753976 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1f099496-ee6c-44ff-9c42-c6584b8fb2ad-scripts\") pod \"ceilometer-0\" (UID: \"1f099496-ee6c-44ff-9c42-c6584b8fb2ad\") " pod="openstack/ceilometer-0" Dec 10 11:16:09 crc kubenswrapper[4780]: I1210 11:16:09.754114 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/1f099496-ee6c-44ff-9c42-c6584b8fb2ad-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"1f099496-ee6c-44ff-9c42-c6584b8fb2ad\") " pod="openstack/ceilometer-0" Dec 10 11:16:09 crc kubenswrapper[4780]: I1210 11:16:09.754273 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1f099496-ee6c-44ff-9c42-c6584b8fb2ad-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"1f099496-ee6c-44ff-9c42-c6584b8fb2ad\") " pod="openstack/ceilometer-0" Dec 10 11:16:09 crc kubenswrapper[4780]: I1210 11:16:09.754329 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1f099496-ee6c-44ff-9c42-c6584b8fb2ad-run-httpd\") pod \"ceilometer-0\" (UID: \"1f099496-ee6c-44ff-9c42-c6584b8fb2ad\") " pod="openstack/ceilometer-0" Dec 10 11:16:09 crc kubenswrapper[4780]: I1210 11:16:09.754375 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5rm5l\" (UniqueName: \"kubernetes.io/projected/1f099496-ee6c-44ff-9c42-c6584b8fb2ad-kube-api-access-5rm5l\") pod \"ceilometer-0\" (UID: \"1f099496-ee6c-44ff-9c42-c6584b8fb2ad\") " pod="openstack/ceilometer-0" Dec 10 11:16:09 crc kubenswrapper[4780]: I1210 11:16:09.754408 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1f099496-ee6c-44ff-9c42-c6584b8fb2ad-log-httpd\") pod \"ceilometer-0\" (UID: \"1f099496-ee6c-44ff-9c42-c6584b8fb2ad\") " pod="openstack/ceilometer-0" Dec 10 11:16:09 crc kubenswrapper[4780]: I1210 11:16:09.754475 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1f099496-ee6c-44ff-9c42-c6584b8fb2ad-config-data\") pod \"ceilometer-0\" (UID: \"1f099496-ee6c-44ff-9c42-c6584b8fb2ad\") " pod="openstack/ceilometer-0" Dec 10 11:16:09 crc kubenswrapper[4780]: I1210 11:16:09.857651 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1f099496-ee6c-44ff-9c42-c6584b8fb2ad-config-data\") pod \"ceilometer-0\" (UID: \"1f099496-ee6c-44ff-9c42-c6584b8fb2ad\") " pod="openstack/ceilometer-0" Dec 10 11:16:09 crc kubenswrapper[4780]: I1210 11:16:09.858023 
4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1f099496-ee6c-44ff-9c42-c6584b8fb2ad-scripts\") pod \"ceilometer-0\" (UID: \"1f099496-ee6c-44ff-9c42-c6584b8fb2ad\") " pod="openstack/ceilometer-0" Dec 10 11:16:09 crc kubenswrapper[4780]: I1210 11:16:09.858140 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/1f099496-ee6c-44ff-9c42-c6584b8fb2ad-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"1f099496-ee6c-44ff-9c42-c6584b8fb2ad\") " pod="openstack/ceilometer-0" Dec 10 11:16:09 crc kubenswrapper[4780]: I1210 11:16:09.858252 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1f099496-ee6c-44ff-9c42-c6584b8fb2ad-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"1f099496-ee6c-44ff-9c42-c6584b8fb2ad\") " pod="openstack/ceilometer-0" Dec 10 11:16:09 crc kubenswrapper[4780]: I1210 11:16:09.858287 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1f099496-ee6c-44ff-9c42-c6584b8fb2ad-run-httpd\") pod \"ceilometer-0\" (UID: \"1f099496-ee6c-44ff-9c42-c6584b8fb2ad\") " pod="openstack/ceilometer-0" Dec 10 11:16:09 crc kubenswrapper[4780]: I1210 11:16:09.858330 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5rm5l\" (UniqueName: \"kubernetes.io/projected/1f099496-ee6c-44ff-9c42-c6584b8fb2ad-kube-api-access-5rm5l\") pod \"ceilometer-0\" (UID: \"1f099496-ee6c-44ff-9c42-c6584b8fb2ad\") " pod="openstack/ceilometer-0" Dec 10 11:16:09 crc kubenswrapper[4780]: I1210 11:16:09.858372 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1f099496-ee6c-44ff-9c42-c6584b8fb2ad-log-httpd\") pod \"ceilometer-0\" (UID: \"1f099496-ee6c-44ff-9c42-c6584b8fb2ad\") " pod="openstack/ceilometer-0" Dec 10 11:16:09 crc kubenswrapper[4780]: I1210 11:16:09.976866 4780 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d74fb1d1-0533-4202-9ee5-4a1c04ca6971" path="/var/lib/kubelet/pods/d74fb1d1-0533-4202-9ee5-4a1c04ca6971/volumes" Dec 10 11:16:10 crc kubenswrapper[4780]: I1210 11:16:10.179497 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1f099496-ee6c-44ff-9c42-c6584b8fb2ad-log-httpd\") pod \"ceilometer-0\" (UID: \"1f099496-ee6c-44ff-9c42-c6584b8fb2ad\") " pod="openstack/ceilometer-0" Dec 10 11:16:10 crc kubenswrapper[4780]: I1210 11:16:10.179593 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1f099496-ee6c-44ff-9c42-c6584b8fb2ad-run-httpd\") pod \"ceilometer-0\" (UID: \"1f099496-ee6c-44ff-9c42-c6584b8fb2ad\") " pod="openstack/ceilometer-0" Dec 10 11:16:10 crc kubenswrapper[4780]: I1210 11:16:10.188259 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/1f099496-ee6c-44ff-9c42-c6584b8fb2ad-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"1f099496-ee6c-44ff-9c42-c6584b8fb2ad\") " pod="openstack/ceilometer-0" Dec 10 11:16:10 crc kubenswrapper[4780]: I1210 11:16:10.188348 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/1f099496-ee6c-44ff-9c42-c6584b8fb2ad-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"1f099496-ee6c-44ff-9c42-c6584b8fb2ad\") " pod="openstack/ceilometer-0" Dec 10 11:16:10 crc kubenswrapper[4780]: I1210 11:16:10.188804 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1f099496-ee6c-44ff-9c42-c6584b8fb2ad-config-data\") pod \"ceilometer-0\" (UID: \"1f099496-ee6c-44ff-9c42-c6584b8fb2ad\") " pod="openstack/ceilometer-0" Dec 10 11:16:10 crc kubenswrapper[4780]: I1210 11:16:10.189256 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5rm5l\" (UniqueName: \"kubernetes.io/projected/1f099496-ee6c-44ff-9c42-c6584b8fb2ad-kube-api-access-5rm5l\") pod \"ceilometer-0\" (UID: \"1f099496-ee6c-44ff-9c42-c6584b8fb2ad\") " pod="openstack/ceilometer-0" Dec 10 11:16:10 crc kubenswrapper[4780]: I1210 11:16:10.203210 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1f099496-ee6c-44ff-9c42-c6584b8fb2ad-scripts\") pod \"ceilometer-0\" (UID: \"1f099496-ee6c-44ff-9c42-c6584b8fb2ad\") " pod="openstack/ceilometer-0" Dec 10 11:16:10 crc kubenswrapper[4780]: I1210 11:16:10.223210 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Dec 10 11:16:10 crc kubenswrapper[4780]: I1210 11:16:10.399592 4780 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-sync-cwb2h" Dec 10 11:16:10 crc kubenswrapper[4780]: I1210 11:16:10.408368 4780 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-sync-hn59m" Dec 10 11:16:10 crc kubenswrapper[4780]: I1210 11:16:10.476288 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-cwb2h" event={"ID":"0613207e-c071-4295-a536-f037ee6fe446","Type":"ContainerDied","Data":"f6f5b6398f37368334b2a916953d352093a3fc5c9cc53ed1f266f23146d822fd"} Dec 10 11:16:10 crc kubenswrapper[4780]: I1210 11:16:10.476373 4780 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f6f5b6398f37368334b2a916953d352093a3fc5c9cc53ed1f266f23146d822fd" Dec 10 11:16:10 crc kubenswrapper[4780]: I1210 11:16:10.476503 4780 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-sync-cwb2h" Dec 10 11:16:10 crc kubenswrapper[4780]: I1210 11:16:10.512187 4780 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-db-sync-hn59m" Dec 10 11:16:10 crc kubenswrapper[4780]: I1210 11:16:10.513098 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-hn59m" event={"ID":"1af5d3ff-e54a-4a1a-89f4-b105fe6dbbd9","Type":"ContainerDied","Data":"65a06acba54c7499c055d1499360406b15c55eeeecdff2bd7ec63e1abcad4deb"} Dec 10 11:16:10 crc kubenswrapper[4780]: I1210 11:16:10.513228 4780 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="65a06acba54c7499c055d1499360406b15c55eeeecdff2bd7ec63e1abcad4deb" Dec 10 11:16:10 crc kubenswrapper[4780]: I1210 11:16:10.576567 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/0613207e-c071-4295-a536-f037ee6fe446-config\") pod \"0613207e-c071-4295-a536-f037ee6fe446\" (UID: \"0613207e-c071-4295-a536-f037ee6fe446\") " Dec 10 11:16:10 crc kubenswrapper[4780]: I1210 11:16:10.576841 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-72d29\" (UniqueName: \"kubernetes.io/projected/0613207e-c071-4295-a536-f037ee6fe446-kube-api-access-72d29\") pod \"0613207e-c071-4295-a536-f037ee6fe446\" (UID: \"0613207e-c071-4295-a536-f037ee6fe446\") " Dec 10 11:16:10 crc kubenswrapper[4780]: I1210 11:16:10.576878 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gtvp7\" (UniqueName: \"kubernetes.io/projected/1af5d3ff-e54a-4a1a-89f4-b105fe6dbbd9-kube-api-access-gtvp7\") pod \"1af5d3ff-e54a-4a1a-89f4-b105fe6dbbd9\" (UID: \"1af5d3ff-e54a-4a1a-89f4-b105fe6dbbd9\") " Dec 10 11:16:10 crc kubenswrapper[4780]: I1210 11:16:10.576940 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/1af5d3ff-e54a-4a1a-89f4-b105fe6dbbd9-db-sync-config-data\") pod \"1af5d3ff-e54a-4a1a-89f4-b105fe6dbbd9\" (UID: \"1af5d3ff-e54a-4a1a-89f4-b105fe6dbbd9\") " Dec 10 11:16:10 crc kubenswrapper[4780]: I1210 11:16:10.577044 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0613207e-c071-4295-a536-f037ee6fe446-combined-ca-bundle\") pod \"0613207e-c071-4295-a536-f037ee6fe446\" (UID: \"0613207e-c071-4295-a536-f037ee6fe446\") " Dec 10 11:16:10 crc kubenswrapper[4780]: I1210 11:16:10.577169 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1af5d3ff-e54a-4a1a-89f4-b105fe6dbbd9-combined-ca-bundle\") pod \"1af5d3ff-e54a-4a1a-89f4-b105fe6dbbd9\" (UID: \"1af5d3ff-e54a-4a1a-89f4-b105fe6dbbd9\") " Dec 10 11:16:10 crc kubenswrapper[4780]: I1210 11:16:10.588059 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1af5d3ff-e54a-4a1a-89f4-b105fe6dbbd9-kube-api-access-gtvp7" (OuterVolumeSpecName: "kube-api-access-gtvp7") pod "1af5d3ff-e54a-4a1a-89f4-b105fe6dbbd9" (UID: "1af5d3ff-e54a-4a1a-89f4-b105fe6dbbd9"). InnerVolumeSpecName "kube-api-access-gtvp7". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:16:10 crc kubenswrapper[4780]: I1210 11:16:10.604593 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0613207e-c071-4295-a536-f037ee6fe446-kube-api-access-72d29" (OuterVolumeSpecName: "kube-api-access-72d29") pod "0613207e-c071-4295-a536-f037ee6fe446" (UID: "0613207e-c071-4295-a536-f037ee6fe446"). InnerVolumeSpecName "kube-api-access-72d29". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:16:10 crc kubenswrapper[4780]: I1210 11:16:10.631165 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1af5d3ff-e54a-4a1a-89f4-b105fe6dbbd9-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "1af5d3ff-e54a-4a1a-89f4-b105fe6dbbd9" (UID: "1af5d3ff-e54a-4a1a-89f4-b105fe6dbbd9"). InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:16:10 crc kubenswrapper[4780]: I1210 11:16:10.638205 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1af5d3ff-e54a-4a1a-89f4-b105fe6dbbd9-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "1af5d3ff-e54a-4a1a-89f4-b105fe6dbbd9" (UID: "1af5d3ff-e54a-4a1a-89f4-b105fe6dbbd9"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:16:10 crc kubenswrapper[4780]: I1210 11:16:10.661188 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0613207e-c071-4295-a536-f037ee6fe446-config" (OuterVolumeSpecName: "config") pod "0613207e-c071-4295-a536-f037ee6fe446" (UID: "0613207e-c071-4295-a536-f037ee6fe446"). InnerVolumeSpecName "config". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:16:10 crc kubenswrapper[4780]: I1210 11:16:10.668349 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0613207e-c071-4295-a536-f037ee6fe446-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "0613207e-c071-4295-a536-f037ee6fe446" (UID: "0613207e-c071-4295-a536-f037ee6fe446"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:16:10 crc kubenswrapper[4780]: I1210 11:16:10.683724 4780 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-72d29\" (UniqueName: \"kubernetes.io/projected/0613207e-c071-4295-a536-f037ee6fe446-kube-api-access-72d29\") on node \"crc\" DevicePath \"\"" Dec 10 11:16:10 crc kubenswrapper[4780]: I1210 11:16:10.683807 4780 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gtvp7\" (UniqueName: \"kubernetes.io/projected/1af5d3ff-e54a-4a1a-89f4-b105fe6dbbd9-kube-api-access-gtvp7\") on node \"crc\" DevicePath \"\"" Dec 10 11:16:10 crc kubenswrapper[4780]: I1210 11:16:10.683823 4780 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/1af5d3ff-e54a-4a1a-89f4-b105fe6dbbd9-db-sync-config-data\") on node \"crc\" DevicePath \"\"" Dec 10 11:16:10 crc kubenswrapper[4780]: I1210 11:16:10.683835 4780 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0613207e-c071-4295-a536-f037ee6fe446-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 10 11:16:10 crc kubenswrapper[4780]: I1210 11:16:10.683852 4780 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1af5d3ff-e54a-4a1a-89f4-b105fe6dbbd9-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 10 11:16:10 crc kubenswrapper[4780]: I1210 11:16:10.683865 4780 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/secret/0613207e-c071-4295-a536-f037ee6fe446-config\") on node \"crc\" DevicePath \"\"" Dec 10 11:16:10 crc kubenswrapper[4780]: I1210 11:16:10.979639 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Dec 10 11:16:10 crc kubenswrapper[4780]: W1210 11:16:10.990517 4780 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod1f099496_ee6c_44ff_9c42_c6584b8fb2ad.slice/crio-dbd23b877d5a4d2609f06c17387931552678f5268574b1a3447ea07aa49ab53a WatchSource:0}: Error finding container dbd23b877d5a4d2609f06c17387931552678f5268574b1a3447ea07aa49ab53a: Status 404 returned error can't find the container with id dbd23b877d5a4d2609f06c17387931552678f5268574b1a3447ea07aa49ab53a Dec 10 11:16:11 crc kubenswrapper[4780]: I1210 11:16:11.544733 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"1f099496-ee6c-44ff-9c42-c6584b8fb2ad","Type":"ContainerStarted","Data":"dbd23b877d5a4d2609f06c17387931552678f5268574b1a3447ea07aa49ab53a"} Dec 10 11:16:11 crc kubenswrapper[4780]: I1210 11:16:11.849013 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-keystone-listener-7c9948ddf4-nxp8k"] Dec 10 11:16:11 crc kubenswrapper[4780]: E1210 11:16:11.849896 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1af5d3ff-e54a-4a1a-89f4-b105fe6dbbd9" containerName="barbican-db-sync" Dec 10 11:16:11 crc kubenswrapper[4780]: I1210 11:16:11.849941 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="1af5d3ff-e54a-4a1a-89f4-b105fe6dbbd9" containerName="barbican-db-sync" Dec 10 11:16:11 crc kubenswrapper[4780]: E1210 11:16:11.850022 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0613207e-c071-4295-a536-f037ee6fe446" containerName="neutron-db-sync" Dec 10 11:16:11 crc kubenswrapper[4780]: I1210 11:16:11.850036 4780 state_mem.go:107] "Deleted 
CPUSet assignment" podUID="0613207e-c071-4295-a536-f037ee6fe446" containerName="neutron-db-sync" Dec 10 11:16:11 crc kubenswrapper[4780]: I1210 11:16:11.850370 4780 memory_manager.go:354] "RemoveStaleState removing state" podUID="1af5d3ff-e54a-4a1a-89f4-b105fe6dbbd9" containerName="barbican-db-sync" Dec 10 11:16:11 crc kubenswrapper[4780]: I1210 11:16:11.850417 4780 memory_manager.go:354] "RemoveStaleState removing state" podUID="0613207e-c071-4295-a536-f037ee6fe446" containerName="neutron-db-sync" Dec 10 11:16:11 crc kubenswrapper[4780]: I1210 11:16:11.852585 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-keystone-listener-7c9948ddf4-nxp8k" Dec 10 11:16:11 crc kubenswrapper[4780]: I1210 11:16:11.860066 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-barbican-dockercfg-np76s" Dec 10 11:16:11 crc kubenswrapper[4780]: I1210 11:16:11.869773 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-worker-57dc4b8bd7-6w5kj"] Dec 10 11:16:11 crc kubenswrapper[4780]: I1210 11:16:11.873948 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-worker-57dc4b8bd7-6w5kj" Dec 10 11:16:11 crc kubenswrapper[4780]: I1210 11:16:11.874064 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-keystone-listener-config-data" Dec 10 11:16:11 crc kubenswrapper[4780]: I1210 11:16:11.874227 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-config-data" Dec 10 11:16:11 crc kubenswrapper[4780]: I1210 11:16:11.882377 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-worker-config-data" Dec 10 11:16:11 crc kubenswrapper[4780]: I1210 11:16:11.906580 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-worker-57dc4b8bd7-6w5kj"] Dec 10 11:16:11 crc kubenswrapper[4780]: I1210 11:16:11.941943 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-keystone-listener-7c9948ddf4-nxp8k"] Dec 10 11:16:12 crc kubenswrapper[4780]: I1210 11:16:11.999002 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/26c869ea-7347-4b65-9a77-2995a7e574ce-logs\") pod \"barbican-keystone-listener-7c9948ddf4-nxp8k\" (UID: \"26c869ea-7347-4b65-9a77-2995a7e574ce\") " pod="openstack/barbican-keystone-listener-7c9948ddf4-nxp8k" Dec 10 11:16:12 crc kubenswrapper[4780]: I1210 11:16:12.001120 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/26c869ea-7347-4b65-9a77-2995a7e574ce-combined-ca-bundle\") pod \"barbican-keystone-listener-7c9948ddf4-nxp8k\" (UID: \"26c869ea-7347-4b65-9a77-2995a7e574ce\") " pod="openstack/barbican-keystone-listener-7c9948ddf4-nxp8k" Dec 10 11:16:12 crc kubenswrapper[4780]: I1210 11:16:12.001399 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/70d1eef3-a181-4ada-897f-4b3b9620e4de-config-data\") pod \"barbican-worker-57dc4b8bd7-6w5kj\" (UID: \"70d1eef3-a181-4ada-897f-4b3b9620e4de\") " pod="openstack/barbican-worker-57dc4b8bd7-6w5kj" Dec 10 11:16:12 crc kubenswrapper[4780]: I1210 11:16:12.001596 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" 
(UniqueName: \"kubernetes.io/secret/70d1eef3-a181-4ada-897f-4b3b9620e4de-combined-ca-bundle\") pod \"barbican-worker-57dc4b8bd7-6w5kj\" (UID: \"70d1eef3-a181-4ada-897f-4b3b9620e4de\") " pod="openstack/barbican-worker-57dc4b8bd7-6w5kj" Dec 10 11:16:12 crc kubenswrapper[4780]: I1210 11:16:12.001681 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/70d1eef3-a181-4ada-897f-4b3b9620e4de-logs\") pod \"barbican-worker-57dc4b8bd7-6w5kj\" (UID: \"70d1eef3-a181-4ada-897f-4b3b9620e4de\") " pod="openstack/barbican-worker-57dc4b8bd7-6w5kj" Dec 10 11:16:12 crc kubenswrapper[4780]: I1210 11:16:12.002089 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/26c869ea-7347-4b65-9a77-2995a7e574ce-config-data-custom\") pod \"barbican-keystone-listener-7c9948ddf4-nxp8k\" (UID: \"26c869ea-7347-4b65-9a77-2995a7e574ce\") " pod="openstack/barbican-keystone-listener-7c9948ddf4-nxp8k" Dec 10 11:16:12 crc kubenswrapper[4780]: I1210 11:16:12.002945 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bdcj8\" (UniqueName: \"kubernetes.io/projected/70d1eef3-a181-4ada-897f-4b3b9620e4de-kube-api-access-bdcj8\") pod \"barbican-worker-57dc4b8bd7-6w5kj\" (UID: \"70d1eef3-a181-4ada-897f-4b3b9620e4de\") " pod="openstack/barbican-worker-57dc4b8bd7-6w5kj" Dec 10 11:16:12 crc kubenswrapper[4780]: I1210 11:16:12.006096 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4tkcv\" (UniqueName: \"kubernetes.io/projected/26c869ea-7347-4b65-9a77-2995a7e574ce-kube-api-access-4tkcv\") pod \"barbican-keystone-listener-7c9948ddf4-nxp8k\" (UID: \"26c869ea-7347-4b65-9a77-2995a7e574ce\") " pod="openstack/barbican-keystone-listener-7c9948ddf4-nxp8k" Dec 10 11:16:12 crc kubenswrapper[4780]: I1210 11:16:12.014388 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/70d1eef3-a181-4ada-897f-4b3b9620e4de-config-data-custom\") pod \"barbican-worker-57dc4b8bd7-6w5kj\" (UID: \"70d1eef3-a181-4ada-897f-4b3b9620e4de\") " pod="openstack/barbican-worker-57dc4b8bd7-6w5kj" Dec 10 11:16:12 crc kubenswrapper[4780]: I1210 11:16:12.014790 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/26c869ea-7347-4b65-9a77-2995a7e574ce-config-data\") pod \"barbican-keystone-listener-7c9948ddf4-nxp8k\" (UID: \"26c869ea-7347-4b65-9a77-2995a7e574ce\") " pod="openstack/barbican-keystone-listener-7c9948ddf4-nxp8k" Dec 10 11:16:12 crc kubenswrapper[4780]: I1210 11:16:12.012549 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-55f844cf75-qz82w"] Dec 10 11:16:12 crc kubenswrapper[4780]: I1210 11:16:12.017745 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-55f844cf75-qz82w" Dec 10 11:16:12 crc kubenswrapper[4780]: I1210 11:16:12.055452 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-55f844cf75-qz82w"] Dec 10 11:16:12 crc kubenswrapper[4780]: I1210 11:16:12.128007 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/26c869ea-7347-4b65-9a77-2995a7e574ce-combined-ca-bundle\") pod \"barbican-keystone-listener-7c9948ddf4-nxp8k\" (UID: \"26c869ea-7347-4b65-9a77-2995a7e574ce\") " pod="openstack/barbican-keystone-listener-7c9948ddf4-nxp8k" Dec 10 11:16:12 crc kubenswrapper[4780]: I1210 11:16:12.128109 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3c4736f9-a5dd-4fcc-bf0b-d95bf5e94b99-config\") pod \"dnsmasq-dns-55f844cf75-qz82w\" (UID: \"3c4736f9-a5dd-4fcc-bf0b-d95bf5e94b99\") " pod="openstack/dnsmasq-dns-55f844cf75-qz82w" Dec 10 11:16:12 crc kubenswrapper[4780]: I1210 11:16:12.128195 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/70d1eef3-a181-4ada-897f-4b3b9620e4de-config-data\") pod \"barbican-worker-57dc4b8bd7-6w5kj\" (UID: \"70d1eef3-a181-4ada-897f-4b3b9620e4de\") " pod="openstack/barbican-worker-57dc4b8bd7-6w5kj" Dec 10 11:16:12 crc kubenswrapper[4780]: I1210 11:16:12.128320 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/70d1eef3-a181-4ada-897f-4b3b9620e4de-combined-ca-bundle\") pod \"barbican-worker-57dc4b8bd7-6w5kj\" (UID: \"70d1eef3-a181-4ada-897f-4b3b9620e4de\") " pod="openstack/barbican-worker-57dc4b8bd7-6w5kj" Dec 10 11:16:12 crc kubenswrapper[4780]: I1210 11:16:12.129789 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/70d1eef3-a181-4ada-897f-4b3b9620e4de-logs\") pod \"barbican-worker-57dc4b8bd7-6w5kj\" (UID: \"70d1eef3-a181-4ada-897f-4b3b9620e4de\") " pod="openstack/barbican-worker-57dc4b8bd7-6w5kj" Dec 10 11:16:12 crc kubenswrapper[4780]: I1210 11:16:12.129939 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/26c869ea-7347-4b65-9a77-2995a7e574ce-config-data-custom\") pod \"barbican-keystone-listener-7c9948ddf4-nxp8k\" (UID: \"26c869ea-7347-4b65-9a77-2995a7e574ce\") " pod="openstack/barbican-keystone-listener-7c9948ddf4-nxp8k" Dec 10 11:16:12 crc kubenswrapper[4780]: I1210 11:16:12.129999 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/3c4736f9-a5dd-4fcc-bf0b-d95bf5e94b99-dns-svc\") pod \"dnsmasq-dns-55f844cf75-qz82w\" (UID: \"3c4736f9-a5dd-4fcc-bf0b-d95bf5e94b99\") " pod="openstack/dnsmasq-dns-55f844cf75-qz82w" Dec 10 11:16:12 crc kubenswrapper[4780]: I1210 11:16:12.130046 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bdcj8\" (UniqueName: \"kubernetes.io/projected/70d1eef3-a181-4ada-897f-4b3b9620e4de-kube-api-access-bdcj8\") pod \"barbican-worker-57dc4b8bd7-6w5kj\" (UID: \"70d1eef3-a181-4ada-897f-4b3b9620e4de\") " pod="openstack/barbican-worker-57dc4b8bd7-6w5kj" Dec 10 11:16:12 crc kubenswrapper[4780]: I1210 11:16:12.130107 4780 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/3c4736f9-a5dd-4fcc-bf0b-d95bf5e94b99-ovsdbserver-nb\") pod \"dnsmasq-dns-55f844cf75-qz82w\" (UID: \"3c4736f9-a5dd-4fcc-bf0b-d95bf5e94b99\") " pod="openstack/dnsmasq-dns-55f844cf75-qz82w" Dec 10 11:16:12 crc kubenswrapper[4780]: I1210 11:16:12.130137 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4tkcv\" (UniqueName: \"kubernetes.io/projected/26c869ea-7347-4b65-9a77-2995a7e574ce-kube-api-access-4tkcv\") pod \"barbican-keystone-listener-7c9948ddf4-nxp8k\" (UID: \"26c869ea-7347-4b65-9a77-2995a7e574ce\") " pod="openstack/barbican-keystone-listener-7c9948ddf4-nxp8k" Dec 10 11:16:12 crc kubenswrapper[4780]: I1210 11:16:12.130283 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/70d1eef3-a181-4ada-897f-4b3b9620e4de-config-data-custom\") pod \"barbican-worker-57dc4b8bd7-6w5kj\" (UID: \"70d1eef3-a181-4ada-897f-4b3b9620e4de\") " pod="openstack/barbican-worker-57dc4b8bd7-6w5kj" Dec 10 11:16:12 crc kubenswrapper[4780]: I1210 11:16:12.130354 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/3c4736f9-a5dd-4fcc-bf0b-d95bf5e94b99-dns-swift-storage-0\") pod \"dnsmasq-dns-55f844cf75-qz82w\" (UID: \"3c4736f9-a5dd-4fcc-bf0b-d95bf5e94b99\") " pod="openstack/dnsmasq-dns-55f844cf75-qz82w" Dec 10 11:16:12 crc kubenswrapper[4780]: I1210 11:16:12.130389 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/3c4736f9-a5dd-4fcc-bf0b-d95bf5e94b99-ovsdbserver-sb\") pod \"dnsmasq-dns-55f844cf75-qz82w\" (UID: \"3c4736f9-a5dd-4fcc-bf0b-d95bf5e94b99\") " pod="openstack/dnsmasq-dns-55f844cf75-qz82w" Dec 10 11:16:12 crc kubenswrapper[4780]: I1210 11:16:12.130577 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-c59tp\" (UniqueName: \"kubernetes.io/projected/3c4736f9-a5dd-4fcc-bf0b-d95bf5e94b99-kube-api-access-c59tp\") pod \"dnsmasq-dns-55f844cf75-qz82w\" (UID: \"3c4736f9-a5dd-4fcc-bf0b-d95bf5e94b99\") " pod="openstack/dnsmasq-dns-55f844cf75-qz82w" Dec 10 11:16:12 crc kubenswrapper[4780]: I1210 11:16:12.130730 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/26c869ea-7347-4b65-9a77-2995a7e574ce-config-data\") pod \"barbican-keystone-listener-7c9948ddf4-nxp8k\" (UID: \"26c869ea-7347-4b65-9a77-2995a7e574ce\") " pod="openstack/barbican-keystone-listener-7c9948ddf4-nxp8k" Dec 10 11:16:12 crc kubenswrapper[4780]: I1210 11:16:12.130730 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/70d1eef3-a181-4ada-897f-4b3b9620e4de-logs\") pod \"barbican-worker-57dc4b8bd7-6w5kj\" (UID: \"70d1eef3-a181-4ada-897f-4b3b9620e4de\") " pod="openstack/barbican-worker-57dc4b8bd7-6w5kj" Dec 10 11:16:12 crc kubenswrapper[4780]: I1210 11:16:12.130867 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/26c869ea-7347-4b65-9a77-2995a7e574ce-logs\") pod \"barbican-keystone-listener-7c9948ddf4-nxp8k\" (UID: \"26c869ea-7347-4b65-9a77-2995a7e574ce\") " 
pod="openstack/barbican-keystone-listener-7c9948ddf4-nxp8k" Dec 10 11:16:12 crc kubenswrapper[4780]: I1210 11:16:12.131334 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/26c869ea-7347-4b65-9a77-2995a7e574ce-logs\") pod \"barbican-keystone-listener-7c9948ddf4-nxp8k\" (UID: \"26c869ea-7347-4b65-9a77-2995a7e574ce\") " pod="openstack/barbican-keystone-listener-7c9948ddf4-nxp8k" Dec 10 11:16:12 crc kubenswrapper[4780]: I1210 11:16:12.154263 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/70d1eef3-a181-4ada-897f-4b3b9620e4de-config-data-custom\") pod \"barbican-worker-57dc4b8bd7-6w5kj\" (UID: \"70d1eef3-a181-4ada-897f-4b3b9620e4de\") " pod="openstack/barbican-worker-57dc4b8bd7-6w5kj" Dec 10 11:16:12 crc kubenswrapper[4780]: I1210 11:16:12.161348 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/70d1eef3-a181-4ada-897f-4b3b9620e4de-combined-ca-bundle\") pod \"barbican-worker-57dc4b8bd7-6w5kj\" (UID: \"70d1eef3-a181-4ada-897f-4b3b9620e4de\") " pod="openstack/barbican-worker-57dc4b8bd7-6w5kj" Dec 10 11:16:12 crc kubenswrapper[4780]: I1210 11:16:12.162155 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/26c869ea-7347-4b65-9a77-2995a7e574ce-config-data-custom\") pod \"barbican-keystone-listener-7c9948ddf4-nxp8k\" (UID: \"26c869ea-7347-4b65-9a77-2995a7e574ce\") " pod="openstack/barbican-keystone-listener-7c9948ddf4-nxp8k" Dec 10 11:16:12 crc kubenswrapper[4780]: I1210 11:16:12.176174 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/26c869ea-7347-4b65-9a77-2995a7e574ce-config-data\") pod \"barbican-keystone-listener-7c9948ddf4-nxp8k\" (UID: \"26c869ea-7347-4b65-9a77-2995a7e574ce\") " pod="openstack/barbican-keystone-listener-7c9948ddf4-nxp8k" Dec 10 11:16:12 crc kubenswrapper[4780]: I1210 11:16:12.184650 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-55f844cf75-qz82w"] Dec 10 11:16:12 crc kubenswrapper[4780]: I1210 11:16:12.192358 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bdcj8\" (UniqueName: \"kubernetes.io/projected/70d1eef3-a181-4ada-897f-4b3b9620e4de-kube-api-access-bdcj8\") pod \"barbican-worker-57dc4b8bd7-6w5kj\" (UID: \"70d1eef3-a181-4ada-897f-4b3b9620e4de\") " pod="openstack/barbican-worker-57dc4b8bd7-6w5kj" Dec 10 11:16:12 crc kubenswrapper[4780]: I1210 11:16:12.193897 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/70d1eef3-a181-4ada-897f-4b3b9620e4de-config-data\") pod \"barbican-worker-57dc4b8bd7-6w5kj\" (UID: \"70d1eef3-a181-4ada-897f-4b3b9620e4de\") " pod="openstack/barbican-worker-57dc4b8bd7-6w5kj" Dec 10 11:16:12 crc kubenswrapper[4780]: I1210 11:16:12.210151 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4tkcv\" (UniqueName: \"kubernetes.io/projected/26c869ea-7347-4b65-9a77-2995a7e574ce-kube-api-access-4tkcv\") pod \"barbican-keystone-listener-7c9948ddf4-nxp8k\" (UID: \"26c869ea-7347-4b65-9a77-2995a7e574ce\") " pod="openstack/barbican-keystone-listener-7c9948ddf4-nxp8k" Dec 10 11:16:12 crc kubenswrapper[4780]: I1210 11:16:12.217751 4780 operation_generator.go:637] "MountVolume.SetUp succeeded 
for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/26c869ea-7347-4b65-9a77-2995a7e574ce-combined-ca-bundle\") pod \"barbican-keystone-listener-7c9948ddf4-nxp8k\" (UID: \"26c869ea-7347-4b65-9a77-2995a7e574ce\") " pod="openstack/barbican-keystone-listener-7c9948ddf4-nxp8k" Dec 10 11:16:12 crc kubenswrapper[4780]: I1210 11:16:12.220650 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-5dd58fdf76-hn7kd"] Dec 10 11:16:12 crc kubenswrapper[4780]: I1210 11:16:12.226597 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-keystone-listener-7c9948ddf4-nxp8k" Dec 10 11:16:12 crc kubenswrapper[4780]: I1210 11:16:12.227554 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-5dd58fdf76-hn7kd" Dec 10 11:16:12 crc kubenswrapper[4780]: I1210 11:16:12.238175 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-899t9\" (UniqueName: \"kubernetes.io/projected/0744f766-309b-4632-a522-ba8d51a5fa80-kube-api-access-899t9\") pod \"neutron-5dd58fdf76-hn7kd\" (UID: \"0744f766-309b-4632-a522-ba8d51a5fa80\") " pod="openstack/neutron-5dd58fdf76-hn7kd" Dec 10 11:16:12 crc kubenswrapper[4780]: I1210 11:16:12.238523 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/3c4736f9-a5dd-4fcc-bf0b-d95bf5e94b99-dns-svc\") pod \"dnsmasq-dns-55f844cf75-qz82w\" (UID: \"3c4736f9-a5dd-4fcc-bf0b-d95bf5e94b99\") " pod="openstack/dnsmasq-dns-55f844cf75-qz82w" Dec 10 11:16:12 crc kubenswrapper[4780]: I1210 11:16:12.238628 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/3c4736f9-a5dd-4fcc-bf0b-d95bf5e94b99-ovsdbserver-nb\") pod \"dnsmasq-dns-55f844cf75-qz82w\" (UID: \"3c4736f9-a5dd-4fcc-bf0b-d95bf5e94b99\") " pod="openstack/dnsmasq-dns-55f844cf75-qz82w" Dec 10 11:16:12 crc kubenswrapper[4780]: I1210 11:16:12.238660 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0744f766-309b-4632-a522-ba8d51a5fa80-combined-ca-bundle\") pod \"neutron-5dd58fdf76-hn7kd\" (UID: \"0744f766-309b-4632-a522-ba8d51a5fa80\") " pod="openstack/neutron-5dd58fdf76-hn7kd" Dec 10 11:16:12 crc kubenswrapper[4780]: I1210 11:16:12.238707 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/0744f766-309b-4632-a522-ba8d51a5fa80-ovndb-tls-certs\") pod \"neutron-5dd58fdf76-hn7kd\" (UID: \"0744f766-309b-4632-a522-ba8d51a5fa80\") " pod="openstack/neutron-5dd58fdf76-hn7kd" Dec 10 11:16:12 crc kubenswrapper[4780]: I1210 11:16:12.238831 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/3c4736f9-a5dd-4fcc-bf0b-d95bf5e94b99-dns-swift-storage-0\") pod \"dnsmasq-dns-55f844cf75-qz82w\" (UID: \"3c4736f9-a5dd-4fcc-bf0b-d95bf5e94b99\") " pod="openstack/dnsmasq-dns-55f844cf75-qz82w" Dec 10 11:16:12 crc kubenswrapper[4780]: I1210 11:16:12.238875 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/3c4736f9-a5dd-4fcc-bf0b-d95bf5e94b99-ovsdbserver-sb\") pod \"dnsmasq-dns-55f844cf75-qz82w\" (UID: \"3c4736f9-a5dd-4fcc-bf0b-d95bf5e94b99\") " 
pod="openstack/dnsmasq-dns-55f844cf75-qz82w" Dec 10 11:16:12 crc kubenswrapper[4780]: I1210 11:16:12.242752 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/3c4736f9-a5dd-4fcc-bf0b-d95bf5e94b99-ovsdbserver-nb\") pod \"dnsmasq-dns-55f844cf75-qz82w\" (UID: \"3c4736f9-a5dd-4fcc-bf0b-d95bf5e94b99\") " pod="openstack/dnsmasq-dns-55f844cf75-qz82w" Dec 10 11:16:12 crc kubenswrapper[4780]: I1210 11:16:12.247459 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-httpd-config" Dec 10 11:16:12 crc kubenswrapper[4780]: I1210 11:16:12.247994 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-neutron-dockercfg-5dbzk" Dec 10 11:16:12 crc kubenswrapper[4780]: I1210 11:16:12.262369 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-worker-57dc4b8bd7-6w5kj" Dec 10 11:16:12 crc kubenswrapper[4780]: I1210 11:16:12.251266 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-5dd58fdf76-hn7kd"] Dec 10 11:16:12 crc kubenswrapper[4780]: I1210 11:16:12.252409 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/3c4736f9-a5dd-4fcc-bf0b-d95bf5e94b99-dns-swift-storage-0\") pod \"dnsmasq-dns-55f844cf75-qz82w\" (UID: \"3c4736f9-a5dd-4fcc-bf0b-d95bf5e94b99\") " pod="openstack/dnsmasq-dns-55f844cf75-qz82w" Dec 10 11:16:12 crc kubenswrapper[4780]: I1210 11:16:12.252593 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/0744f766-309b-4632-a522-ba8d51a5fa80-config\") pod \"neutron-5dd58fdf76-hn7kd\" (UID: \"0744f766-309b-4632-a522-ba8d51a5fa80\") " pod="openstack/neutron-5dd58fdf76-hn7kd" Dec 10 11:16:12 crc kubenswrapper[4780]: I1210 11:16:12.263064 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/0744f766-309b-4632-a522-ba8d51a5fa80-httpd-config\") pod \"neutron-5dd58fdf76-hn7kd\" (UID: \"0744f766-309b-4632-a522-ba8d51a5fa80\") " pod="openstack/neutron-5dd58fdf76-hn7kd" Dec 10 11:16:12 crc kubenswrapper[4780]: I1210 11:16:12.263198 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-c59tp\" (UniqueName: \"kubernetes.io/projected/3c4736f9-a5dd-4fcc-bf0b-d95bf5e94b99-kube-api-access-c59tp\") pod \"dnsmasq-dns-55f844cf75-qz82w\" (UID: \"3c4736f9-a5dd-4fcc-bf0b-d95bf5e94b99\") " pod="openstack/dnsmasq-dns-55f844cf75-qz82w" Dec 10 11:16:12 crc kubenswrapper[4780]: I1210 11:16:12.263831 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3c4736f9-a5dd-4fcc-bf0b-d95bf5e94b99-config\") pod \"dnsmasq-dns-55f844cf75-qz82w\" (UID: \"3c4736f9-a5dd-4fcc-bf0b-d95bf5e94b99\") " pod="openstack/dnsmasq-dns-55f844cf75-qz82w" Dec 10 11:16:12 crc kubenswrapper[4780]: I1210 11:16:12.253456 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/3c4736f9-a5dd-4fcc-bf0b-d95bf5e94b99-ovsdbserver-sb\") pod \"dnsmasq-dns-55f844cf75-qz82w\" (UID: \"3c4736f9-a5dd-4fcc-bf0b-d95bf5e94b99\") " pod="openstack/dnsmasq-dns-55f844cf75-qz82w" Dec 10 11:16:12 crc kubenswrapper[4780]: I1210 11:16:12.251765 4780 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/3c4736f9-a5dd-4fcc-bf0b-d95bf5e94b99-dns-svc\") pod \"dnsmasq-dns-55f844cf75-qz82w\" (UID: \"3c4736f9-a5dd-4fcc-bf0b-d95bf5e94b99\") " pod="openstack/dnsmasq-dns-55f844cf75-qz82w" Dec 10 11:16:12 crc kubenswrapper[4780]: I1210 11:16:12.248243 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-config" Dec 10 11:16:12 crc kubenswrapper[4780]: I1210 11:16:12.265389 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3c4736f9-a5dd-4fcc-bf0b-d95bf5e94b99-config\") pod \"dnsmasq-dns-55f844cf75-qz82w\" (UID: \"3c4736f9-a5dd-4fcc-bf0b-d95bf5e94b99\") " pod="openstack/dnsmasq-dns-55f844cf75-qz82w" Dec 10 11:16:12 crc kubenswrapper[4780]: I1210 11:16:12.248332 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-neutron-ovndbs" Dec 10 11:16:12 crc kubenswrapper[4780]: I1210 11:16:12.281070 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-85ff748b95-88btm"] Dec 10 11:16:12 crc kubenswrapper[4780]: I1210 11:16:12.297095 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-85ff748b95-88btm"] Dec 10 11:16:12 crc kubenswrapper[4780]: I1210 11:16:12.313281 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-85ff748b95-88btm" Dec 10 11:16:12 crc kubenswrapper[4780]: I1210 11:16:12.352307 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-api-56ccbfd9d6-x59x5"] Dec 10 11:16:12 crc kubenswrapper[4780]: I1210 11:16:12.356195 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-56ccbfd9d6-x59x5" Dec 10 11:16:12 crc kubenswrapper[4780]: I1210 11:16:12.372012 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-56ccbfd9d6-x59x5"] Dec 10 11:16:12 crc kubenswrapper[4780]: I1210 11:16:12.376731 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-api-config-data" Dec 10 11:16:12 crc kubenswrapper[4780]: I1210 11:16:12.382347 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-c59tp\" (UniqueName: \"kubernetes.io/projected/3c4736f9-a5dd-4fcc-bf0b-d95bf5e94b99-kube-api-access-c59tp\") pod \"dnsmasq-dns-55f844cf75-qz82w\" (UID: \"3c4736f9-a5dd-4fcc-bf0b-d95bf5e94b99\") " pod="openstack/dnsmasq-dns-55f844cf75-qz82w" Dec 10 11:16:12 crc kubenswrapper[4780]: I1210 11:16:12.396489 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/50917050-1856-4678-b62e-c0e165cb3d6c-config\") pod \"dnsmasq-dns-85ff748b95-88btm\" (UID: \"50917050-1856-4678-b62e-c0e165cb3d6c\") " pod="openstack/dnsmasq-dns-85ff748b95-88btm" Dec 10 11:16:12 crc kubenswrapper[4780]: I1210 11:16:12.396647 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6lj6j\" (UniqueName: \"kubernetes.io/projected/50917050-1856-4678-b62e-c0e165cb3d6c-kube-api-access-6lj6j\") pod \"dnsmasq-dns-85ff748b95-88btm\" (UID: \"50917050-1856-4678-b62e-c0e165cb3d6c\") " pod="openstack/dnsmasq-dns-85ff748b95-88btm" Dec 10 11:16:12 crc kubenswrapper[4780]: I1210 11:16:12.396741 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: 
\"kubernetes.io/configmap/50917050-1856-4678-b62e-c0e165cb3d6c-dns-svc\") pod \"dnsmasq-dns-85ff748b95-88btm\" (UID: \"50917050-1856-4678-b62e-c0e165cb3d6c\") " pod="openstack/dnsmasq-dns-85ff748b95-88btm" Dec 10 11:16:12 crc kubenswrapper[4780]: I1210 11:16:12.396899 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/50917050-1856-4678-b62e-c0e165cb3d6c-ovsdbserver-nb\") pod \"dnsmasq-dns-85ff748b95-88btm\" (UID: \"50917050-1856-4678-b62e-c0e165cb3d6c\") " pod="openstack/dnsmasq-dns-85ff748b95-88btm" Dec 10 11:16:12 crc kubenswrapper[4780]: I1210 11:16:12.397004 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-899t9\" (UniqueName: \"kubernetes.io/projected/0744f766-309b-4632-a522-ba8d51a5fa80-kube-api-access-899t9\") pod \"neutron-5dd58fdf76-hn7kd\" (UID: \"0744f766-309b-4632-a522-ba8d51a5fa80\") " pod="openstack/neutron-5dd58fdf76-hn7kd" Dec 10 11:16:12 crc kubenswrapper[4780]: I1210 11:16:12.397442 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0744f766-309b-4632-a522-ba8d51a5fa80-combined-ca-bundle\") pod \"neutron-5dd58fdf76-hn7kd\" (UID: \"0744f766-309b-4632-a522-ba8d51a5fa80\") " pod="openstack/neutron-5dd58fdf76-hn7kd" Dec 10 11:16:12 crc kubenswrapper[4780]: I1210 11:16:12.397485 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/50917050-1856-4678-b62e-c0e165cb3d6c-dns-swift-storage-0\") pod \"dnsmasq-dns-85ff748b95-88btm\" (UID: \"50917050-1856-4678-b62e-c0e165cb3d6c\") " pod="openstack/dnsmasq-dns-85ff748b95-88btm" Dec 10 11:16:12 crc kubenswrapper[4780]: I1210 11:16:12.397535 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/0744f766-309b-4632-a522-ba8d51a5fa80-ovndb-tls-certs\") pod \"neutron-5dd58fdf76-hn7kd\" (UID: \"0744f766-309b-4632-a522-ba8d51a5fa80\") " pod="openstack/neutron-5dd58fdf76-hn7kd" Dec 10 11:16:12 crc kubenswrapper[4780]: I1210 11:16:12.397725 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/0744f766-309b-4632-a522-ba8d51a5fa80-config\") pod \"neutron-5dd58fdf76-hn7kd\" (UID: \"0744f766-309b-4632-a522-ba8d51a5fa80\") " pod="openstack/neutron-5dd58fdf76-hn7kd" Dec 10 11:16:12 crc kubenswrapper[4780]: I1210 11:16:12.397777 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/0744f766-309b-4632-a522-ba8d51a5fa80-httpd-config\") pod \"neutron-5dd58fdf76-hn7kd\" (UID: \"0744f766-309b-4632-a522-ba8d51a5fa80\") " pod="openstack/neutron-5dd58fdf76-hn7kd" Dec 10 11:16:12 crc kubenswrapper[4780]: I1210 11:16:12.405312 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/50917050-1856-4678-b62e-c0e165cb3d6c-ovsdbserver-sb\") pod \"dnsmasq-dns-85ff748b95-88btm\" (UID: \"50917050-1856-4678-b62e-c0e165cb3d6c\") " pod="openstack/dnsmasq-dns-85ff748b95-88btm" Dec 10 11:16:12 crc kubenswrapper[4780]: I1210 11:16:12.417750 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-config\" (UniqueName: 
\"kubernetes.io/secret/0744f766-309b-4632-a522-ba8d51a5fa80-httpd-config\") pod \"neutron-5dd58fdf76-hn7kd\" (UID: \"0744f766-309b-4632-a522-ba8d51a5fa80\") " pod="openstack/neutron-5dd58fdf76-hn7kd" Dec 10 11:16:12 crc kubenswrapper[4780]: I1210 11:16:12.419009 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/0744f766-309b-4632-a522-ba8d51a5fa80-config\") pod \"neutron-5dd58fdf76-hn7kd\" (UID: \"0744f766-309b-4632-a522-ba8d51a5fa80\") " pod="openstack/neutron-5dd58fdf76-hn7kd" Dec 10 11:16:12 crc kubenswrapper[4780]: I1210 11:16:12.433130 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0744f766-309b-4632-a522-ba8d51a5fa80-combined-ca-bundle\") pod \"neutron-5dd58fdf76-hn7kd\" (UID: \"0744f766-309b-4632-a522-ba8d51a5fa80\") " pod="openstack/neutron-5dd58fdf76-hn7kd" Dec 10 11:16:12 crc kubenswrapper[4780]: I1210 11:16:12.436241 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-899t9\" (UniqueName: \"kubernetes.io/projected/0744f766-309b-4632-a522-ba8d51a5fa80-kube-api-access-899t9\") pod \"neutron-5dd58fdf76-hn7kd\" (UID: \"0744f766-309b-4632-a522-ba8d51a5fa80\") " pod="openstack/neutron-5dd58fdf76-hn7kd" Dec 10 11:16:12 crc kubenswrapper[4780]: I1210 11:16:12.457642 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/0744f766-309b-4632-a522-ba8d51a5fa80-ovndb-tls-certs\") pod \"neutron-5dd58fdf76-hn7kd\" (UID: \"0744f766-309b-4632-a522-ba8d51a5fa80\") " pod="openstack/neutron-5dd58fdf76-hn7kd" Dec 10 11:16:12 crc kubenswrapper[4780]: I1210 11:16:12.492562 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-55f844cf75-qz82w" Dec 10 11:16:12 crc kubenswrapper[4780]: I1210 11:16:12.501807 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-5dd58fdf76-hn7kd" Dec 10 11:16:12 crc kubenswrapper[4780]: I1210 11:16:12.511802 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/378bdac2-e552-48df-8bac-fd4300d016e5-logs\") pod \"barbican-api-56ccbfd9d6-x59x5\" (UID: \"378bdac2-e552-48df-8bac-fd4300d016e5\") " pod="openstack/barbican-api-56ccbfd9d6-x59x5" Dec 10 11:16:12 crc kubenswrapper[4780]: I1210 11:16:12.511885 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/50917050-1856-4678-b62e-c0e165cb3d6c-ovsdbserver-sb\") pod \"dnsmasq-dns-85ff748b95-88btm\" (UID: \"50917050-1856-4678-b62e-c0e165cb3d6c\") " pod="openstack/dnsmasq-dns-85ff748b95-88btm" Dec 10 11:16:12 crc kubenswrapper[4780]: I1210 11:16:12.511937 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xzfrj\" (UniqueName: \"kubernetes.io/projected/378bdac2-e552-48df-8bac-fd4300d016e5-kube-api-access-xzfrj\") pod \"barbican-api-56ccbfd9d6-x59x5\" (UID: \"378bdac2-e552-48df-8bac-fd4300d016e5\") " pod="openstack/barbican-api-56ccbfd9d6-x59x5" Dec 10 11:16:12 crc kubenswrapper[4780]: I1210 11:16:12.511995 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/50917050-1856-4678-b62e-c0e165cb3d6c-config\") pod \"dnsmasq-dns-85ff748b95-88btm\" (UID: \"50917050-1856-4678-b62e-c0e165cb3d6c\") " pod="openstack/dnsmasq-dns-85ff748b95-88btm" Dec 10 11:16:12 crc kubenswrapper[4780]: I1210 11:16:12.512033 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6lj6j\" (UniqueName: \"kubernetes.io/projected/50917050-1856-4678-b62e-c0e165cb3d6c-kube-api-access-6lj6j\") pod \"dnsmasq-dns-85ff748b95-88btm\" (UID: \"50917050-1856-4678-b62e-c0e165cb3d6c\") " pod="openstack/dnsmasq-dns-85ff748b95-88btm" Dec 10 11:16:12 crc kubenswrapper[4780]: I1210 11:16:12.512068 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/50917050-1856-4678-b62e-c0e165cb3d6c-dns-svc\") pod \"dnsmasq-dns-85ff748b95-88btm\" (UID: \"50917050-1856-4678-b62e-c0e165cb3d6c\") " pod="openstack/dnsmasq-dns-85ff748b95-88btm" Dec 10 11:16:12 crc kubenswrapper[4780]: I1210 11:16:12.512113 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/378bdac2-e552-48df-8bac-fd4300d016e5-config-data\") pod \"barbican-api-56ccbfd9d6-x59x5\" (UID: \"378bdac2-e552-48df-8bac-fd4300d016e5\") " pod="openstack/barbican-api-56ccbfd9d6-x59x5" Dec 10 11:16:12 crc kubenswrapper[4780]: I1210 11:16:12.512163 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/50917050-1856-4678-b62e-c0e165cb3d6c-ovsdbserver-nb\") pod \"dnsmasq-dns-85ff748b95-88btm\" (UID: \"50917050-1856-4678-b62e-c0e165cb3d6c\") " pod="openstack/dnsmasq-dns-85ff748b95-88btm" Dec 10 11:16:12 crc kubenswrapper[4780]: I1210 11:16:12.512189 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/378bdac2-e552-48df-8bac-fd4300d016e5-config-data-custom\") pod \"barbican-api-56ccbfd9d6-x59x5\" (UID: 
\"378bdac2-e552-48df-8bac-fd4300d016e5\") " pod="openstack/barbican-api-56ccbfd9d6-x59x5" Dec 10 11:16:12 crc kubenswrapper[4780]: I1210 11:16:12.512253 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/50917050-1856-4678-b62e-c0e165cb3d6c-dns-swift-storage-0\") pod \"dnsmasq-dns-85ff748b95-88btm\" (UID: \"50917050-1856-4678-b62e-c0e165cb3d6c\") " pod="openstack/dnsmasq-dns-85ff748b95-88btm" Dec 10 11:16:12 crc kubenswrapper[4780]: I1210 11:16:12.512272 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/378bdac2-e552-48df-8bac-fd4300d016e5-combined-ca-bundle\") pod \"barbican-api-56ccbfd9d6-x59x5\" (UID: \"378bdac2-e552-48df-8bac-fd4300d016e5\") " pod="openstack/barbican-api-56ccbfd9d6-x59x5" Dec 10 11:16:12 crc kubenswrapper[4780]: I1210 11:16:12.515831 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/50917050-1856-4678-b62e-c0e165cb3d6c-config\") pod \"dnsmasq-dns-85ff748b95-88btm\" (UID: \"50917050-1856-4678-b62e-c0e165cb3d6c\") " pod="openstack/dnsmasq-dns-85ff748b95-88btm" Dec 10 11:16:12 crc kubenswrapper[4780]: I1210 11:16:12.516836 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/50917050-1856-4678-b62e-c0e165cb3d6c-dns-svc\") pod \"dnsmasq-dns-85ff748b95-88btm\" (UID: \"50917050-1856-4678-b62e-c0e165cb3d6c\") " pod="openstack/dnsmasq-dns-85ff748b95-88btm" Dec 10 11:16:12 crc kubenswrapper[4780]: I1210 11:16:12.516864 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/50917050-1856-4678-b62e-c0e165cb3d6c-ovsdbserver-sb\") pod \"dnsmasq-dns-85ff748b95-88btm\" (UID: \"50917050-1856-4678-b62e-c0e165cb3d6c\") " pod="openstack/dnsmasq-dns-85ff748b95-88btm" Dec 10 11:16:12 crc kubenswrapper[4780]: I1210 11:16:12.517773 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/50917050-1856-4678-b62e-c0e165cb3d6c-dns-swift-storage-0\") pod \"dnsmasq-dns-85ff748b95-88btm\" (UID: \"50917050-1856-4678-b62e-c0e165cb3d6c\") " pod="openstack/dnsmasq-dns-85ff748b95-88btm" Dec 10 11:16:12 crc kubenswrapper[4780]: I1210 11:16:12.520055 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/50917050-1856-4678-b62e-c0e165cb3d6c-ovsdbserver-nb\") pod \"dnsmasq-dns-85ff748b95-88btm\" (UID: \"50917050-1856-4678-b62e-c0e165cb3d6c\") " pod="openstack/dnsmasq-dns-85ff748b95-88btm" Dec 10 11:16:12 crc kubenswrapper[4780]: I1210 11:16:12.544224 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6lj6j\" (UniqueName: \"kubernetes.io/projected/50917050-1856-4678-b62e-c0e165cb3d6c-kube-api-access-6lj6j\") pod \"dnsmasq-dns-85ff748b95-88btm\" (UID: \"50917050-1856-4678-b62e-c0e165cb3d6c\") " pod="openstack/dnsmasq-dns-85ff748b95-88btm" Dec 10 11:16:12 crc kubenswrapper[4780]: I1210 11:16:12.617412 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/378bdac2-e552-48df-8bac-fd4300d016e5-logs\") pod \"barbican-api-56ccbfd9d6-x59x5\" (UID: \"378bdac2-e552-48df-8bac-fd4300d016e5\") " pod="openstack/barbican-api-56ccbfd9d6-x59x5" 
Dec 10 11:16:12 crc kubenswrapper[4780]: I1210 11:16:12.617540 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xzfrj\" (UniqueName: \"kubernetes.io/projected/378bdac2-e552-48df-8bac-fd4300d016e5-kube-api-access-xzfrj\") pod \"barbican-api-56ccbfd9d6-x59x5\" (UID: \"378bdac2-e552-48df-8bac-fd4300d016e5\") " pod="openstack/barbican-api-56ccbfd9d6-x59x5" Dec 10 11:16:12 crc kubenswrapper[4780]: I1210 11:16:12.617759 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/378bdac2-e552-48df-8bac-fd4300d016e5-config-data\") pod \"barbican-api-56ccbfd9d6-x59x5\" (UID: \"378bdac2-e552-48df-8bac-fd4300d016e5\") " pod="openstack/barbican-api-56ccbfd9d6-x59x5" Dec 10 11:16:12 crc kubenswrapper[4780]: I1210 11:16:12.617974 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/378bdac2-e552-48df-8bac-fd4300d016e5-config-data-custom\") pod \"barbican-api-56ccbfd9d6-x59x5\" (UID: \"378bdac2-e552-48df-8bac-fd4300d016e5\") " pod="openstack/barbican-api-56ccbfd9d6-x59x5" Dec 10 11:16:12 crc kubenswrapper[4780]: I1210 11:16:12.618400 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/378bdac2-e552-48df-8bac-fd4300d016e5-combined-ca-bundle\") pod \"barbican-api-56ccbfd9d6-x59x5\" (UID: \"378bdac2-e552-48df-8bac-fd4300d016e5\") " pod="openstack/barbican-api-56ccbfd9d6-x59x5" Dec 10 11:16:12 crc kubenswrapper[4780]: I1210 11:16:12.618817 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/378bdac2-e552-48df-8bac-fd4300d016e5-logs\") pod \"barbican-api-56ccbfd9d6-x59x5\" (UID: \"378bdac2-e552-48df-8bac-fd4300d016e5\") " pod="openstack/barbican-api-56ccbfd9d6-x59x5" Dec 10 11:16:12 crc kubenswrapper[4780]: I1210 11:16:12.680725 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/378bdac2-e552-48df-8bac-fd4300d016e5-combined-ca-bundle\") pod \"barbican-api-56ccbfd9d6-x59x5\" (UID: \"378bdac2-e552-48df-8bac-fd4300d016e5\") " pod="openstack/barbican-api-56ccbfd9d6-x59x5" Dec 10 11:16:12 crc kubenswrapper[4780]: I1210 11:16:12.683284 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/378bdac2-e552-48df-8bac-fd4300d016e5-config-data\") pod \"barbican-api-56ccbfd9d6-x59x5\" (UID: \"378bdac2-e552-48df-8bac-fd4300d016e5\") " pod="openstack/barbican-api-56ccbfd9d6-x59x5" Dec 10 11:16:12 crc kubenswrapper[4780]: I1210 11:16:12.687016 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/378bdac2-e552-48df-8bac-fd4300d016e5-config-data-custom\") pod \"barbican-api-56ccbfd9d6-x59x5\" (UID: \"378bdac2-e552-48df-8bac-fd4300d016e5\") " pod="openstack/barbican-api-56ccbfd9d6-x59x5" Dec 10 11:16:12 crc kubenswrapper[4780]: I1210 11:16:12.690783 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xzfrj\" (UniqueName: \"kubernetes.io/projected/378bdac2-e552-48df-8bac-fd4300d016e5-kube-api-access-xzfrj\") pod \"barbican-api-56ccbfd9d6-x59x5\" (UID: \"378bdac2-e552-48df-8bac-fd4300d016e5\") " pod="openstack/barbican-api-56ccbfd9d6-x59x5" Dec 10 11:16:12 crc kubenswrapper[4780]: I1210 11:16:12.707551 
4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-85ff748b95-88btm" Dec 10 11:16:12 crc kubenswrapper[4780]: I1210 11:16:12.741174 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-56ccbfd9d6-x59x5" Dec 10 11:16:13 crc kubenswrapper[4780]: I1210 11:16:13.640687 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"1f099496-ee6c-44ff-9c42-c6584b8fb2ad","Type":"ContainerStarted","Data":"a9abfaf819bc7a6dd38ec6a5932abeb0842946e7c626f84e70b57f864190f47e"} Dec 10 11:16:13 crc kubenswrapper[4780]: I1210 11:16:13.776021 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-worker-57dc4b8bd7-6w5kj"] Dec 10 11:16:13 crc kubenswrapper[4780]: I1210 11:16:13.802768 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-keystone-listener-7c9948ddf4-nxp8k"] Dec 10 11:16:13 crc kubenswrapper[4780]: I1210 11:16:13.868181 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-55f844cf75-qz82w"] Dec 10 11:16:14 crc kubenswrapper[4780]: W1210 11:16:14.011066 4780 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod0744f766_309b_4632_a522_ba8d51a5fa80.slice/crio-de146baaac0a79072ff6fa2acb84e65759346b833a2a7d10a945694f6629ebe3 WatchSource:0}: Error finding container de146baaac0a79072ff6fa2acb84e65759346b833a2a7d10a945694f6629ebe3: Status 404 returned error can't find the container with id de146baaac0a79072ff6fa2acb84e65759346b833a2a7d10a945694f6629ebe3 Dec 10 11:16:14 crc kubenswrapper[4780]: I1210 11:16:14.027418 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-5dd58fdf76-hn7kd"] Dec 10 11:16:14 crc kubenswrapper[4780]: I1210 11:16:14.060038 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-56ccbfd9d6-x59x5"] Dec 10 11:16:14 crc kubenswrapper[4780]: W1210 11:16:14.127881 4780 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod378bdac2_e552_48df_8bac_fd4300d016e5.slice/crio-c37cee7a29fa7d5477cc7bdef6707f05420c51edd452e4a7ca3be5f3ad210c4a WatchSource:0}: Error finding container c37cee7a29fa7d5477cc7bdef6707f05420c51edd452e4a7ca3be5f3ad210c4a: Status 404 returned error can't find the container with id c37cee7a29fa7d5477cc7bdef6707f05420c51edd452e4a7ca3be5f3ad210c4a Dec 10 11:16:14 crc kubenswrapper[4780]: I1210 11:16:14.134421 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-85ff748b95-88btm"] Dec 10 11:16:14 crc kubenswrapper[4780]: I1210 11:16:14.663470 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-85ff748b95-88btm" event={"ID":"50917050-1856-4678-b62e-c0e165cb3d6c","Type":"ContainerStarted","Data":"f6ec91e260bfe51b1b102b9fb54bf3171b562329875b1b92dde70b1836c91478"} Dec 10 11:16:14 crc kubenswrapper[4780]: I1210 11:16:14.672421 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-5dd58fdf76-hn7kd" event={"ID":"0744f766-309b-4632-a522-ba8d51a5fa80","Type":"ContainerStarted","Data":"6b66df0c1a3d7d39c43b559fb877ad8ff5d4b2ba89f2344ba205907ddd7c597c"} Dec 10 11:16:14 crc kubenswrapper[4780]: I1210 11:16:14.672647 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-5dd58fdf76-hn7kd" 
event={"ID":"0744f766-309b-4632-a522-ba8d51a5fa80","Type":"ContainerStarted","Data":"de146baaac0a79072ff6fa2acb84e65759346b833a2a7d10a945694f6629ebe3"} Dec 10 11:16:14 crc kubenswrapper[4780]: I1210 11:16:14.674800 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-56ccbfd9d6-x59x5" event={"ID":"378bdac2-e552-48df-8bac-fd4300d016e5","Type":"ContainerStarted","Data":"c37cee7a29fa7d5477cc7bdef6707f05420c51edd452e4a7ca3be5f3ad210c4a"} Dec 10 11:16:14 crc kubenswrapper[4780]: I1210 11:16:14.700611 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-57dc4b8bd7-6w5kj" event={"ID":"70d1eef3-a181-4ada-897f-4b3b9620e4de","Type":"ContainerStarted","Data":"fc5ba47902c114db0ae7106a3d54bfe6a641b742a06ebdccf9d6ea7eed9cbeb8"} Dec 10 11:16:14 crc kubenswrapper[4780]: I1210 11:16:14.723044 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-55f844cf75-qz82w" event={"ID":"3c4736f9-a5dd-4fcc-bf0b-d95bf5e94b99","Type":"ContainerStarted","Data":"e307e66679af595e1ccba3adca0aa09e57fc6278b076ffc8a9698399b520bb33"} Dec 10 11:16:14 crc kubenswrapper[4780]: I1210 11:16:14.723120 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-55f844cf75-qz82w" event={"ID":"3c4736f9-a5dd-4fcc-bf0b-d95bf5e94b99","Type":"ContainerStarted","Data":"ef397a51bf20e4ef5f39349ecc04172a96e3c4586c744b01cdbbe0ee559f4464"} Dec 10 11:16:14 crc kubenswrapper[4780]: I1210 11:16:14.723321 4780 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-55f844cf75-qz82w" podUID="3c4736f9-a5dd-4fcc-bf0b-d95bf5e94b99" containerName="init" containerID="cri-o://e307e66679af595e1ccba3adca0aa09e57fc6278b076ffc8a9698399b520bb33" gracePeriod=10 Dec 10 11:16:14 crc kubenswrapper[4780]: I1210 11:16:14.738989 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-7c9948ddf4-nxp8k" event={"ID":"26c869ea-7347-4b65-9a77-2995a7e574ce","Type":"ContainerStarted","Data":"0323056a36a1a134508896403a7f58d6b3480319a826992cffa88afbff86ecf9"} Dec 10 11:16:15 crc kubenswrapper[4780]: I1210 11:16:15.677852 4780 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-55f844cf75-qz82w" Dec 10 11:16:15 crc kubenswrapper[4780]: I1210 11:16:15.766657 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"1f099496-ee6c-44ff-9c42-c6584b8fb2ad","Type":"ContainerStarted","Data":"b4fa9fbc70f5848ae91210eb2c0d2674c29b75dde7c0216f7c8d756f517045f2"} Dec 10 11:16:15 crc kubenswrapper[4780]: I1210 11:16:15.771877 4780 generic.go:334] "Generic (PLEG): container finished" podID="50917050-1856-4678-b62e-c0e165cb3d6c" containerID="7aec1fea9e85dfda98c5ef3dde03c440320447f04fb7542721402af2cd306de0" exitCode=0 Dec 10 11:16:15 crc kubenswrapper[4780]: I1210 11:16:15.772241 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-85ff748b95-88btm" event={"ID":"50917050-1856-4678-b62e-c0e165cb3d6c","Type":"ContainerDied","Data":"7aec1fea9e85dfda98c5ef3dde03c440320447f04fb7542721402af2cd306de0"} Dec 10 11:16:15 crc kubenswrapper[4780]: I1210 11:16:15.779844 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-5dd58fdf76-hn7kd" event={"ID":"0744f766-309b-4632-a522-ba8d51a5fa80","Type":"ContainerStarted","Data":"6414a1e874a5a81e8a8ed5be1dc1dce97b0ed571884db3a456d15ce58ce8f03d"} Dec 10 11:16:15 crc kubenswrapper[4780]: I1210 11:16:15.781221 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/neutron-5dd58fdf76-hn7kd" Dec 10 11:16:15 crc kubenswrapper[4780]: I1210 11:16:15.791126 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-56ccbfd9d6-x59x5" event={"ID":"378bdac2-e552-48df-8bac-fd4300d016e5","Type":"ContainerStarted","Data":"73545b11b81b920bd41b372f79463bc1e603f10ba535527eca9a3b8b2f0e0fe1"} Dec 10 11:16:15 crc kubenswrapper[4780]: I1210 11:16:15.791216 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-56ccbfd9d6-x59x5" event={"ID":"378bdac2-e552-48df-8bac-fd4300d016e5","Type":"ContainerStarted","Data":"7f9a9989ef7beb4efab75e5acb363453094cb5853fd567812b5ca17940fd14b4"} Dec 10 11:16:15 crc kubenswrapper[4780]: I1210 11:16:15.791931 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-56ccbfd9d6-x59x5" Dec 10 11:16:15 crc kubenswrapper[4780]: I1210 11:16:15.792603 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-56ccbfd9d6-x59x5" Dec 10 11:16:15 crc kubenswrapper[4780]: I1210 11:16:15.804628 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/3c4736f9-a5dd-4fcc-bf0b-d95bf5e94b99-ovsdbserver-sb\") pod \"3c4736f9-a5dd-4fcc-bf0b-d95bf5e94b99\" (UID: \"3c4736f9-a5dd-4fcc-bf0b-d95bf5e94b99\") " Dec 10 11:16:15 crc kubenswrapper[4780]: I1210 11:16:15.804748 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/3c4736f9-a5dd-4fcc-bf0b-d95bf5e94b99-ovsdbserver-nb\") pod \"3c4736f9-a5dd-4fcc-bf0b-d95bf5e94b99\" (UID: \"3c4736f9-a5dd-4fcc-bf0b-d95bf5e94b99\") " Dec 10 11:16:15 crc kubenswrapper[4780]: I1210 11:16:15.804874 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3c4736f9-a5dd-4fcc-bf0b-d95bf5e94b99-config\") pod \"3c4736f9-a5dd-4fcc-bf0b-d95bf5e94b99\" (UID: \"3c4736f9-a5dd-4fcc-bf0b-d95bf5e94b99\") " Dec 10 11:16:15 crc kubenswrapper[4780]: I1210 11:16:15.804983 4780 reconciler_common.go:159] 
"operationExecutor.UnmountVolume started for volume \"kube-api-access-c59tp\" (UniqueName: \"kubernetes.io/projected/3c4736f9-a5dd-4fcc-bf0b-d95bf5e94b99-kube-api-access-c59tp\") pod \"3c4736f9-a5dd-4fcc-bf0b-d95bf5e94b99\" (UID: \"3c4736f9-a5dd-4fcc-bf0b-d95bf5e94b99\") " Dec 10 11:16:15 crc kubenswrapper[4780]: I1210 11:16:15.805045 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/3c4736f9-a5dd-4fcc-bf0b-d95bf5e94b99-dns-svc\") pod \"3c4736f9-a5dd-4fcc-bf0b-d95bf5e94b99\" (UID: \"3c4736f9-a5dd-4fcc-bf0b-d95bf5e94b99\") " Dec 10 11:16:15 crc kubenswrapper[4780]: I1210 11:16:15.808150 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/3c4736f9-a5dd-4fcc-bf0b-d95bf5e94b99-dns-swift-storage-0\") pod \"3c4736f9-a5dd-4fcc-bf0b-d95bf5e94b99\" (UID: \"3c4736f9-a5dd-4fcc-bf0b-d95bf5e94b99\") " Dec 10 11:16:15 crc kubenswrapper[4780]: I1210 11:16:15.817136 4780 generic.go:334] "Generic (PLEG): container finished" podID="3c4736f9-a5dd-4fcc-bf0b-d95bf5e94b99" containerID="e307e66679af595e1ccba3adca0aa09e57fc6278b076ffc8a9698399b520bb33" exitCode=0 Dec 10 11:16:15 crc kubenswrapper[4780]: I1210 11:16:15.817276 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-55f844cf75-qz82w" event={"ID":"3c4736f9-a5dd-4fcc-bf0b-d95bf5e94b99","Type":"ContainerDied","Data":"e307e66679af595e1ccba3adca0aa09e57fc6278b076ffc8a9698399b520bb33"} Dec 10 11:16:15 crc kubenswrapper[4780]: I1210 11:16:15.817330 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-55f844cf75-qz82w" event={"ID":"3c4736f9-a5dd-4fcc-bf0b-d95bf5e94b99","Type":"ContainerDied","Data":"ef397a51bf20e4ef5f39349ecc04172a96e3c4586c744b01cdbbe0ee559f4464"} Dec 10 11:16:15 crc kubenswrapper[4780]: I1210 11:16:15.817433 4780 scope.go:117] "RemoveContainer" containerID="e307e66679af595e1ccba3adca0aa09e57fc6278b076ffc8a9698399b520bb33" Dec 10 11:16:15 crc kubenswrapper[4780]: I1210 11:16:15.817802 4780 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-55f844cf75-qz82w" Dec 10 11:16:15 crc kubenswrapper[4780]: I1210 11:16:15.825611 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3c4736f9-a5dd-4fcc-bf0b-d95bf5e94b99-kube-api-access-c59tp" (OuterVolumeSpecName: "kube-api-access-c59tp") pod "3c4736f9-a5dd-4fcc-bf0b-d95bf5e94b99" (UID: "3c4736f9-a5dd-4fcc-bf0b-d95bf5e94b99"). InnerVolumeSpecName "kube-api-access-c59tp". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:16:15 crc kubenswrapper[4780]: I1210 11:16:15.882617 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3c4736f9-a5dd-4fcc-bf0b-d95bf5e94b99-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "3c4736f9-a5dd-4fcc-bf0b-d95bf5e94b99" (UID: "3c4736f9-a5dd-4fcc-bf0b-d95bf5e94b99"). InnerVolumeSpecName "ovsdbserver-nb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 11:16:15 crc kubenswrapper[4780]: I1210 11:16:15.887005 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-api-56ccbfd9d6-x59x5" podStartSLOduration=3.886974306 podStartE2EDuration="3.886974306s" podCreationTimestamp="2025-12-10 11:16:12 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 11:16:15.831189637 +0000 UTC m=+1880.684583080" watchObservedRunningTime="2025-12-10 11:16:15.886974306 +0000 UTC m=+1880.740367749" Dec 10 11:16:15 crc kubenswrapper[4780]: I1210 11:16:15.895803 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3c4736f9-a5dd-4fcc-bf0b-d95bf5e94b99-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "3c4736f9-a5dd-4fcc-bf0b-d95bf5e94b99" (UID: "3c4736f9-a5dd-4fcc-bf0b-d95bf5e94b99"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 11:16:15 crc kubenswrapper[4780]: I1210 11:16:15.915359 4780 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/3c4736f9-a5dd-4fcc-bf0b-d95bf5e94b99-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Dec 10 11:16:15 crc kubenswrapper[4780]: I1210 11:16:15.915411 4780 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/3c4736f9-a5dd-4fcc-bf0b-d95bf5e94b99-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Dec 10 11:16:15 crc kubenswrapper[4780]: I1210 11:16:15.915422 4780 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-c59tp\" (UniqueName: \"kubernetes.io/projected/3c4736f9-a5dd-4fcc-bf0b-d95bf5e94b99-kube-api-access-c59tp\") on node \"crc\" DevicePath \"\"" Dec 10 11:16:15 crc kubenswrapper[4780]: I1210 11:16:15.919086 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-5dd58fdf76-hn7kd" podStartSLOduration=3.919055528 podStartE2EDuration="3.919055528s" podCreationTimestamp="2025-12-10 11:16:12 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 11:16:15.874462015 +0000 UTC m=+1880.727855458" watchObservedRunningTime="2025-12-10 11:16:15.919055528 +0000 UTC m=+1880.772448971" Dec 10 11:16:15 crc kubenswrapper[4780]: I1210 11:16:15.921703 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3c4736f9-a5dd-4fcc-bf0b-d95bf5e94b99-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "3c4736f9-a5dd-4fcc-bf0b-d95bf5e94b99" (UID: "3c4736f9-a5dd-4fcc-bf0b-d95bf5e94b99"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 11:16:15 crc kubenswrapper[4780]: I1210 11:16:15.924625 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3c4736f9-a5dd-4fcc-bf0b-d95bf5e94b99-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "3c4736f9-a5dd-4fcc-bf0b-d95bf5e94b99" (UID: "3c4736f9-a5dd-4fcc-bf0b-d95bf5e94b99"). InnerVolumeSpecName "dns-swift-storage-0". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 11:16:15 crc kubenswrapper[4780]: I1210 11:16:15.931685 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3c4736f9-a5dd-4fcc-bf0b-d95bf5e94b99-config" (OuterVolumeSpecName: "config") pod "3c4736f9-a5dd-4fcc-bf0b-d95bf5e94b99" (UID: "3c4736f9-a5dd-4fcc-bf0b-d95bf5e94b99"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 11:16:16 crc kubenswrapper[4780]: I1210 11:16:16.023142 4780 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/3c4736f9-a5dd-4fcc-bf0b-d95bf5e94b99-dns-svc\") on node \"crc\" DevicePath \"\"" Dec 10 11:16:16 crc kubenswrapper[4780]: I1210 11:16:16.023776 4780 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/3c4736f9-a5dd-4fcc-bf0b-d95bf5e94b99-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Dec 10 11:16:16 crc kubenswrapper[4780]: I1210 11:16:16.024025 4780 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3c4736f9-a5dd-4fcc-bf0b-d95bf5e94b99-config\") on node \"crc\" DevicePath \"\"" Dec 10 11:16:16 crc kubenswrapper[4780]: I1210 11:16:16.221095 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-55f844cf75-qz82w"] Dec 10 11:16:16 crc kubenswrapper[4780]: I1210 11:16:16.259696 4780 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-55f844cf75-qz82w"] Dec 10 11:16:16 crc kubenswrapper[4780]: I1210 11:16:16.296365 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-5ccbff869-dj8wl"] Dec 10 11:16:16 crc kubenswrapper[4780]: E1210 11:16:16.297285 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3c4736f9-a5dd-4fcc-bf0b-d95bf5e94b99" containerName="init" Dec 10 11:16:16 crc kubenswrapper[4780]: I1210 11:16:16.297318 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="3c4736f9-a5dd-4fcc-bf0b-d95bf5e94b99" containerName="init" Dec 10 11:16:16 crc kubenswrapper[4780]: I1210 11:16:16.297720 4780 memory_manager.go:354] "RemoveStaleState removing state" podUID="3c4736f9-a5dd-4fcc-bf0b-d95bf5e94b99" containerName="init" Dec 10 11:16:16 crc kubenswrapper[4780]: I1210 11:16:16.299981 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-5ccbff869-dj8wl" Dec 10 11:16:16 crc kubenswrapper[4780]: I1210 11:16:16.305558 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-neutron-internal-svc" Dec 10 11:16:16 crc kubenswrapper[4780]: I1210 11:16:16.305859 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-neutron-public-svc" Dec 10 11:16:16 crc kubenswrapper[4780]: I1210 11:16:16.314465 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-5ccbff869-dj8wl"] Dec 10 11:16:16 crc kubenswrapper[4780]: I1210 11:16:16.466281 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/7dff6c31-a933-44b7-ad10-b06d1527c768-internal-tls-certs\") pod \"neutron-5ccbff869-dj8wl\" (UID: \"7dff6c31-a933-44b7-ad10-b06d1527c768\") " pod="openstack/neutron-5ccbff869-dj8wl" Dec 10 11:16:16 crc kubenswrapper[4780]: I1210 11:16:16.466520 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/7dff6c31-a933-44b7-ad10-b06d1527c768-config\") pod \"neutron-5ccbff869-dj8wl\" (UID: \"7dff6c31-a933-44b7-ad10-b06d1527c768\") " pod="openstack/neutron-5ccbff869-dj8wl" Dec 10 11:16:16 crc kubenswrapper[4780]: I1210 11:16:16.466605 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/7dff6c31-a933-44b7-ad10-b06d1527c768-ovndb-tls-certs\") pod \"neutron-5ccbff869-dj8wl\" (UID: \"7dff6c31-a933-44b7-ad10-b06d1527c768\") " pod="openstack/neutron-5ccbff869-dj8wl" Dec 10 11:16:16 crc kubenswrapper[4780]: I1210 11:16:16.466719 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vclqj\" (UniqueName: \"kubernetes.io/projected/7dff6c31-a933-44b7-ad10-b06d1527c768-kube-api-access-vclqj\") pod \"neutron-5ccbff869-dj8wl\" (UID: \"7dff6c31-a933-44b7-ad10-b06d1527c768\") " pod="openstack/neutron-5ccbff869-dj8wl" Dec 10 11:16:16 crc kubenswrapper[4780]: I1210 11:16:16.466761 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7dff6c31-a933-44b7-ad10-b06d1527c768-combined-ca-bundle\") pod \"neutron-5ccbff869-dj8wl\" (UID: \"7dff6c31-a933-44b7-ad10-b06d1527c768\") " pod="openstack/neutron-5ccbff869-dj8wl" Dec 10 11:16:16 crc kubenswrapper[4780]: I1210 11:16:16.466858 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/7dff6c31-a933-44b7-ad10-b06d1527c768-public-tls-certs\") pod \"neutron-5ccbff869-dj8wl\" (UID: \"7dff6c31-a933-44b7-ad10-b06d1527c768\") " pod="openstack/neutron-5ccbff869-dj8wl" Dec 10 11:16:16 crc kubenswrapper[4780]: I1210 11:16:16.467038 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/7dff6c31-a933-44b7-ad10-b06d1527c768-httpd-config\") pod \"neutron-5ccbff869-dj8wl\" (UID: \"7dff6c31-a933-44b7-ad10-b06d1527c768\") " pod="openstack/neutron-5ccbff869-dj8wl" Dec 10 11:16:16 crc kubenswrapper[4780]: I1210 11:16:16.572907 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: 
\"kubernetes.io/secret/7dff6c31-a933-44b7-ad10-b06d1527c768-public-tls-certs\") pod \"neutron-5ccbff869-dj8wl\" (UID: \"7dff6c31-a933-44b7-ad10-b06d1527c768\") " pod="openstack/neutron-5ccbff869-dj8wl" Dec 10 11:16:16 crc kubenswrapper[4780]: I1210 11:16:16.573591 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/7dff6c31-a933-44b7-ad10-b06d1527c768-httpd-config\") pod \"neutron-5ccbff869-dj8wl\" (UID: \"7dff6c31-a933-44b7-ad10-b06d1527c768\") " pod="openstack/neutron-5ccbff869-dj8wl" Dec 10 11:16:16 crc kubenswrapper[4780]: I1210 11:16:16.573791 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/7dff6c31-a933-44b7-ad10-b06d1527c768-internal-tls-certs\") pod \"neutron-5ccbff869-dj8wl\" (UID: \"7dff6c31-a933-44b7-ad10-b06d1527c768\") " pod="openstack/neutron-5ccbff869-dj8wl" Dec 10 11:16:16 crc kubenswrapper[4780]: I1210 11:16:16.573869 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/7dff6c31-a933-44b7-ad10-b06d1527c768-config\") pod \"neutron-5ccbff869-dj8wl\" (UID: \"7dff6c31-a933-44b7-ad10-b06d1527c768\") " pod="openstack/neutron-5ccbff869-dj8wl" Dec 10 11:16:16 crc kubenswrapper[4780]: I1210 11:16:16.574109 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/7dff6c31-a933-44b7-ad10-b06d1527c768-ovndb-tls-certs\") pod \"neutron-5ccbff869-dj8wl\" (UID: \"7dff6c31-a933-44b7-ad10-b06d1527c768\") " pod="openstack/neutron-5ccbff869-dj8wl" Dec 10 11:16:16 crc kubenswrapper[4780]: I1210 11:16:16.574516 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vclqj\" (UniqueName: \"kubernetes.io/projected/7dff6c31-a933-44b7-ad10-b06d1527c768-kube-api-access-vclqj\") pod \"neutron-5ccbff869-dj8wl\" (UID: \"7dff6c31-a933-44b7-ad10-b06d1527c768\") " pod="openstack/neutron-5ccbff869-dj8wl" Dec 10 11:16:16 crc kubenswrapper[4780]: I1210 11:16:16.574585 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7dff6c31-a933-44b7-ad10-b06d1527c768-combined-ca-bundle\") pod \"neutron-5ccbff869-dj8wl\" (UID: \"7dff6c31-a933-44b7-ad10-b06d1527c768\") " pod="openstack/neutron-5ccbff869-dj8wl" Dec 10 11:16:16 crc kubenswrapper[4780]: I1210 11:16:16.582206 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/7dff6c31-a933-44b7-ad10-b06d1527c768-public-tls-certs\") pod \"neutron-5ccbff869-dj8wl\" (UID: \"7dff6c31-a933-44b7-ad10-b06d1527c768\") " pod="openstack/neutron-5ccbff869-dj8wl" Dec 10 11:16:16 crc kubenswrapper[4780]: I1210 11:16:16.583227 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/7dff6c31-a933-44b7-ad10-b06d1527c768-httpd-config\") pod \"neutron-5ccbff869-dj8wl\" (UID: \"7dff6c31-a933-44b7-ad10-b06d1527c768\") " pod="openstack/neutron-5ccbff869-dj8wl" Dec 10 11:16:16 crc kubenswrapper[4780]: I1210 11:16:16.583586 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/7dff6c31-a933-44b7-ad10-b06d1527c768-internal-tls-certs\") pod \"neutron-5ccbff869-dj8wl\" (UID: \"7dff6c31-a933-44b7-ad10-b06d1527c768\") " 
pod="openstack/neutron-5ccbff869-dj8wl" Dec 10 11:16:16 crc kubenswrapper[4780]: I1210 11:16:16.583749 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/7dff6c31-a933-44b7-ad10-b06d1527c768-config\") pod \"neutron-5ccbff869-dj8wl\" (UID: \"7dff6c31-a933-44b7-ad10-b06d1527c768\") " pod="openstack/neutron-5ccbff869-dj8wl" Dec 10 11:16:16 crc kubenswrapper[4780]: I1210 11:16:16.584611 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/7dff6c31-a933-44b7-ad10-b06d1527c768-ovndb-tls-certs\") pod \"neutron-5ccbff869-dj8wl\" (UID: \"7dff6c31-a933-44b7-ad10-b06d1527c768\") " pod="openstack/neutron-5ccbff869-dj8wl" Dec 10 11:16:16 crc kubenswrapper[4780]: I1210 11:16:16.589892 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7dff6c31-a933-44b7-ad10-b06d1527c768-combined-ca-bundle\") pod \"neutron-5ccbff869-dj8wl\" (UID: \"7dff6c31-a933-44b7-ad10-b06d1527c768\") " pod="openstack/neutron-5ccbff869-dj8wl" Dec 10 11:16:16 crc kubenswrapper[4780]: I1210 11:16:16.595194 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vclqj\" (UniqueName: \"kubernetes.io/projected/7dff6c31-a933-44b7-ad10-b06d1527c768-kube-api-access-vclqj\") pod \"neutron-5ccbff869-dj8wl\" (UID: \"7dff6c31-a933-44b7-ad10-b06d1527c768\") " pod="openstack/neutron-5ccbff869-dj8wl" Dec 10 11:16:16 crc kubenswrapper[4780]: I1210 11:16:16.718077 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-5ccbff869-dj8wl" Dec 10 11:16:17 crc kubenswrapper[4780]: I1210 11:16:17.987070 4780 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3c4736f9-a5dd-4fcc-bf0b-d95bf5e94b99" path="/var/lib/kubelet/pods/3c4736f9-a5dd-4fcc-bf0b-d95bf5e94b99/volumes" Dec 10 11:16:18 crc kubenswrapper[4780]: I1210 11:16:18.968629 4780 scope.go:117] "RemoveContainer" containerID="90f38aa24e74e78a263a82b742bebf91991ff25f2212114904e33abbbd82de16" Dec 10 11:16:18 crc kubenswrapper[4780]: E1210 11:16:18.993220 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xhdr5_openshift-machine-config-operator(6bf1dca1-b191-4796-b326-baac53e84045)\"" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" podUID="6bf1dca1-b191-4796-b326-baac53e84045" Dec 10 11:16:19 crc kubenswrapper[4780]: I1210 11:16:19.144614 4780 scope.go:117] "RemoveContainer" containerID="e307e66679af595e1ccba3adca0aa09e57fc6278b076ffc8a9698399b520bb33" Dec 10 11:16:19 crc kubenswrapper[4780]: E1210 11:16:19.149251 4780 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e307e66679af595e1ccba3adca0aa09e57fc6278b076ffc8a9698399b520bb33\": container with ID starting with e307e66679af595e1ccba3adca0aa09e57fc6278b076ffc8a9698399b520bb33 not found: ID does not exist" containerID="e307e66679af595e1ccba3adca0aa09e57fc6278b076ffc8a9698399b520bb33" Dec 10 11:16:19 crc kubenswrapper[4780]: I1210 11:16:19.149327 4780 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e307e66679af595e1ccba3adca0aa09e57fc6278b076ffc8a9698399b520bb33"} err="failed to get container status 
\"e307e66679af595e1ccba3adca0aa09e57fc6278b076ffc8a9698399b520bb33\": rpc error: code = NotFound desc = could not find container \"e307e66679af595e1ccba3adca0aa09e57fc6278b076ffc8a9698399b520bb33\": container with ID starting with e307e66679af595e1ccba3adca0aa09e57fc6278b076ffc8a9698399b520bb33 not found: ID does not exist" Dec 10 11:16:19 crc kubenswrapper[4780]: I1210 11:16:19.663987 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-api-684545bb8-fmwfm"] Dec 10 11:16:19 crc kubenswrapper[4780]: I1210 11:16:19.669171 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-684545bb8-fmwfm" Dec 10 11:16:19 crc kubenswrapper[4780]: I1210 11:16:19.682774 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-barbican-public-svc" Dec 10 11:16:19 crc kubenswrapper[4780]: I1210 11:16:19.686331 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-barbican-internal-svc" Dec 10 11:16:19 crc kubenswrapper[4780]: I1210 11:16:19.700026 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-684545bb8-fmwfm"] Dec 10 11:16:19 crc kubenswrapper[4780]: I1210 11:16:19.820681 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/2fcbf0b1-90a6-47f6-8bdf-f72f91cdc290-internal-tls-certs\") pod \"barbican-api-684545bb8-fmwfm\" (UID: \"2fcbf0b1-90a6-47f6-8bdf-f72f91cdc290\") " pod="openstack/barbican-api-684545bb8-fmwfm" Dec 10 11:16:19 crc kubenswrapper[4780]: I1210 11:16:19.820994 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2fcbf0b1-90a6-47f6-8bdf-f72f91cdc290-combined-ca-bundle\") pod \"barbican-api-684545bb8-fmwfm\" (UID: \"2fcbf0b1-90a6-47f6-8bdf-f72f91cdc290\") " pod="openstack/barbican-api-684545bb8-fmwfm" Dec 10 11:16:19 crc kubenswrapper[4780]: I1210 11:16:19.821184 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2fcbf0b1-90a6-47f6-8bdf-f72f91cdc290-logs\") pod \"barbican-api-684545bb8-fmwfm\" (UID: \"2fcbf0b1-90a6-47f6-8bdf-f72f91cdc290\") " pod="openstack/barbican-api-684545bb8-fmwfm" Dec 10 11:16:19 crc kubenswrapper[4780]: I1210 11:16:19.821275 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2fcbf0b1-90a6-47f6-8bdf-f72f91cdc290-config-data\") pod \"barbican-api-684545bb8-fmwfm\" (UID: \"2fcbf0b1-90a6-47f6-8bdf-f72f91cdc290\") " pod="openstack/barbican-api-684545bb8-fmwfm" Dec 10 11:16:19 crc kubenswrapper[4780]: I1210 11:16:19.821330 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/2fcbf0b1-90a6-47f6-8bdf-f72f91cdc290-config-data-custom\") pod \"barbican-api-684545bb8-fmwfm\" (UID: \"2fcbf0b1-90a6-47f6-8bdf-f72f91cdc290\") " pod="openstack/barbican-api-684545bb8-fmwfm" Dec 10 11:16:19 crc kubenswrapper[4780]: I1210 11:16:19.821365 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jc4mb\" (UniqueName: \"kubernetes.io/projected/2fcbf0b1-90a6-47f6-8bdf-f72f91cdc290-kube-api-access-jc4mb\") pod \"barbican-api-684545bb8-fmwfm\" (UID: 
\"2fcbf0b1-90a6-47f6-8bdf-f72f91cdc290\") " pod="openstack/barbican-api-684545bb8-fmwfm" Dec 10 11:16:19 crc kubenswrapper[4780]: I1210 11:16:19.821606 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/2fcbf0b1-90a6-47f6-8bdf-f72f91cdc290-public-tls-certs\") pod \"barbican-api-684545bb8-fmwfm\" (UID: \"2fcbf0b1-90a6-47f6-8bdf-f72f91cdc290\") " pod="openstack/barbican-api-684545bb8-fmwfm" Dec 10 11:16:19 crc kubenswrapper[4780]: I1210 11:16:19.915131 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-57dc4b8bd7-6w5kj" event={"ID":"70d1eef3-a181-4ada-897f-4b3b9620e4de","Type":"ContainerStarted","Data":"b0bcd94efc61e9ca66cff0fe8677e986fcad0e5e85973191057b9f0ab5511796"} Dec 10 11:16:19 crc kubenswrapper[4780]: I1210 11:16:19.923992 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/2fcbf0b1-90a6-47f6-8bdf-f72f91cdc290-internal-tls-certs\") pod \"barbican-api-684545bb8-fmwfm\" (UID: \"2fcbf0b1-90a6-47f6-8bdf-f72f91cdc290\") " pod="openstack/barbican-api-684545bb8-fmwfm" Dec 10 11:16:19 crc kubenswrapper[4780]: I1210 11:16:19.924121 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2fcbf0b1-90a6-47f6-8bdf-f72f91cdc290-combined-ca-bundle\") pod \"barbican-api-684545bb8-fmwfm\" (UID: \"2fcbf0b1-90a6-47f6-8bdf-f72f91cdc290\") " pod="openstack/barbican-api-684545bb8-fmwfm" Dec 10 11:16:19 crc kubenswrapper[4780]: I1210 11:16:19.924205 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2fcbf0b1-90a6-47f6-8bdf-f72f91cdc290-logs\") pod \"barbican-api-684545bb8-fmwfm\" (UID: \"2fcbf0b1-90a6-47f6-8bdf-f72f91cdc290\") " pod="openstack/barbican-api-684545bb8-fmwfm" Dec 10 11:16:19 crc kubenswrapper[4780]: I1210 11:16:19.924258 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2fcbf0b1-90a6-47f6-8bdf-f72f91cdc290-config-data\") pod \"barbican-api-684545bb8-fmwfm\" (UID: \"2fcbf0b1-90a6-47f6-8bdf-f72f91cdc290\") " pod="openstack/barbican-api-684545bb8-fmwfm" Dec 10 11:16:19 crc kubenswrapper[4780]: I1210 11:16:19.924316 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/2fcbf0b1-90a6-47f6-8bdf-f72f91cdc290-config-data-custom\") pod \"barbican-api-684545bb8-fmwfm\" (UID: \"2fcbf0b1-90a6-47f6-8bdf-f72f91cdc290\") " pod="openstack/barbican-api-684545bb8-fmwfm" Dec 10 11:16:19 crc kubenswrapper[4780]: I1210 11:16:19.924347 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jc4mb\" (UniqueName: \"kubernetes.io/projected/2fcbf0b1-90a6-47f6-8bdf-f72f91cdc290-kube-api-access-jc4mb\") pod \"barbican-api-684545bb8-fmwfm\" (UID: \"2fcbf0b1-90a6-47f6-8bdf-f72f91cdc290\") " pod="openstack/barbican-api-684545bb8-fmwfm" Dec 10 11:16:19 crc kubenswrapper[4780]: I1210 11:16:19.924469 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/2fcbf0b1-90a6-47f6-8bdf-f72f91cdc290-public-tls-certs\") pod \"barbican-api-684545bb8-fmwfm\" (UID: \"2fcbf0b1-90a6-47f6-8bdf-f72f91cdc290\") " pod="openstack/barbican-api-684545bb8-fmwfm" Dec 10 
11:16:19 crc kubenswrapper[4780]: I1210 11:16:19.925542 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2fcbf0b1-90a6-47f6-8bdf-f72f91cdc290-logs\") pod \"barbican-api-684545bb8-fmwfm\" (UID: \"2fcbf0b1-90a6-47f6-8bdf-f72f91cdc290\") " pod="openstack/barbican-api-684545bb8-fmwfm" Dec 10 11:16:19 crc kubenswrapper[4780]: I1210 11:16:19.929523 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/2fcbf0b1-90a6-47f6-8bdf-f72f91cdc290-public-tls-certs\") pod \"barbican-api-684545bb8-fmwfm\" (UID: \"2fcbf0b1-90a6-47f6-8bdf-f72f91cdc290\") " pod="openstack/barbican-api-684545bb8-fmwfm" Dec 10 11:16:19 crc kubenswrapper[4780]: I1210 11:16:19.931693 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/2fcbf0b1-90a6-47f6-8bdf-f72f91cdc290-config-data-custom\") pod \"barbican-api-684545bb8-fmwfm\" (UID: \"2fcbf0b1-90a6-47f6-8bdf-f72f91cdc290\") " pod="openstack/barbican-api-684545bb8-fmwfm" Dec 10 11:16:19 crc kubenswrapper[4780]: I1210 11:16:19.934665 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/2fcbf0b1-90a6-47f6-8bdf-f72f91cdc290-internal-tls-certs\") pod \"barbican-api-684545bb8-fmwfm\" (UID: \"2fcbf0b1-90a6-47f6-8bdf-f72f91cdc290\") " pod="openstack/barbican-api-684545bb8-fmwfm" Dec 10 11:16:19 crc kubenswrapper[4780]: I1210 11:16:19.934820 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2fcbf0b1-90a6-47f6-8bdf-f72f91cdc290-combined-ca-bundle\") pod \"barbican-api-684545bb8-fmwfm\" (UID: \"2fcbf0b1-90a6-47f6-8bdf-f72f91cdc290\") " pod="openstack/barbican-api-684545bb8-fmwfm" Dec 10 11:16:19 crc kubenswrapper[4780]: I1210 11:16:19.940241 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2fcbf0b1-90a6-47f6-8bdf-f72f91cdc290-config-data\") pod \"barbican-api-684545bb8-fmwfm\" (UID: \"2fcbf0b1-90a6-47f6-8bdf-f72f91cdc290\") " pod="openstack/barbican-api-684545bb8-fmwfm" Dec 10 11:16:19 crc kubenswrapper[4780]: I1210 11:16:19.958390 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jc4mb\" (UniqueName: \"kubernetes.io/projected/2fcbf0b1-90a6-47f6-8bdf-f72f91cdc290-kube-api-access-jc4mb\") pod \"barbican-api-684545bb8-fmwfm\" (UID: \"2fcbf0b1-90a6-47f6-8bdf-f72f91cdc290\") " pod="openstack/barbican-api-684545bb8-fmwfm" Dec 10 11:16:19 crc kubenswrapper[4780]: I1210 11:16:19.998806 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"1f099496-ee6c-44ff-9c42-c6584b8fb2ad","Type":"ContainerStarted","Data":"e95636e8f3a430b662ce91c03ec6e1e291e4540c649d4504ea274b5088557cbb"} Dec 10 11:16:20 crc kubenswrapper[4780]: I1210 11:16:19.999804 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-api-684545bb8-fmwfm" Dec 10 11:16:20 crc kubenswrapper[4780]: I1210 11:16:20.034812 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-5ccbff869-dj8wl"] Dec 10 11:16:20 crc kubenswrapper[4780]: W1210 11:16:20.103178 4780 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod7dff6c31_a933_44b7_ad10_b06d1527c768.slice/crio-a9806b3374d57710332a9fbd9999b023686136eb193790bbae9c0433e0bffcaa WatchSource:0}: Error finding container a9806b3374d57710332a9fbd9999b023686136eb193790bbae9c0433e0bffcaa: Status 404 returned error can't find the container with id a9806b3374d57710332a9fbd9999b023686136eb193790bbae9c0433e0bffcaa Dec 10 11:16:20 crc kubenswrapper[4780]: I1210 11:16:20.738864 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-684545bb8-fmwfm"] Dec 10 11:16:20 crc kubenswrapper[4780]: I1210 11:16:20.994818 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-85ff748b95-88btm" event={"ID":"50917050-1856-4678-b62e-c0e165cb3d6c","Type":"ContainerStarted","Data":"48fcc54d3d11c4b6abf849b6b88d982df04943b26932a60524cc943d5dd3a171"} Dec 10 11:16:20 crc kubenswrapper[4780]: I1210 11:16:20.997307 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-85ff748b95-88btm" Dec 10 11:16:21 crc kubenswrapper[4780]: I1210 11:16:21.001169 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-7c9948ddf4-nxp8k" event={"ID":"26c869ea-7347-4b65-9a77-2995a7e574ce","Type":"ContainerStarted","Data":"fc4812fe0e335e2373dbc475acc8c6890b653f4ea9b805cca099abbbb6a1c682"} Dec 10 11:16:21 crc kubenswrapper[4780]: I1210 11:16:21.004657 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-684545bb8-fmwfm" event={"ID":"2fcbf0b1-90a6-47f6-8bdf-f72f91cdc290","Type":"ContainerStarted","Data":"eb6126432766f5ebc47b481dc842a03530d8b53a7ce4f65d073076aad2cf92ae"} Dec 10 11:16:21 crc kubenswrapper[4780]: I1210 11:16:21.007557 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-5ccbff869-dj8wl" event={"ID":"7dff6c31-a933-44b7-ad10-b06d1527c768","Type":"ContainerStarted","Data":"a9806b3374d57710332a9fbd9999b023686136eb193790bbae9c0433e0bffcaa"} Dec 10 11:16:21 crc kubenswrapper[4780]: I1210 11:16:21.044593 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-85ff748b95-88btm" podStartSLOduration=9.044559567 podStartE2EDuration="9.044559567s" podCreationTimestamp="2025-12-10 11:16:12 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 11:16:21.02905179 +0000 UTC m=+1885.882445233" watchObservedRunningTime="2025-12-10 11:16:21.044559567 +0000 UTC m=+1885.897953010" Dec 10 11:16:22 crc kubenswrapper[4780]: I1210 11:16:22.049788 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-684545bb8-fmwfm" event={"ID":"2fcbf0b1-90a6-47f6-8bdf-f72f91cdc290","Type":"ContainerStarted","Data":"029ea1cdc0b8afc9f7f1e35670d3be923e0de82cee6b3d86672e181073d3cf86"} Dec 10 11:16:22 crc kubenswrapper[4780]: I1210 11:16:22.063085 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-5ccbff869-dj8wl" 
event={"ID":"7dff6c31-a933-44b7-ad10-b06d1527c768","Type":"ContainerStarted","Data":"55cead359232b4663661b06a0254179ad9fa8a5461eb0aaa464023b7aa5e3bb6"} Dec 10 11:16:22 crc kubenswrapper[4780]: I1210 11:16:22.083123 4780 generic.go:334] "Generic (PLEG): container finished" podID="f2a06360-9c37-4ae4-8148-73c37d2be5a4" containerID="48344f280bdf19a5857712446887c552fa4f17973c1275cd703846f6e6ceb87f" exitCode=0 Dec 10 11:16:22 crc kubenswrapper[4780]: I1210 11:16:22.083292 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-wqs8s" event={"ID":"f2a06360-9c37-4ae4-8148-73c37d2be5a4","Type":"ContainerDied","Data":"48344f280bdf19a5857712446887c552fa4f17973c1275cd703846f6e6ceb87f"} Dec 10 11:16:22 crc kubenswrapper[4780]: I1210 11:16:22.108944 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-57dc4b8bd7-6w5kj" event={"ID":"70d1eef3-a181-4ada-897f-4b3b9620e4de","Type":"ContainerStarted","Data":"91c66f858538f76c097b57c64ceaac54a0dc8286bd492470f16c553c7f9b5de8"} Dec 10 11:16:22 crc kubenswrapper[4780]: I1210 11:16:22.119619 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-7c9948ddf4-nxp8k" event={"ID":"26c869ea-7347-4b65-9a77-2995a7e574ce","Type":"ContainerStarted","Data":"c180513d715f1e9dd43e1a1b05aa18da946a15642f46258fabb1a0dfd323f9d3"} Dec 10 11:16:22 crc kubenswrapper[4780]: I1210 11:16:22.193018 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-worker-57dc4b8bd7-6w5kj" podStartSLOduration=5.75349251 podStartE2EDuration="11.192977891s" podCreationTimestamp="2025-12-10 11:16:11 +0000 UTC" firstStartedPulling="2025-12-10 11:16:13.800907802 +0000 UTC m=+1878.654301245" lastFinishedPulling="2025-12-10 11:16:19.240393183 +0000 UTC m=+1884.093786626" observedRunningTime="2025-12-10 11:16:22.148673396 +0000 UTC m=+1887.002066859" watchObservedRunningTime="2025-12-10 11:16:22.192977891 +0000 UTC m=+1887.046371334" Dec 10 11:16:22 crc kubenswrapper[4780]: I1210 11:16:22.201982 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-keystone-listener-7c9948ddf4-nxp8k" podStartSLOduration=5.752208769 podStartE2EDuration="11.201947171s" podCreationTimestamp="2025-12-10 11:16:11 +0000 UTC" firstStartedPulling="2025-12-10 11:16:13.789281846 +0000 UTC m=+1878.642675289" lastFinishedPulling="2025-12-10 11:16:19.239020248 +0000 UTC m=+1884.092413691" observedRunningTime="2025-12-10 11:16:22.185625933 +0000 UTC m=+1887.039019376" watchObservedRunningTime="2025-12-10 11:16:22.201947171 +0000 UTC m=+1887.055340614" Dec 10 11:16:24 crc kubenswrapper[4780]: I1210 11:16:24.061590 4780 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-db-sync-wqs8s" Dec 10 11:16:24 crc kubenswrapper[4780]: I1210 11:16:24.127327 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/f2a06360-9c37-4ae4-8148-73c37d2be5a4-db-sync-config-data\") pod \"f2a06360-9c37-4ae4-8148-73c37d2be5a4\" (UID: \"f2a06360-9c37-4ae4-8148-73c37d2be5a4\") " Dec 10 11:16:24 crc kubenswrapper[4780]: I1210 11:16:24.127832 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/f2a06360-9c37-4ae4-8148-73c37d2be5a4-etc-machine-id\") pod \"f2a06360-9c37-4ae4-8148-73c37d2be5a4\" (UID: \"f2a06360-9c37-4ae4-8148-73c37d2be5a4\") " Dec 10 11:16:24 crc kubenswrapper[4780]: I1210 11:16:24.127989 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f2a06360-9c37-4ae4-8148-73c37d2be5a4-combined-ca-bundle\") pod \"f2a06360-9c37-4ae4-8148-73c37d2be5a4\" (UID: \"f2a06360-9c37-4ae4-8148-73c37d2be5a4\") " Dec 10 11:16:24 crc kubenswrapper[4780]: I1210 11:16:24.128054 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nhq8m\" (UniqueName: \"kubernetes.io/projected/f2a06360-9c37-4ae4-8148-73c37d2be5a4-kube-api-access-nhq8m\") pod \"f2a06360-9c37-4ae4-8148-73c37d2be5a4\" (UID: \"f2a06360-9c37-4ae4-8148-73c37d2be5a4\") " Dec 10 11:16:24 crc kubenswrapper[4780]: I1210 11:16:24.128192 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f2a06360-9c37-4ae4-8148-73c37d2be5a4-scripts\") pod \"f2a06360-9c37-4ae4-8148-73c37d2be5a4\" (UID: \"f2a06360-9c37-4ae4-8148-73c37d2be5a4\") " Dec 10 11:16:24 crc kubenswrapper[4780]: I1210 11:16:24.128216 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f2a06360-9c37-4ae4-8148-73c37d2be5a4-config-data\") pod \"f2a06360-9c37-4ae4-8148-73c37d2be5a4\" (UID: \"f2a06360-9c37-4ae4-8148-73c37d2be5a4\") " Dec 10 11:16:24 crc kubenswrapper[4780]: I1210 11:16:24.149028 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f2a06360-9c37-4ae4-8148-73c37d2be5a4-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "f2a06360-9c37-4ae4-8148-73c37d2be5a4" (UID: "f2a06360-9c37-4ae4-8148-73c37d2be5a4"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 10 11:16:24 crc kubenswrapper[4780]: I1210 11:16:24.173217 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f2a06360-9c37-4ae4-8148-73c37d2be5a4-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "f2a06360-9c37-4ae4-8148-73c37d2be5a4" (UID: "f2a06360-9c37-4ae4-8148-73c37d2be5a4"). InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:16:24 crc kubenswrapper[4780]: I1210 11:16:24.186217 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f2a06360-9c37-4ae4-8148-73c37d2be5a4-scripts" (OuterVolumeSpecName: "scripts") pod "f2a06360-9c37-4ae4-8148-73c37d2be5a4" (UID: "f2a06360-9c37-4ae4-8148-73c37d2be5a4"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:16:24 crc kubenswrapper[4780]: I1210 11:16:24.228513 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f2a06360-9c37-4ae4-8148-73c37d2be5a4-kube-api-access-nhq8m" (OuterVolumeSpecName: "kube-api-access-nhq8m") pod "f2a06360-9c37-4ae4-8148-73c37d2be5a4" (UID: "f2a06360-9c37-4ae4-8148-73c37d2be5a4"). InnerVolumeSpecName "kube-api-access-nhq8m". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:16:24 crc kubenswrapper[4780]: I1210 11:16:24.236258 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f2a06360-9c37-4ae4-8148-73c37d2be5a4-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "f2a06360-9c37-4ae4-8148-73c37d2be5a4" (UID: "f2a06360-9c37-4ae4-8148-73c37d2be5a4"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:16:24 crc kubenswrapper[4780]: I1210 11:16:24.253532 4780 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f2a06360-9c37-4ae4-8148-73c37d2be5a4-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 10 11:16:24 crc kubenswrapper[4780]: I1210 11:16:24.253597 4780 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nhq8m\" (UniqueName: \"kubernetes.io/projected/f2a06360-9c37-4ae4-8148-73c37d2be5a4-kube-api-access-nhq8m\") on node \"crc\" DevicePath \"\"" Dec 10 11:16:24 crc kubenswrapper[4780]: I1210 11:16:24.253614 4780 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f2a06360-9c37-4ae4-8148-73c37d2be5a4-scripts\") on node \"crc\" DevicePath \"\"" Dec 10 11:16:24 crc kubenswrapper[4780]: I1210 11:16:24.253625 4780 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/f2a06360-9c37-4ae4-8148-73c37d2be5a4-db-sync-config-data\") on node \"crc\" DevicePath \"\"" Dec 10 11:16:24 crc kubenswrapper[4780]: I1210 11:16:24.253638 4780 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/f2a06360-9c37-4ae4-8148-73c37d2be5a4-etc-machine-id\") on node \"crc\" DevicePath \"\"" Dec 10 11:16:24 crc kubenswrapper[4780]: I1210 11:16:24.292525 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-wqs8s" event={"ID":"f2a06360-9c37-4ae4-8148-73c37d2be5a4","Type":"ContainerDied","Data":"d67bdf26450e143660c8fb95c65ab68434cf08bc7ec098ca71447cad3d131717"} Dec 10 11:16:24 crc kubenswrapper[4780]: I1210 11:16:24.292602 4780 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="d67bdf26450e143660c8fb95c65ab68434cf08bc7ec098ca71447cad3d131717" Dec 10 11:16:24 crc kubenswrapper[4780]: I1210 11:16:24.292731 4780 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-db-sync-wqs8s" Dec 10 11:16:24 crc kubenswrapper[4780]: I1210 11:16:24.324861 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-684545bb8-fmwfm" event={"ID":"2fcbf0b1-90a6-47f6-8bdf-f72f91cdc290","Type":"ContainerStarted","Data":"21f92cfefdc365f69917b9d3f7ccc5417e418b6109c9bd7808c55398cccf436b"} Dec 10 11:16:24 crc kubenswrapper[4780]: I1210 11:16:24.327265 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-684545bb8-fmwfm" Dec 10 11:16:24 crc kubenswrapper[4780]: I1210 11:16:24.327536 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-684545bb8-fmwfm" Dec 10 11:16:24 crc kubenswrapper[4780]: I1210 11:16:24.353007 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-5ccbff869-dj8wl" event={"ID":"7dff6c31-a933-44b7-ad10-b06d1527c768","Type":"ContainerStarted","Data":"c071913051bb6f26d0cfda51c56e1556c35f8d4dee7c74e1647586823c4da986"} Dec 10 11:16:24 crc kubenswrapper[4780]: I1210 11:16:24.353511 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/neutron-5ccbff869-dj8wl" Dec 10 11:16:24 crc kubenswrapper[4780]: I1210 11:16:24.397091 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f2a06360-9c37-4ae4-8148-73c37d2be5a4-config-data" (OuterVolumeSpecName: "config-data") pod "f2a06360-9c37-4ae4-8148-73c37d2be5a4" (UID: "f2a06360-9c37-4ae4-8148-73c37d2be5a4"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:16:24 crc kubenswrapper[4780]: I1210 11:16:24.416265 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"1f099496-ee6c-44ff-9c42-c6584b8fb2ad","Type":"ContainerStarted","Data":"7326f7d0eb9117d311b8a0fce5df5cce919e314e9848cc272e29de1558568644"} Dec 10 11:16:24 crc kubenswrapper[4780]: I1210 11:16:24.423451 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Dec 10 11:16:24 crc kubenswrapper[4780]: I1210 11:16:24.480037 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-api-684545bb8-fmwfm" podStartSLOduration=5.479999256 podStartE2EDuration="5.479999256s" podCreationTimestamp="2025-12-10 11:16:19 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 11:16:24.417157606 +0000 UTC m=+1889.270551049" watchObservedRunningTime="2025-12-10 11:16:24.479999256 +0000 UTC m=+1889.333392699" Dec 10 11:16:24 crc kubenswrapper[4780]: I1210 11:16:24.585667 4780 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f2a06360-9c37-4ae4-8148-73c37d2be5a4-config-data\") on node \"crc\" DevicePath \"\"" Dec 10 11:16:24 crc kubenswrapper[4780]: I1210 11:16:24.597448 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=4.877420122 podStartE2EDuration="15.597421424s" podCreationTimestamp="2025-12-10 11:16:09 +0000 UTC" firstStartedPulling="2025-12-10 11:16:10.995356182 +0000 UTC m=+1875.848749625" lastFinishedPulling="2025-12-10 11:16:21.715357484 +0000 UTC m=+1886.568750927" observedRunningTime="2025-12-10 11:16:24.479827912 +0000 UTC m=+1889.333221355" watchObservedRunningTime="2025-12-10 11:16:24.597421424 +0000 UTC m=+1889.450814867" Dec 10 
11:16:24 crc kubenswrapper[4780]: I1210 11:16:24.741600 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-5ccbff869-dj8wl" podStartSLOduration=8.741562678 podStartE2EDuration="8.741562678s" podCreationTimestamp="2025-12-10 11:16:16 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 11:16:24.591906583 +0000 UTC m=+1889.445300046" watchObservedRunningTime="2025-12-10 11:16:24.741562678 +0000 UTC m=+1889.594956121" Dec 10 11:16:24 crc kubenswrapper[4780]: I1210 11:16:24.903735 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-scheduler-0"] Dec 10 11:16:24 crc kubenswrapper[4780]: E1210 11:16:24.910024 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f2a06360-9c37-4ae4-8148-73c37d2be5a4" containerName="cinder-db-sync" Dec 10 11:16:24 crc kubenswrapper[4780]: I1210 11:16:24.910083 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="f2a06360-9c37-4ae4-8148-73c37d2be5a4" containerName="cinder-db-sync" Dec 10 11:16:24 crc kubenswrapper[4780]: I1210 11:16:24.911036 4780 memory_manager.go:354] "RemoveStaleState removing state" podUID="f2a06360-9c37-4ae4-8148-73c37d2be5a4" containerName="cinder-db-sync" Dec 10 11:16:24 crc kubenswrapper[4780]: I1210 11:16:24.937980 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Dec 10 11:16:24 crc kubenswrapper[4780]: I1210 11:16:24.951737 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-config-data" Dec 10 11:16:24 crc kubenswrapper[4780]: I1210 11:16:24.952330 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scheduler-config-data" Dec 10 11:16:24 crc kubenswrapper[4780]: I1210 11:16:24.952527 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-cinder-dockercfg-hdqfg" Dec 10 11:16:24 crc kubenswrapper[4780]: I1210 11:16:24.952694 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scripts" Dec 10 11:16:25 crc kubenswrapper[4780]: I1210 11:16:25.003308 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Dec 10 11:16:25 crc kubenswrapper[4780]: I1210 11:16:25.086298 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gtqss\" (UniqueName: \"kubernetes.io/projected/bbd2dcb7-ece1-4a2f-83ec-a4aa131720ad-kube-api-access-gtqss\") pod \"cinder-scheduler-0\" (UID: \"bbd2dcb7-ece1-4a2f-83ec-a4aa131720ad\") " pod="openstack/cinder-scheduler-0" Dec 10 11:16:25 crc kubenswrapper[4780]: I1210 11:16:25.086417 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/bbd2dcb7-ece1-4a2f-83ec-a4aa131720ad-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"bbd2dcb7-ece1-4a2f-83ec-a4aa131720ad\") " pod="openstack/cinder-scheduler-0" Dec 10 11:16:25 crc kubenswrapper[4780]: I1210 11:16:25.086658 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/bbd2dcb7-ece1-4a2f-83ec-a4aa131720ad-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"bbd2dcb7-ece1-4a2f-83ec-a4aa131720ad\") " pod="openstack/cinder-scheduler-0" Dec 10 11:16:25 crc kubenswrapper[4780]: I1210 11:16:25.086732 4780 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bbd2dcb7-ece1-4a2f-83ec-a4aa131720ad-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"bbd2dcb7-ece1-4a2f-83ec-a4aa131720ad\") " pod="openstack/cinder-scheduler-0" Dec 10 11:16:25 crc kubenswrapper[4780]: I1210 11:16:25.087459 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bbd2dcb7-ece1-4a2f-83ec-a4aa131720ad-config-data\") pod \"cinder-scheduler-0\" (UID: \"bbd2dcb7-ece1-4a2f-83ec-a4aa131720ad\") " pod="openstack/cinder-scheduler-0" Dec 10 11:16:25 crc kubenswrapper[4780]: I1210 11:16:25.088312 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/bbd2dcb7-ece1-4a2f-83ec-a4aa131720ad-scripts\") pod \"cinder-scheduler-0\" (UID: \"bbd2dcb7-ece1-4a2f-83ec-a4aa131720ad\") " pod="openstack/cinder-scheduler-0" Dec 10 11:16:25 crc kubenswrapper[4780]: I1210 11:16:25.135281 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-85ff748b95-88btm"] Dec 10 11:16:25 crc kubenswrapper[4780]: I1210 11:16:25.135720 4780 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-85ff748b95-88btm" podUID="50917050-1856-4678-b62e-c0e165cb3d6c" containerName="dnsmasq-dns" containerID="cri-o://48fcc54d3d11c4b6abf849b6b88d982df04943b26932a60524cc943d5dd3a171" gracePeriod=10 Dec 10 11:16:25 crc kubenswrapper[4780]: I1210 11:16:25.145177 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-85ff748b95-88btm" Dec 10 11:16:25 crc kubenswrapper[4780]: I1210 11:16:25.162236 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-5c9776ccc5-wkfgs"] Dec 10 11:16:25 crc kubenswrapper[4780]: I1210 11:16:25.165812 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5c9776ccc5-wkfgs" Dec 10 11:16:25 crc kubenswrapper[4780]: I1210 11:16:25.207664 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/bbd2dcb7-ece1-4a2f-83ec-a4aa131720ad-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"bbd2dcb7-ece1-4a2f-83ec-a4aa131720ad\") " pod="openstack/cinder-scheduler-0" Dec 10 11:16:25 crc kubenswrapper[4780]: I1210 11:16:25.207878 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bbd2dcb7-ece1-4a2f-83ec-a4aa131720ad-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"bbd2dcb7-ece1-4a2f-83ec-a4aa131720ad\") " pod="openstack/cinder-scheduler-0" Dec 10 11:16:25 crc kubenswrapper[4780]: I1210 11:16:25.207986 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bbd2dcb7-ece1-4a2f-83ec-a4aa131720ad-config-data\") pod \"cinder-scheduler-0\" (UID: \"bbd2dcb7-ece1-4a2f-83ec-a4aa131720ad\") " pod="openstack/cinder-scheduler-0" Dec 10 11:16:25 crc kubenswrapper[4780]: I1210 11:16:25.207892 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/bbd2dcb7-ece1-4a2f-83ec-a4aa131720ad-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"bbd2dcb7-ece1-4a2f-83ec-a4aa131720ad\") " pod="openstack/cinder-scheduler-0" Dec 10 11:16:25 crc kubenswrapper[4780]: I1210 11:16:25.208331 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/bbd2dcb7-ece1-4a2f-83ec-a4aa131720ad-scripts\") pod \"cinder-scheduler-0\" (UID: \"bbd2dcb7-ece1-4a2f-83ec-a4aa131720ad\") " pod="openstack/cinder-scheduler-0" Dec 10 11:16:25 crc kubenswrapper[4780]: I1210 11:16:25.208583 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gtqss\" (UniqueName: \"kubernetes.io/projected/bbd2dcb7-ece1-4a2f-83ec-a4aa131720ad-kube-api-access-gtqss\") pod \"cinder-scheduler-0\" (UID: \"bbd2dcb7-ece1-4a2f-83ec-a4aa131720ad\") " pod="openstack/cinder-scheduler-0" Dec 10 11:16:25 crc kubenswrapper[4780]: I1210 11:16:25.208704 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/bbd2dcb7-ece1-4a2f-83ec-a4aa131720ad-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"bbd2dcb7-ece1-4a2f-83ec-a4aa131720ad\") " pod="openstack/cinder-scheduler-0" Dec 10 11:16:25 crc kubenswrapper[4780]: I1210 11:16:25.214892 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5c9776ccc5-wkfgs"] Dec 10 11:16:25 crc kubenswrapper[4780]: I1210 11:16:25.219626 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/bbd2dcb7-ece1-4a2f-83ec-a4aa131720ad-scripts\") pod \"cinder-scheduler-0\" (UID: \"bbd2dcb7-ece1-4a2f-83ec-a4aa131720ad\") " pod="openstack/cinder-scheduler-0" Dec 10 11:16:25 crc kubenswrapper[4780]: I1210 11:16:25.227986 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/bbd2dcb7-ece1-4a2f-83ec-a4aa131720ad-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"bbd2dcb7-ece1-4a2f-83ec-a4aa131720ad\") " pod="openstack/cinder-scheduler-0" Dec 10 11:16:25 crc 
kubenswrapper[4780]: I1210 11:16:25.265685 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gtqss\" (UniqueName: \"kubernetes.io/projected/bbd2dcb7-ece1-4a2f-83ec-a4aa131720ad-kube-api-access-gtqss\") pod \"cinder-scheduler-0\" (UID: \"bbd2dcb7-ece1-4a2f-83ec-a4aa131720ad\") " pod="openstack/cinder-scheduler-0" Dec 10 11:16:25 crc kubenswrapper[4780]: I1210 11:16:25.266397 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bbd2dcb7-ece1-4a2f-83ec-a4aa131720ad-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"bbd2dcb7-ece1-4a2f-83ec-a4aa131720ad\") " pod="openstack/cinder-scheduler-0" Dec 10 11:16:25 crc kubenswrapper[4780]: I1210 11:16:25.267317 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bbd2dcb7-ece1-4a2f-83ec-a4aa131720ad-config-data\") pod \"cinder-scheduler-0\" (UID: \"bbd2dcb7-ece1-4a2f-83ec-a4aa131720ad\") " pod="openstack/cinder-scheduler-0" Dec 10 11:16:25 crc kubenswrapper[4780]: I1210 11:16:25.318711 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/7e45c931-6a45-4eb5-b511-3be5b47c2f4f-ovsdbserver-sb\") pod \"dnsmasq-dns-5c9776ccc5-wkfgs\" (UID: \"7e45c931-6a45-4eb5-b511-3be5b47c2f4f\") " pod="openstack/dnsmasq-dns-5c9776ccc5-wkfgs" Dec 10 11:16:25 crc kubenswrapper[4780]: I1210 11:16:25.322973 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/7e45c931-6a45-4eb5-b511-3be5b47c2f4f-dns-svc\") pod \"dnsmasq-dns-5c9776ccc5-wkfgs\" (UID: \"7e45c931-6a45-4eb5-b511-3be5b47c2f4f\") " pod="openstack/dnsmasq-dns-5c9776ccc5-wkfgs" Dec 10 11:16:25 crc kubenswrapper[4780]: I1210 11:16:25.323235 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mnpkv\" (UniqueName: \"kubernetes.io/projected/7e45c931-6a45-4eb5-b511-3be5b47c2f4f-kube-api-access-mnpkv\") pod \"dnsmasq-dns-5c9776ccc5-wkfgs\" (UID: \"7e45c931-6a45-4eb5-b511-3be5b47c2f4f\") " pod="openstack/dnsmasq-dns-5c9776ccc5-wkfgs" Dec 10 11:16:25 crc kubenswrapper[4780]: I1210 11:16:25.324160 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7e45c931-6a45-4eb5-b511-3be5b47c2f4f-config\") pod \"dnsmasq-dns-5c9776ccc5-wkfgs\" (UID: \"7e45c931-6a45-4eb5-b511-3be5b47c2f4f\") " pod="openstack/dnsmasq-dns-5c9776ccc5-wkfgs" Dec 10 11:16:25 crc kubenswrapper[4780]: I1210 11:16:25.324298 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/7e45c931-6a45-4eb5-b511-3be5b47c2f4f-ovsdbserver-nb\") pod \"dnsmasq-dns-5c9776ccc5-wkfgs\" (UID: \"7e45c931-6a45-4eb5-b511-3be5b47c2f4f\") " pod="openstack/dnsmasq-dns-5c9776ccc5-wkfgs" Dec 10 11:16:25 crc kubenswrapper[4780]: I1210 11:16:25.324977 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/7e45c931-6a45-4eb5-b511-3be5b47c2f4f-dns-swift-storage-0\") pod \"dnsmasq-dns-5c9776ccc5-wkfgs\" (UID: \"7e45c931-6a45-4eb5-b511-3be5b47c2f4f\") " pod="openstack/dnsmasq-dns-5c9776ccc5-wkfgs" Dec 10 11:16:25 crc 
kubenswrapper[4780]: I1210 11:16:25.386842 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-api-0"] Dec 10 11:16:25 crc kubenswrapper[4780]: I1210 11:16:25.393453 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Dec 10 11:16:25 crc kubenswrapper[4780]: I1210 11:16:25.399389 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-api-config-data" Dec 10 11:16:25 crc kubenswrapper[4780]: I1210 11:16:25.412908 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Dec 10 11:16:25 crc kubenswrapper[4780]: I1210 11:16:25.430125 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/7e45c931-6a45-4eb5-b511-3be5b47c2f4f-dns-swift-storage-0\") pod \"dnsmasq-dns-5c9776ccc5-wkfgs\" (UID: \"7e45c931-6a45-4eb5-b511-3be5b47c2f4f\") " pod="openstack/dnsmasq-dns-5c9776ccc5-wkfgs" Dec 10 11:16:25 crc kubenswrapper[4780]: I1210 11:16:25.430225 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/7e45c931-6a45-4eb5-b511-3be5b47c2f4f-ovsdbserver-sb\") pod \"dnsmasq-dns-5c9776ccc5-wkfgs\" (UID: \"7e45c931-6a45-4eb5-b511-3be5b47c2f4f\") " pod="openstack/dnsmasq-dns-5c9776ccc5-wkfgs" Dec 10 11:16:25 crc kubenswrapper[4780]: I1210 11:16:25.430262 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/7e45c931-6a45-4eb5-b511-3be5b47c2f4f-dns-svc\") pod \"dnsmasq-dns-5c9776ccc5-wkfgs\" (UID: \"7e45c931-6a45-4eb5-b511-3be5b47c2f4f\") " pod="openstack/dnsmasq-dns-5c9776ccc5-wkfgs" Dec 10 11:16:25 crc kubenswrapper[4780]: I1210 11:16:25.430313 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mnpkv\" (UniqueName: \"kubernetes.io/projected/7e45c931-6a45-4eb5-b511-3be5b47c2f4f-kube-api-access-mnpkv\") pod \"dnsmasq-dns-5c9776ccc5-wkfgs\" (UID: \"7e45c931-6a45-4eb5-b511-3be5b47c2f4f\") " pod="openstack/dnsmasq-dns-5c9776ccc5-wkfgs" Dec 10 11:16:25 crc kubenswrapper[4780]: I1210 11:16:25.430351 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7e45c931-6a45-4eb5-b511-3be5b47c2f4f-config\") pod \"dnsmasq-dns-5c9776ccc5-wkfgs\" (UID: \"7e45c931-6a45-4eb5-b511-3be5b47c2f4f\") " pod="openstack/dnsmasq-dns-5c9776ccc5-wkfgs" Dec 10 11:16:25 crc kubenswrapper[4780]: I1210 11:16:25.430394 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/7e45c931-6a45-4eb5-b511-3be5b47c2f4f-ovsdbserver-nb\") pod \"dnsmasq-dns-5c9776ccc5-wkfgs\" (UID: \"7e45c931-6a45-4eb5-b511-3be5b47c2f4f\") " pod="openstack/dnsmasq-dns-5c9776ccc5-wkfgs" Dec 10 11:16:25 crc kubenswrapper[4780]: I1210 11:16:25.431618 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/7e45c931-6a45-4eb5-b511-3be5b47c2f4f-ovsdbserver-nb\") pod \"dnsmasq-dns-5c9776ccc5-wkfgs\" (UID: \"7e45c931-6a45-4eb5-b511-3be5b47c2f4f\") " pod="openstack/dnsmasq-dns-5c9776ccc5-wkfgs" Dec 10 11:16:25 crc kubenswrapper[4780]: I1210 11:16:25.435310 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: 
\"kubernetes.io/configmap/7e45c931-6a45-4eb5-b511-3be5b47c2f4f-dns-swift-storage-0\") pod \"dnsmasq-dns-5c9776ccc5-wkfgs\" (UID: \"7e45c931-6a45-4eb5-b511-3be5b47c2f4f\") " pod="openstack/dnsmasq-dns-5c9776ccc5-wkfgs" Dec 10 11:16:25 crc kubenswrapper[4780]: I1210 11:16:25.435972 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/7e45c931-6a45-4eb5-b511-3be5b47c2f4f-ovsdbserver-sb\") pod \"dnsmasq-dns-5c9776ccc5-wkfgs\" (UID: \"7e45c931-6a45-4eb5-b511-3be5b47c2f4f\") " pod="openstack/dnsmasq-dns-5c9776ccc5-wkfgs" Dec 10 11:16:25 crc kubenswrapper[4780]: I1210 11:16:25.436597 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/7e45c931-6a45-4eb5-b511-3be5b47c2f4f-dns-svc\") pod \"dnsmasq-dns-5c9776ccc5-wkfgs\" (UID: \"7e45c931-6a45-4eb5-b511-3be5b47c2f4f\") " pod="openstack/dnsmasq-dns-5c9776ccc5-wkfgs" Dec 10 11:16:25 crc kubenswrapper[4780]: I1210 11:16:25.437625 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7e45c931-6a45-4eb5-b511-3be5b47c2f4f-config\") pod \"dnsmasq-dns-5c9776ccc5-wkfgs\" (UID: \"7e45c931-6a45-4eb5-b511-3be5b47c2f4f\") " pod="openstack/dnsmasq-dns-5c9776ccc5-wkfgs" Dec 10 11:16:25 crc kubenswrapper[4780]: I1210 11:16:25.438602 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Dec 10 11:16:25 crc kubenswrapper[4780]: I1210 11:16:25.505934 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mnpkv\" (UniqueName: \"kubernetes.io/projected/7e45c931-6a45-4eb5-b511-3be5b47c2f4f-kube-api-access-mnpkv\") pod \"dnsmasq-dns-5c9776ccc5-wkfgs\" (UID: \"7e45c931-6a45-4eb5-b511-3be5b47c2f4f\") " pod="openstack/dnsmasq-dns-5c9776ccc5-wkfgs" Dec 10 11:16:25 crc kubenswrapper[4780]: I1210 11:16:25.534432 4780 generic.go:334] "Generic (PLEG): container finished" podID="50917050-1856-4678-b62e-c0e165cb3d6c" containerID="48fcc54d3d11c4b6abf849b6b88d982df04943b26932a60524cc943d5dd3a171" exitCode=0 Dec 10 11:16:25 crc kubenswrapper[4780]: I1210 11:16:25.534574 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-85ff748b95-88btm" event={"ID":"50917050-1856-4678-b62e-c0e165cb3d6c","Type":"ContainerDied","Data":"48fcc54d3d11c4b6abf849b6b88d982df04943b26932a60524cc943d5dd3a171"} Dec 10 11:16:25 crc kubenswrapper[4780]: I1210 11:16:25.535453 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/61b91736-8f72-474a-ab9a-8071b34b8458-config-data\") pod \"cinder-api-0\" (UID: \"61b91736-8f72-474a-ab9a-8071b34b8458\") " pod="openstack/cinder-api-0" Dec 10 11:16:25 crc kubenswrapper[4780]: I1210 11:16:25.537844 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/61b91736-8f72-474a-ab9a-8071b34b8458-logs\") pod \"cinder-api-0\" (UID: \"61b91736-8f72-474a-ab9a-8071b34b8458\") " pod="openstack/cinder-api-0" Dec 10 11:16:25 crc kubenswrapper[4780]: I1210 11:16:25.543443 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6tsgd\" (UniqueName: \"kubernetes.io/projected/61b91736-8f72-474a-ab9a-8071b34b8458-kube-api-access-6tsgd\") pod \"cinder-api-0\" (UID: \"61b91736-8f72-474a-ab9a-8071b34b8458\") " 
pod="openstack/cinder-api-0" Dec 10 11:16:25 crc kubenswrapper[4780]: I1210 11:16:25.544770 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/61b91736-8f72-474a-ab9a-8071b34b8458-scripts\") pod \"cinder-api-0\" (UID: \"61b91736-8f72-474a-ab9a-8071b34b8458\") " pod="openstack/cinder-api-0" Dec 10 11:16:25 crc kubenswrapper[4780]: I1210 11:16:25.544840 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/61b91736-8f72-474a-ab9a-8071b34b8458-etc-machine-id\") pod \"cinder-api-0\" (UID: \"61b91736-8f72-474a-ab9a-8071b34b8458\") " pod="openstack/cinder-api-0" Dec 10 11:16:25 crc kubenswrapper[4780]: I1210 11:16:25.549682 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/61b91736-8f72-474a-ab9a-8071b34b8458-config-data-custom\") pod \"cinder-api-0\" (UID: \"61b91736-8f72-474a-ab9a-8071b34b8458\") " pod="openstack/cinder-api-0" Dec 10 11:16:25 crc kubenswrapper[4780]: I1210 11:16:25.550174 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/61b91736-8f72-474a-ab9a-8071b34b8458-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"61b91736-8f72-474a-ab9a-8071b34b8458\") " pod="openstack/cinder-api-0" Dec 10 11:16:25 crc kubenswrapper[4780]: I1210 11:16:25.652885 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/61b91736-8f72-474a-ab9a-8071b34b8458-scripts\") pod \"cinder-api-0\" (UID: \"61b91736-8f72-474a-ab9a-8071b34b8458\") " pod="openstack/cinder-api-0" Dec 10 11:16:25 crc kubenswrapper[4780]: I1210 11:16:25.676502 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/61b91736-8f72-474a-ab9a-8071b34b8458-etc-machine-id\") pod \"cinder-api-0\" (UID: \"61b91736-8f72-474a-ab9a-8071b34b8458\") " pod="openstack/cinder-api-0" Dec 10 11:16:25 crc kubenswrapper[4780]: I1210 11:16:25.676792 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/61b91736-8f72-474a-ab9a-8071b34b8458-config-data-custom\") pod \"cinder-api-0\" (UID: \"61b91736-8f72-474a-ab9a-8071b34b8458\") " pod="openstack/cinder-api-0" Dec 10 11:16:25 crc kubenswrapper[4780]: I1210 11:16:25.676989 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/61b91736-8f72-474a-ab9a-8071b34b8458-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"61b91736-8f72-474a-ab9a-8071b34b8458\") " pod="openstack/cinder-api-0" Dec 10 11:16:25 crc kubenswrapper[4780]: I1210 11:16:25.677269 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/61b91736-8f72-474a-ab9a-8071b34b8458-config-data\") pod \"cinder-api-0\" (UID: \"61b91736-8f72-474a-ab9a-8071b34b8458\") " pod="openstack/cinder-api-0" Dec 10 11:16:25 crc kubenswrapper[4780]: I1210 11:16:25.677410 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/61b91736-8f72-474a-ab9a-8071b34b8458-logs\") pod \"cinder-api-0\" 
(UID: \"61b91736-8f72-474a-ab9a-8071b34b8458\") " pod="openstack/cinder-api-0" Dec 10 11:16:25 crc kubenswrapper[4780]: I1210 11:16:25.677512 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6tsgd\" (UniqueName: \"kubernetes.io/projected/61b91736-8f72-474a-ab9a-8071b34b8458-kube-api-access-6tsgd\") pod \"cinder-api-0\" (UID: \"61b91736-8f72-474a-ab9a-8071b34b8458\") " pod="openstack/cinder-api-0" Dec 10 11:16:25 crc kubenswrapper[4780]: I1210 11:16:25.678292 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/61b91736-8f72-474a-ab9a-8071b34b8458-etc-machine-id\") pod \"cinder-api-0\" (UID: \"61b91736-8f72-474a-ab9a-8071b34b8458\") " pod="openstack/cinder-api-0" Dec 10 11:16:25 crc kubenswrapper[4780]: I1210 11:16:25.689427 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/61b91736-8f72-474a-ab9a-8071b34b8458-logs\") pod \"cinder-api-0\" (UID: \"61b91736-8f72-474a-ab9a-8071b34b8458\") " pod="openstack/cinder-api-0" Dec 10 11:16:25 crc kubenswrapper[4780]: I1210 11:16:25.751541 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/61b91736-8f72-474a-ab9a-8071b34b8458-config-data\") pod \"cinder-api-0\" (UID: \"61b91736-8f72-474a-ab9a-8071b34b8458\") " pod="openstack/cinder-api-0" Dec 10 11:16:25 crc kubenswrapper[4780]: I1210 11:16:25.753534 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/61b91736-8f72-474a-ab9a-8071b34b8458-scripts\") pod \"cinder-api-0\" (UID: \"61b91736-8f72-474a-ab9a-8071b34b8458\") " pod="openstack/cinder-api-0" Dec 10 11:16:25 crc kubenswrapper[4780]: I1210 11:16:25.756202 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5c9776ccc5-wkfgs" Dec 10 11:16:25 crc kubenswrapper[4780]: I1210 11:16:25.761550 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/61b91736-8f72-474a-ab9a-8071b34b8458-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"61b91736-8f72-474a-ab9a-8071b34b8458\") " pod="openstack/cinder-api-0" Dec 10 11:16:25 crc kubenswrapper[4780]: I1210 11:16:25.779848 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6tsgd\" (UniqueName: \"kubernetes.io/projected/61b91736-8f72-474a-ab9a-8071b34b8458-kube-api-access-6tsgd\") pod \"cinder-api-0\" (UID: \"61b91736-8f72-474a-ab9a-8071b34b8458\") " pod="openstack/cinder-api-0" Dec 10 11:16:25 crc kubenswrapper[4780]: I1210 11:16:25.780771 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/61b91736-8f72-474a-ab9a-8071b34b8458-config-data-custom\") pod \"cinder-api-0\" (UID: \"61b91736-8f72-474a-ab9a-8071b34b8458\") " pod="openstack/cinder-api-0" Dec 10 11:16:25 crc kubenswrapper[4780]: I1210 11:16:25.948742 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-api-0" Dec 10 11:16:26 crc kubenswrapper[4780]: I1210 11:16:26.850745 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Dec 10 11:16:26 crc kubenswrapper[4780]: I1210 11:16:26.857775 4780 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/barbican-api-56ccbfd9d6-x59x5" podUID="378bdac2-e552-48df-8bac-fd4300d016e5" containerName="barbican-api-log" probeResult="failure" output="Get \"http://10.217.0.201:9311/healthcheck\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Dec 10 11:16:26 crc kubenswrapper[4780]: I1210 11:16:26.858698 4780 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/barbican-api-56ccbfd9d6-x59x5" podUID="378bdac2-e552-48df-8bac-fd4300d016e5" containerName="barbican-api" probeResult="failure" output="Get \"http://10.217.0.201:9311/healthcheck\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Dec 10 11:16:27 crc kubenswrapper[4780]: I1210 11:16:27.034047 4780 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-85ff748b95-88btm" Dec 10 11:16:27 crc kubenswrapper[4780]: I1210 11:16:27.171372 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/50917050-1856-4678-b62e-c0e165cb3d6c-ovsdbserver-nb\") pod \"50917050-1856-4678-b62e-c0e165cb3d6c\" (UID: \"50917050-1856-4678-b62e-c0e165cb3d6c\") " Dec 10 11:16:27 crc kubenswrapper[4780]: I1210 11:16:27.171982 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/50917050-1856-4678-b62e-c0e165cb3d6c-dns-svc\") pod \"50917050-1856-4678-b62e-c0e165cb3d6c\" (UID: \"50917050-1856-4678-b62e-c0e165cb3d6c\") " Dec 10 11:16:27 crc kubenswrapper[4780]: I1210 11:16:27.172421 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6lj6j\" (UniqueName: \"kubernetes.io/projected/50917050-1856-4678-b62e-c0e165cb3d6c-kube-api-access-6lj6j\") pod \"50917050-1856-4678-b62e-c0e165cb3d6c\" (UID: \"50917050-1856-4678-b62e-c0e165cb3d6c\") " Dec 10 11:16:27 crc kubenswrapper[4780]: I1210 11:16:27.173158 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/50917050-1856-4678-b62e-c0e165cb3d6c-config\") pod \"50917050-1856-4678-b62e-c0e165cb3d6c\" (UID: \"50917050-1856-4678-b62e-c0e165cb3d6c\") " Dec 10 11:16:27 crc kubenswrapper[4780]: I1210 11:16:27.173661 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/50917050-1856-4678-b62e-c0e165cb3d6c-dns-swift-storage-0\") pod \"50917050-1856-4678-b62e-c0e165cb3d6c\" (UID: \"50917050-1856-4678-b62e-c0e165cb3d6c\") " Dec 10 11:16:27 crc kubenswrapper[4780]: I1210 11:16:27.178618 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/50917050-1856-4678-b62e-c0e165cb3d6c-ovsdbserver-sb\") pod \"50917050-1856-4678-b62e-c0e165cb3d6c\" (UID: \"50917050-1856-4678-b62e-c0e165cb3d6c\") " Dec 10 11:16:27 crc kubenswrapper[4780]: I1210 11:16:27.211474 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/50917050-1856-4678-b62e-c0e165cb3d6c-kube-api-access-6lj6j" (OuterVolumeSpecName: 
"kube-api-access-6lj6j") pod "50917050-1856-4678-b62e-c0e165cb3d6c" (UID: "50917050-1856-4678-b62e-c0e165cb3d6c"). InnerVolumeSpecName "kube-api-access-6lj6j". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:16:27 crc kubenswrapper[4780]: I1210 11:16:27.301624 4780 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6lj6j\" (UniqueName: \"kubernetes.io/projected/50917050-1856-4678-b62e-c0e165cb3d6c-kube-api-access-6lj6j\") on node \"crc\" DevicePath \"\"" Dec 10 11:16:27 crc kubenswrapper[4780]: I1210 11:16:27.543679 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/50917050-1856-4678-b62e-c0e165cb3d6c-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "50917050-1856-4678-b62e-c0e165cb3d6c" (UID: "50917050-1856-4678-b62e-c0e165cb3d6c"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 11:16:27 crc kubenswrapper[4780]: I1210 11:16:27.554525 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/50917050-1856-4678-b62e-c0e165cb3d6c-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "50917050-1856-4678-b62e-c0e165cb3d6c" (UID: "50917050-1856-4678-b62e-c0e165cb3d6c"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 11:16:27 crc kubenswrapper[4780]: I1210 11:16:27.561850 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/50917050-1856-4678-b62e-c0e165cb3d6c-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "50917050-1856-4678-b62e-c0e165cb3d6c" (UID: "50917050-1856-4678-b62e-c0e165cb3d6c"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 11:16:27 crc kubenswrapper[4780]: I1210 11:16:27.591603 4780 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/50917050-1856-4678-b62e-c0e165cb3d6c-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Dec 10 11:16:27 crc kubenswrapper[4780]: I1210 11:16:27.591662 4780 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/50917050-1856-4678-b62e-c0e165cb3d6c-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Dec 10 11:16:27 crc kubenswrapper[4780]: I1210 11:16:27.591672 4780 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/50917050-1856-4678-b62e-c0e165cb3d6c-dns-svc\") on node \"crc\" DevicePath \"\"" Dec 10 11:16:27 crc kubenswrapper[4780]: I1210 11:16:27.632589 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/50917050-1856-4678-b62e-c0e165cb3d6c-config" (OuterVolumeSpecName: "config") pod "50917050-1856-4678-b62e-c0e165cb3d6c" (UID: "50917050-1856-4678-b62e-c0e165cb3d6c"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 11:16:27 crc kubenswrapper[4780]: I1210 11:16:27.658111 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/50917050-1856-4678-b62e-c0e165cb3d6c-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "50917050-1856-4678-b62e-c0e165cb3d6c" (UID: "50917050-1856-4678-b62e-c0e165cb3d6c"). InnerVolumeSpecName "ovsdbserver-nb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 11:16:27 crc kubenswrapper[4780]: I1210 11:16:27.699139 4780 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/50917050-1856-4678-b62e-c0e165cb3d6c-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Dec 10 11:16:27 crc kubenswrapper[4780]: I1210 11:16:27.699192 4780 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/50917050-1856-4678-b62e-c0e165cb3d6c-config\") on node \"crc\" DevicePath \"\"" Dec 10 11:16:27 crc kubenswrapper[4780]: I1210 11:16:27.716262 4780 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-85ff748b95-88btm" Dec 10 11:16:27 crc kubenswrapper[4780]: I1210 11:16:27.716269 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-85ff748b95-88btm" event={"ID":"50917050-1856-4678-b62e-c0e165cb3d6c","Type":"ContainerDied","Data":"f6ec91e260bfe51b1b102b9fb54bf3171b562329875b1b92dde70b1836c91478"} Dec 10 11:16:27 crc kubenswrapper[4780]: I1210 11:16:27.718341 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Dec 10 11:16:27 crc kubenswrapper[4780]: I1210 11:16:27.718384 4780 scope.go:117] "RemoveContainer" containerID="48fcc54d3d11c4b6abf849b6b88d982df04943b26932a60524cc943d5dd3a171" Dec 10 11:16:27 crc kubenswrapper[4780]: I1210 11:16:27.773376 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"bbd2dcb7-ece1-4a2f-83ec-a4aa131720ad","Type":"ContainerStarted","Data":"e1b4906b04e6d4fd4d1a30cae73bc876f479a149c6bc55cf14ee49614a2fd1b3"} Dec 10 11:16:27 crc kubenswrapper[4780]: I1210 11:16:27.846533 4780 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-56ccbfd9d6-x59x5" podUID="378bdac2-e552-48df-8bac-fd4300d016e5" containerName="barbican-api-log" probeResult="failure" output="Get \"http://10.217.0.201:9311/healthcheck\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Dec 10 11:16:27 crc kubenswrapper[4780]: I1210 11:16:27.846856 4780 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-56ccbfd9d6-x59x5" podUID="378bdac2-e552-48df-8bac-fd4300d016e5" containerName="barbican-api" probeResult="failure" output="Get \"http://10.217.0.201:9311/healthcheck\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Dec 10 11:16:27 crc kubenswrapper[4780]: I1210 11:16:27.883694 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5c9776ccc5-wkfgs"] Dec 10 11:16:28 crc kubenswrapper[4780]: I1210 11:16:28.053170 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-85ff748b95-88btm"] Dec 10 11:16:28 crc kubenswrapper[4780]: I1210 11:16:28.080211 4780 scope.go:117] "RemoveContainer" containerID="7aec1fea9e85dfda98c5ef3dde03c440320447f04fb7542721402af2cd306de0" Dec 10 11:16:28 crc kubenswrapper[4780]: I1210 11:16:28.090612 4780 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-85ff748b95-88btm"] Dec 10 11:16:28 crc kubenswrapper[4780]: I1210 11:16:28.179215 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/placement-75499b8cb8-8226n" Dec 10 11:16:28 crc kubenswrapper[4780]: I1210 11:16:28.865814 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" 
event={"ID":"61b91736-8f72-474a-ab9a-8071b34b8458","Type":"ContainerStarted","Data":"5017c513fe0dd1f7cfbcdb16d94d89056c2d210a35b0e283f12a6fa44dce4963"} Dec 10 11:16:28 crc kubenswrapper[4780]: I1210 11:16:28.875113 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5c9776ccc5-wkfgs" event={"ID":"7e45c931-6a45-4eb5-b511-3be5b47c2f4f","Type":"ContainerStarted","Data":"6f95dcac5251b2475340432d6ea8ce559a86a527c357e9efc597e80c7ec3a760"} Dec 10 11:16:29 crc kubenswrapper[4780]: I1210 11:16:29.029071 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-api-0"] Dec 10 11:16:29 crc kubenswrapper[4780]: I1210 11:16:29.095303 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/placement-75499b8cb8-8226n" Dec 10 11:16:30 crc kubenswrapper[4780]: I1210 11:16:30.006005 4780 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="50917050-1856-4678-b62e-c0e165cb3d6c" path="/var/lib/kubelet/pods/50917050-1856-4678-b62e-c0e165cb3d6c/volumes" Dec 10 11:16:30 crc kubenswrapper[4780]: I1210 11:16:30.020218 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5c9776ccc5-wkfgs" event={"ID":"7e45c931-6a45-4eb5-b511-3be5b47c2f4f","Type":"ContainerStarted","Data":"f6ace8dfe64718b23c500adcd34c8fe4675d4c79cd3cf8b4e4e9695435e57217"} Dec 10 11:16:31 crc kubenswrapper[4780]: I1210 11:16:31.122296 4780 generic.go:334] "Generic (PLEG): container finished" podID="7e45c931-6a45-4eb5-b511-3be5b47c2f4f" containerID="f6ace8dfe64718b23c500adcd34c8fe4675d4c79cd3cf8b4e4e9695435e57217" exitCode=0 Dec 10 11:16:31 crc kubenswrapper[4780]: I1210 11:16:31.122903 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5c9776ccc5-wkfgs" event={"ID":"7e45c931-6a45-4eb5-b511-3be5b47c2f4f","Type":"ContainerDied","Data":"f6ace8dfe64718b23c500adcd34c8fe4675d4c79cd3cf8b4e4e9695435e57217"} Dec 10 11:16:31 crc kubenswrapper[4780]: I1210 11:16:31.132474 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"61b91736-8f72-474a-ab9a-8071b34b8458","Type":"ContainerStarted","Data":"7a0f17241623d0b7a10d412d7f9d4ed1f4ce00f6f86a018165d2d6c067871eac"} Dec 10 11:16:31 crc kubenswrapper[4780]: I1210 11:16:31.271415 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/keystone-8579f5f4d5-t2zl7" Dec 10 11:16:31 crc kubenswrapper[4780]: I1210 11:16:31.581160 4780 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-684545bb8-fmwfm" podUID="2fcbf0b1-90a6-47f6-8bdf-f72f91cdc290" containerName="barbican-api-log" probeResult="failure" output="Get \"https://10.217.0.203:9311/healthcheck\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Dec 10 11:16:31 crc kubenswrapper[4780]: I1210 11:16:31.961245 4780 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/barbican-api-56ccbfd9d6-x59x5" podUID="378bdac2-e552-48df-8bac-fd4300d016e5" containerName="barbican-api" probeResult="failure" output="Get \"http://10.217.0.201:9311/healthcheck\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Dec 10 11:16:31 crc kubenswrapper[4780]: I1210 11:16:31.962845 4780 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/barbican-api-56ccbfd9d6-x59x5" podUID="378bdac2-e552-48df-8bac-fd4300d016e5" containerName="barbican-api-log" probeResult="failure" output="Get \"http://10.217.0.201:9311/healthcheck\": context deadline exceeded 
(Client.Timeout exceeded while awaiting headers)" Dec 10 11:16:31 crc kubenswrapper[4780]: I1210 11:16:31.971487 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-56ccbfd9d6-x59x5" Dec 10 11:16:32 crc kubenswrapper[4780]: I1210 11:16:32.236757 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5c9776ccc5-wkfgs" event={"ID":"7e45c931-6a45-4eb5-b511-3be5b47c2f4f","Type":"ContainerStarted","Data":"970290ae16f5d622a134ec7215bb08fd80d8d9c966aa67a88af90738884d8672"} Dec 10 11:16:32 crc kubenswrapper[4780]: I1210 11:16:32.270254 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"bbd2dcb7-ece1-4a2f-83ec-a4aa131720ad","Type":"ContainerStarted","Data":"ab820b77c0b60a6dcff19329e78086c21314c501f2e40f7c34cdaf1d264ff34c"} Dec 10 11:16:32 crc kubenswrapper[4780]: I1210 11:16:32.562798 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-56ccbfd9d6-x59x5" Dec 10 11:16:32 crc kubenswrapper[4780]: I1210 11:16:32.959725 4780 scope.go:117] "RemoveContainer" containerID="90f38aa24e74e78a263a82b742bebf91991ff25f2212114904e33abbbd82de16" Dec 10 11:16:32 crc kubenswrapper[4780]: E1210 11:16:32.960155 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xhdr5_openshift-machine-config-operator(6bf1dca1-b191-4796-b326-baac53e84045)\"" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" podUID="6bf1dca1-b191-4796-b326-baac53e84045" Dec 10 11:16:33 crc kubenswrapper[4780]: I1210 11:16:33.235363 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstackclient"] Dec 10 11:16:33 crc kubenswrapper[4780]: E1210 11:16:33.236453 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="50917050-1856-4678-b62e-c0e165cb3d6c" containerName="init" Dec 10 11:16:33 crc kubenswrapper[4780]: I1210 11:16:33.236605 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="50917050-1856-4678-b62e-c0e165cb3d6c" containerName="init" Dec 10 11:16:33 crc kubenswrapper[4780]: E1210 11:16:33.236702 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="50917050-1856-4678-b62e-c0e165cb3d6c" containerName="dnsmasq-dns" Dec 10 11:16:33 crc kubenswrapper[4780]: I1210 11:16:33.236770 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="50917050-1856-4678-b62e-c0e165cb3d6c" containerName="dnsmasq-dns" Dec 10 11:16:33 crc kubenswrapper[4780]: I1210 11:16:33.237184 4780 memory_manager.go:354] "RemoveStaleState removing state" podUID="50917050-1856-4678-b62e-c0e165cb3d6c" containerName="dnsmasq-dns" Dec 10 11:16:33 crc kubenswrapper[4780]: I1210 11:16:33.240304 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstackclient" Dec 10 11:16:33 crc kubenswrapper[4780]: I1210 11:16:33.249573 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstackclient-openstackclient-dockercfg-2lzv2" Dec 10 11:16:33 crc kubenswrapper[4780]: I1210 11:16:33.250300 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-config" Dec 10 11:16:33 crc kubenswrapper[4780]: I1210 11:16:33.250464 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-config-secret" Dec 10 11:16:33 crc kubenswrapper[4780]: I1210 11:16:33.288991 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstackclient"] Dec 10 11:16:33 crc kubenswrapper[4780]: I1210 11:16:33.350072 4780 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-api-0" podUID="61b91736-8f72-474a-ab9a-8071b34b8458" containerName="cinder-api-log" containerID="cri-o://7a0f17241623d0b7a10d412d7f9d4ed1f4ce00f6f86a018165d2d6c067871eac" gracePeriod=30 Dec 10 11:16:33 crc kubenswrapper[4780]: I1210 11:16:33.350739 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"61b91736-8f72-474a-ab9a-8071b34b8458","Type":"ContainerStarted","Data":"42f10a63308c54591ad505ace74d1d8c57eacc83cfc28a457b5dea8002c02559"} Dec 10 11:16:33 crc kubenswrapper[4780]: I1210 11:16:33.350905 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-5c9776ccc5-wkfgs" Dec 10 11:16:33 crc kubenswrapper[4780]: I1210 11:16:33.351331 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/cinder-api-0" Dec 10 11:16:33 crc kubenswrapper[4780]: I1210 11:16:33.351120 4780 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-api-0" podUID="61b91736-8f72-474a-ab9a-8071b34b8458" containerName="cinder-api" containerID="cri-o://42f10a63308c54591ad505ace74d1d8c57eacc83cfc28a457b5dea8002c02559" gracePeriod=30 Dec 10 11:16:33 crc kubenswrapper[4780]: I1210 11:16:33.360385 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4bvhk\" (UniqueName: \"kubernetes.io/projected/e74ddef3-dfb9-4409-9920-1cad0dc2492c-kube-api-access-4bvhk\") pod \"openstackclient\" (UID: \"e74ddef3-dfb9-4409-9920-1cad0dc2492c\") " pod="openstack/openstackclient" Dec 10 11:16:33 crc kubenswrapper[4780]: I1210 11:16:33.360580 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e74ddef3-dfb9-4409-9920-1cad0dc2492c-combined-ca-bundle\") pod \"openstackclient\" (UID: \"e74ddef3-dfb9-4409-9920-1cad0dc2492c\") " pod="openstack/openstackclient" Dec 10 11:16:33 crc kubenswrapper[4780]: I1210 11:16:33.360798 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/e74ddef3-dfb9-4409-9920-1cad0dc2492c-openstack-config\") pod \"openstackclient\" (UID: \"e74ddef3-dfb9-4409-9920-1cad0dc2492c\") " pod="openstack/openstackclient" Dec 10 11:16:33 crc kubenswrapper[4780]: I1210 11:16:33.360967 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/e74ddef3-dfb9-4409-9920-1cad0dc2492c-openstack-config-secret\") pod \"openstackclient\" (UID: 
\"e74ddef3-dfb9-4409-9920-1cad0dc2492c\") " pod="openstack/openstackclient" Dec 10 11:16:33 crc kubenswrapper[4780]: I1210 11:16:33.403040 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-5c9776ccc5-wkfgs" podStartSLOduration=9.403002781 podStartE2EDuration="9.403002781s" podCreationTimestamp="2025-12-10 11:16:24 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 11:16:33.379503429 +0000 UTC m=+1898.232896872" watchObservedRunningTime="2025-12-10 11:16:33.403002781 +0000 UTC m=+1898.256396244" Dec 10 11:16:33 crc kubenswrapper[4780]: I1210 11:16:33.459365 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-api-0" podStartSLOduration=9.459328065 podStartE2EDuration="9.459328065s" podCreationTimestamp="2025-12-10 11:16:24 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 11:16:33.443185111 +0000 UTC m=+1898.296578564" watchObservedRunningTime="2025-12-10 11:16:33.459328065 +0000 UTC m=+1898.312721508" Dec 10 11:16:33 crc kubenswrapper[4780]: I1210 11:16:33.465828 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4bvhk\" (UniqueName: \"kubernetes.io/projected/e74ddef3-dfb9-4409-9920-1cad0dc2492c-kube-api-access-4bvhk\") pod \"openstackclient\" (UID: \"e74ddef3-dfb9-4409-9920-1cad0dc2492c\") " pod="openstack/openstackclient" Dec 10 11:16:33 crc kubenswrapper[4780]: I1210 11:16:33.466165 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e74ddef3-dfb9-4409-9920-1cad0dc2492c-combined-ca-bundle\") pod \"openstackclient\" (UID: \"e74ddef3-dfb9-4409-9920-1cad0dc2492c\") " pod="openstack/openstackclient" Dec 10 11:16:33 crc kubenswrapper[4780]: I1210 11:16:33.466380 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/e74ddef3-dfb9-4409-9920-1cad0dc2492c-openstack-config\") pod \"openstackclient\" (UID: \"e74ddef3-dfb9-4409-9920-1cad0dc2492c\") " pod="openstack/openstackclient" Dec 10 11:16:33 crc kubenswrapper[4780]: I1210 11:16:33.466532 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/e74ddef3-dfb9-4409-9920-1cad0dc2492c-openstack-config-secret\") pod \"openstackclient\" (UID: \"e74ddef3-dfb9-4409-9920-1cad0dc2492c\") " pod="openstack/openstackclient" Dec 10 11:16:33 crc kubenswrapper[4780]: I1210 11:16:33.474433 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/e74ddef3-dfb9-4409-9920-1cad0dc2492c-openstack-config\") pod \"openstackclient\" (UID: \"e74ddef3-dfb9-4409-9920-1cad0dc2492c\") " pod="openstack/openstackclient" Dec 10 11:16:33 crc kubenswrapper[4780]: I1210 11:16:33.486977 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e74ddef3-dfb9-4409-9920-1cad0dc2492c-combined-ca-bundle\") pod \"openstackclient\" (UID: \"e74ddef3-dfb9-4409-9920-1cad0dc2492c\") " pod="openstack/openstackclient" Dec 10 11:16:33 crc kubenswrapper[4780]: I1210 11:16:33.488757 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/e74ddef3-dfb9-4409-9920-1cad0dc2492c-openstack-config-secret\") pod \"openstackclient\" (UID: \"e74ddef3-dfb9-4409-9920-1cad0dc2492c\") " pod="openstack/openstackclient" Dec 10 11:16:33 crc kubenswrapper[4780]: I1210 11:16:33.524979 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4bvhk\" (UniqueName: \"kubernetes.io/projected/e74ddef3-dfb9-4409-9920-1cad0dc2492c-kube-api-access-4bvhk\") pod \"openstackclient\" (UID: \"e74ddef3-dfb9-4409-9920-1cad0dc2492c\") " pod="openstack/openstackclient" Dec 10 11:16:33 crc kubenswrapper[4780]: I1210 11:16:33.579941 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstackclient" Dec 10 11:16:34 crc kubenswrapper[4780]: I1210 11:16:34.372942 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"bbd2dcb7-ece1-4a2f-83ec-a4aa131720ad","Type":"ContainerStarted","Data":"9ac6cd54caff2b52e8650e6f398b5b05baaad8aee031bff01f3cb589b7092393"} Dec 10 11:16:34 crc kubenswrapper[4780]: I1210 11:16:34.388189 4780 generic.go:334] "Generic (PLEG): container finished" podID="61b91736-8f72-474a-ab9a-8071b34b8458" containerID="7a0f17241623d0b7a10d412d7f9d4ed1f4ce00f6f86a018165d2d6c067871eac" exitCode=143 Dec 10 11:16:34 crc kubenswrapper[4780]: I1210 11:16:34.388321 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"61b91736-8f72-474a-ab9a-8071b34b8458","Type":"ContainerDied","Data":"7a0f17241623d0b7a10d412d7f9d4ed1f4ce00f6f86a018165d2d6c067871eac"} Dec 10 11:16:34 crc kubenswrapper[4780]: I1210 11:16:34.404999 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-scheduler-0" podStartSLOduration=7.172094234 podStartE2EDuration="10.404959383s" podCreationTimestamp="2025-12-10 11:16:24 +0000 UTC" firstStartedPulling="2025-12-10 11:16:26.885102387 +0000 UTC m=+1891.738495830" lastFinishedPulling="2025-12-10 11:16:30.117967536 +0000 UTC m=+1894.971360979" observedRunningTime="2025-12-10 11:16:34.403764182 +0000 UTC m=+1899.257157625" watchObservedRunningTime="2025-12-10 11:16:34.404959383 +0000 UTC m=+1899.258352826" Dec 10 11:16:34 crc kubenswrapper[4780]: I1210 11:16:34.502685 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstackclient"] Dec 10 11:16:34 crc kubenswrapper[4780]: W1210 11:16:34.508031 4780 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode74ddef3_dfb9_4409_9920_1cad0dc2492c.slice/crio-1700096b2288be52d9406d4da401c1606f8535f008918058aca9ea0142b8615d WatchSource:0}: Error finding container 1700096b2288be52d9406d4da401c1606f8535f008918058aca9ea0142b8615d: Status 404 returned error can't find the container with id 1700096b2288be52d9406d4da401c1606f8535f008918058aca9ea0142b8615d Dec 10 11:16:34 crc kubenswrapper[4780]: I1210 11:16:34.936355 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-684545bb8-fmwfm" Dec 10 11:16:35 crc kubenswrapper[4780]: I1210 11:16:35.440266 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstackclient" event={"ID":"e74ddef3-dfb9-4409-9920-1cad0dc2492c","Type":"ContainerStarted","Data":"1700096b2288be52d9406d4da401c1606f8535f008918058aca9ea0142b8615d"} Dec 10 11:16:35 crc kubenswrapper[4780]: I1210 11:16:35.447131 4780 kubelet.go:2542] "SyncLoop (probe)" probe="startup" 
status="unhealthy" pod="openstack/cinder-scheduler-0" Dec 10 11:16:35 crc kubenswrapper[4780]: I1210 11:16:35.594999 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-684545bb8-fmwfm" Dec 10 11:16:35 crc kubenswrapper[4780]: I1210 11:16:35.706958 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-api-56ccbfd9d6-x59x5"] Dec 10 11:16:35 crc kubenswrapper[4780]: I1210 11:16:35.707375 4780 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-api-56ccbfd9d6-x59x5" podUID="378bdac2-e552-48df-8bac-fd4300d016e5" containerName="barbican-api-log" containerID="cri-o://7f9a9989ef7beb4efab75e5acb363453094cb5853fd567812b5ca17940fd14b4" gracePeriod=30 Dec 10 11:16:35 crc kubenswrapper[4780]: I1210 11:16:35.708161 4780 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-api-56ccbfd9d6-x59x5" podUID="378bdac2-e552-48df-8bac-fd4300d016e5" containerName="barbican-api" containerID="cri-o://73545b11b81b920bd41b372f79463bc1e603f10ba535527eca9a3b8b2f0e0fe1" gracePeriod=30 Dec 10 11:16:36 crc kubenswrapper[4780]: E1210 11:16:36.214251 4780 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod378bdac2_e552_48df_8bac_fd4300d016e5.slice/crio-conmon-7f9a9989ef7beb4efab75e5acb363453094cb5853fd567812b5ca17940fd14b4.scope\": RecentStats: unable to find data in memory cache]" Dec 10 11:16:36 crc kubenswrapper[4780]: I1210 11:16:36.467107 4780 generic.go:334] "Generic (PLEG): container finished" podID="378bdac2-e552-48df-8bac-fd4300d016e5" containerID="7f9a9989ef7beb4efab75e5acb363453094cb5853fd567812b5ca17940fd14b4" exitCode=143 Dec 10 11:16:36 crc kubenswrapper[4780]: I1210 11:16:36.467199 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-56ccbfd9d6-x59x5" event={"ID":"378bdac2-e552-48df-8bac-fd4300d016e5","Type":"ContainerDied","Data":"7f9a9989ef7beb4efab75e5acb363453094cb5853fd567812b5ca17940fd14b4"} Dec 10 11:16:38 crc kubenswrapper[4780]: I1210 11:16:38.992357 4780 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-56ccbfd9d6-x59x5" podUID="378bdac2-e552-48df-8bac-fd4300d016e5" containerName="barbican-api-log" probeResult="failure" output="Get \"http://10.217.0.201:9311/healthcheck\": read tcp 10.217.0.2:39854->10.217.0.201:9311: read: connection reset by peer" Dec 10 11:16:38 crc kubenswrapper[4780]: I1210 11:16:38.992475 4780 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-56ccbfd9d6-x59x5" podUID="378bdac2-e552-48df-8bac-fd4300d016e5" containerName="barbican-api" probeResult="failure" output="Get \"http://10.217.0.201:9311/healthcheck\": read tcp 10.217.0.2:39844->10.217.0.201:9311: read: connection reset by peer" Dec 10 11:16:39 crc kubenswrapper[4780]: I1210 11:16:39.561770 4780 generic.go:334] "Generic (PLEG): container finished" podID="378bdac2-e552-48df-8bac-fd4300d016e5" containerID="73545b11b81b920bd41b372f79463bc1e603f10ba535527eca9a3b8b2f0e0fe1" exitCode=0 Dec 10 11:16:39 crc kubenswrapper[4780]: I1210 11:16:39.562364 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-56ccbfd9d6-x59x5" event={"ID":"378bdac2-e552-48df-8bac-fd4300d016e5","Type":"ContainerDied","Data":"73545b11b81b920bd41b372f79463bc1e603f10ba535527eca9a3b8b2f0e0fe1"} Dec 10 11:16:39 crc kubenswrapper[4780]: I1210 11:16:39.705381 4780 
util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-56ccbfd9d6-x59x5" Dec 10 11:16:39 crc kubenswrapper[4780]: I1210 11:16:39.825280 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/378bdac2-e552-48df-8bac-fd4300d016e5-combined-ca-bundle\") pod \"378bdac2-e552-48df-8bac-fd4300d016e5\" (UID: \"378bdac2-e552-48df-8bac-fd4300d016e5\") " Dec 10 11:16:39 crc kubenswrapper[4780]: I1210 11:16:39.825704 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xzfrj\" (UniqueName: \"kubernetes.io/projected/378bdac2-e552-48df-8bac-fd4300d016e5-kube-api-access-xzfrj\") pod \"378bdac2-e552-48df-8bac-fd4300d016e5\" (UID: \"378bdac2-e552-48df-8bac-fd4300d016e5\") " Dec 10 11:16:39 crc kubenswrapper[4780]: I1210 11:16:39.825834 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/378bdac2-e552-48df-8bac-fd4300d016e5-config-data\") pod \"378bdac2-e552-48df-8bac-fd4300d016e5\" (UID: \"378bdac2-e552-48df-8bac-fd4300d016e5\") " Dec 10 11:16:39 crc kubenswrapper[4780]: I1210 11:16:39.825971 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/378bdac2-e552-48df-8bac-fd4300d016e5-logs\") pod \"378bdac2-e552-48df-8bac-fd4300d016e5\" (UID: \"378bdac2-e552-48df-8bac-fd4300d016e5\") " Dec 10 11:16:39 crc kubenswrapper[4780]: I1210 11:16:39.826089 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/378bdac2-e552-48df-8bac-fd4300d016e5-config-data-custom\") pod \"378bdac2-e552-48df-8bac-fd4300d016e5\" (UID: \"378bdac2-e552-48df-8bac-fd4300d016e5\") " Dec 10 11:16:39 crc kubenswrapper[4780]: I1210 11:16:39.827836 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/378bdac2-e552-48df-8bac-fd4300d016e5-logs" (OuterVolumeSpecName: "logs") pod "378bdac2-e552-48df-8bac-fd4300d016e5" (UID: "378bdac2-e552-48df-8bac-fd4300d016e5"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 11:16:39 crc kubenswrapper[4780]: I1210 11:16:39.835256 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/378bdac2-e552-48df-8bac-fd4300d016e5-kube-api-access-xzfrj" (OuterVolumeSpecName: "kube-api-access-xzfrj") pod "378bdac2-e552-48df-8bac-fd4300d016e5" (UID: "378bdac2-e552-48df-8bac-fd4300d016e5"). InnerVolumeSpecName "kube-api-access-xzfrj". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:16:39 crc kubenswrapper[4780]: I1210 11:16:39.838866 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/378bdac2-e552-48df-8bac-fd4300d016e5-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "378bdac2-e552-48df-8bac-fd4300d016e5" (UID: "378bdac2-e552-48df-8bac-fd4300d016e5"). InnerVolumeSpecName "config-data-custom". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:16:39 crc kubenswrapper[4780]: I1210 11:16:39.877157 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/378bdac2-e552-48df-8bac-fd4300d016e5-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "378bdac2-e552-48df-8bac-fd4300d016e5" (UID: "378bdac2-e552-48df-8bac-fd4300d016e5"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:16:39 crc kubenswrapper[4780]: I1210 11:16:39.907506 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/378bdac2-e552-48df-8bac-fd4300d016e5-config-data" (OuterVolumeSpecName: "config-data") pod "378bdac2-e552-48df-8bac-fd4300d016e5" (UID: "378bdac2-e552-48df-8bac-fd4300d016e5"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:16:39 crc kubenswrapper[4780]: I1210 11:16:39.930691 4780 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xzfrj\" (UniqueName: \"kubernetes.io/projected/378bdac2-e552-48df-8bac-fd4300d016e5-kube-api-access-xzfrj\") on node \"crc\" DevicePath \"\"" Dec 10 11:16:39 crc kubenswrapper[4780]: I1210 11:16:39.930745 4780 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/378bdac2-e552-48df-8bac-fd4300d016e5-config-data\") on node \"crc\" DevicePath \"\"" Dec 10 11:16:39 crc kubenswrapper[4780]: I1210 11:16:39.930758 4780 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/378bdac2-e552-48df-8bac-fd4300d016e5-logs\") on node \"crc\" DevicePath \"\"" Dec 10 11:16:39 crc kubenswrapper[4780]: I1210 11:16:39.930771 4780 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/378bdac2-e552-48df-8bac-fd4300d016e5-config-data-custom\") on node \"crc\" DevicePath \"\"" Dec 10 11:16:39 crc kubenswrapper[4780]: I1210 11:16:39.930780 4780 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/378bdac2-e552-48df-8bac-fd4300d016e5-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 10 11:16:40 crc kubenswrapper[4780]: I1210 11:16:40.431081 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ceilometer-0" Dec 10 11:16:40 crc kubenswrapper[4780]: I1210 11:16:40.590942 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/swift-proxy-6579b964d7-f7kj9"] Dec 10 11:16:40 crc kubenswrapper[4780]: E1210 11:16:40.591865 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="378bdac2-e552-48df-8bac-fd4300d016e5" containerName="barbican-api-log" Dec 10 11:16:40 crc kubenswrapper[4780]: I1210 11:16:40.591898 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="378bdac2-e552-48df-8bac-fd4300d016e5" containerName="barbican-api-log" Dec 10 11:16:40 crc kubenswrapper[4780]: E1210 11:16:40.591964 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="378bdac2-e552-48df-8bac-fd4300d016e5" containerName="barbican-api" Dec 10 11:16:40 crc kubenswrapper[4780]: I1210 11:16:40.591981 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="378bdac2-e552-48df-8bac-fd4300d016e5" containerName="barbican-api" Dec 10 11:16:40 crc kubenswrapper[4780]: I1210 11:16:40.592411 4780 memory_manager.go:354] "RemoveStaleState removing state" podUID="378bdac2-e552-48df-8bac-fd4300d016e5" 
containerName="barbican-api-log" Dec 10 11:16:40 crc kubenswrapper[4780]: I1210 11:16:40.592473 4780 memory_manager.go:354] "RemoveStaleState removing state" podUID="378bdac2-e552-48df-8bac-fd4300d016e5" containerName="barbican-api" Dec 10 11:16:40 crc kubenswrapper[4780]: I1210 11:16:40.598096 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-proxy-6579b964d7-f7kj9" Dec 10 11:16:40 crc kubenswrapper[4780]: I1210 11:16:40.604226 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-swift-public-svc" Dec 10 11:16:40 crc kubenswrapper[4780]: I1210 11:16:40.605651 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-swift-internal-svc" Dec 10 11:16:40 crc kubenswrapper[4780]: I1210 11:16:40.606139 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-proxy-config-data" Dec 10 11:16:40 crc kubenswrapper[4780]: I1210 11:16:40.607014 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-56ccbfd9d6-x59x5" event={"ID":"378bdac2-e552-48df-8bac-fd4300d016e5","Type":"ContainerDied","Data":"c37cee7a29fa7d5477cc7bdef6707f05420c51edd452e4a7ca3be5f3ad210c4a"} Dec 10 11:16:40 crc kubenswrapper[4780]: I1210 11:16:40.607108 4780 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-56ccbfd9d6-x59x5" Dec 10 11:16:40 crc kubenswrapper[4780]: I1210 11:16:40.607113 4780 scope.go:117] "RemoveContainer" containerID="73545b11b81b920bd41b372f79463bc1e603f10ba535527eca9a3b8b2f0e0fe1" Dec 10 11:16:40 crc kubenswrapper[4780]: I1210 11:16:40.617748 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-proxy-6579b964d7-f7kj9"] Dec 10 11:16:40 crc kubenswrapper[4780]: I1210 11:16:40.712381 4780 scope.go:117] "RemoveContainer" containerID="7f9a9989ef7beb4efab75e5acb363453094cb5853fd567812b5ca17940fd14b4" Dec 10 11:16:40 crc kubenswrapper[4780]: I1210 11:16:40.723333 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-api-56ccbfd9d6-x59x5"] Dec 10 11:16:40 crc kubenswrapper[4780]: I1210 11:16:40.728519 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/73526536-c600-49b3-b73d-2897a05ce69e-run-httpd\") pod \"swift-proxy-6579b964d7-f7kj9\" (UID: \"73526536-c600-49b3-b73d-2897a05ce69e\") " pod="openstack/swift-proxy-6579b964d7-f7kj9" Dec 10 11:16:40 crc kubenswrapper[4780]: I1210 11:16:40.728772 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/73526536-c600-49b3-b73d-2897a05ce69e-etc-swift\") pod \"swift-proxy-6579b964d7-f7kj9\" (UID: \"73526536-c600-49b3-b73d-2897a05ce69e\") " pod="openstack/swift-proxy-6579b964d7-f7kj9" Dec 10 11:16:40 crc kubenswrapper[4780]: I1210 11:16:40.728850 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/73526536-c600-49b3-b73d-2897a05ce69e-config-data\") pod \"swift-proxy-6579b964d7-f7kj9\" (UID: \"73526536-c600-49b3-b73d-2897a05ce69e\") " pod="openstack/swift-proxy-6579b964d7-f7kj9" Dec 10 11:16:40 crc kubenswrapper[4780]: I1210 11:16:40.728945 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: 
\"kubernetes.io/secret/73526536-c600-49b3-b73d-2897a05ce69e-public-tls-certs\") pod \"swift-proxy-6579b964d7-f7kj9\" (UID: \"73526536-c600-49b3-b73d-2897a05ce69e\") " pod="openstack/swift-proxy-6579b964d7-f7kj9" Dec 10 11:16:40 crc kubenswrapper[4780]: I1210 11:16:40.729211 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/73526536-c600-49b3-b73d-2897a05ce69e-combined-ca-bundle\") pod \"swift-proxy-6579b964d7-f7kj9\" (UID: \"73526536-c600-49b3-b73d-2897a05ce69e\") " pod="openstack/swift-proxy-6579b964d7-f7kj9" Dec 10 11:16:40 crc kubenswrapper[4780]: I1210 11:16:40.729543 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/73526536-c600-49b3-b73d-2897a05ce69e-internal-tls-certs\") pod \"swift-proxy-6579b964d7-f7kj9\" (UID: \"73526536-c600-49b3-b73d-2897a05ce69e\") " pod="openstack/swift-proxy-6579b964d7-f7kj9" Dec 10 11:16:40 crc kubenswrapper[4780]: I1210 11:16:40.729696 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/73526536-c600-49b3-b73d-2897a05ce69e-log-httpd\") pod \"swift-proxy-6579b964d7-f7kj9\" (UID: \"73526536-c600-49b3-b73d-2897a05ce69e\") " pod="openstack/swift-proxy-6579b964d7-f7kj9" Dec 10 11:16:40 crc kubenswrapper[4780]: I1210 11:16:40.730006 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tktdj\" (UniqueName: \"kubernetes.io/projected/73526536-c600-49b3-b73d-2897a05ce69e-kube-api-access-tktdj\") pod \"swift-proxy-6579b964d7-f7kj9\" (UID: \"73526536-c600-49b3-b73d-2897a05ce69e\") " pod="openstack/swift-proxy-6579b964d7-f7kj9" Dec 10 11:16:40 crc kubenswrapper[4780]: I1210 11:16:40.752228 4780 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-api-56ccbfd9d6-x59x5"] Dec 10 11:16:40 crc kubenswrapper[4780]: I1210 11:16:40.760285 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-5c9776ccc5-wkfgs" Dec 10 11:16:40 crc kubenswrapper[4780]: I1210 11:16:40.835027 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/73526536-c600-49b3-b73d-2897a05ce69e-etc-swift\") pod \"swift-proxy-6579b964d7-f7kj9\" (UID: \"73526536-c600-49b3-b73d-2897a05ce69e\") " pod="openstack/swift-proxy-6579b964d7-f7kj9" Dec 10 11:16:40 crc kubenswrapper[4780]: I1210 11:16:40.835115 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/73526536-c600-49b3-b73d-2897a05ce69e-config-data\") pod \"swift-proxy-6579b964d7-f7kj9\" (UID: \"73526536-c600-49b3-b73d-2897a05ce69e\") " pod="openstack/swift-proxy-6579b964d7-f7kj9" Dec 10 11:16:40 crc kubenswrapper[4780]: I1210 11:16:40.835160 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/73526536-c600-49b3-b73d-2897a05ce69e-public-tls-certs\") pod \"swift-proxy-6579b964d7-f7kj9\" (UID: \"73526536-c600-49b3-b73d-2897a05ce69e\") " pod="openstack/swift-proxy-6579b964d7-f7kj9" Dec 10 11:16:40 crc kubenswrapper[4780]: I1210 11:16:40.835215 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/73526536-c600-49b3-b73d-2897a05ce69e-combined-ca-bundle\") pod \"swift-proxy-6579b964d7-f7kj9\" (UID: \"73526536-c600-49b3-b73d-2897a05ce69e\") " pod="openstack/swift-proxy-6579b964d7-f7kj9" Dec 10 11:16:40 crc kubenswrapper[4780]: I1210 11:16:40.835289 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/73526536-c600-49b3-b73d-2897a05ce69e-internal-tls-certs\") pod \"swift-proxy-6579b964d7-f7kj9\" (UID: \"73526536-c600-49b3-b73d-2897a05ce69e\") " pod="openstack/swift-proxy-6579b964d7-f7kj9" Dec 10 11:16:40 crc kubenswrapper[4780]: I1210 11:16:40.835329 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/73526536-c600-49b3-b73d-2897a05ce69e-log-httpd\") pod \"swift-proxy-6579b964d7-f7kj9\" (UID: \"73526536-c600-49b3-b73d-2897a05ce69e\") " pod="openstack/swift-proxy-6579b964d7-f7kj9" Dec 10 11:16:40 crc kubenswrapper[4780]: I1210 11:16:40.835401 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tktdj\" (UniqueName: \"kubernetes.io/projected/73526536-c600-49b3-b73d-2897a05ce69e-kube-api-access-tktdj\") pod \"swift-proxy-6579b964d7-f7kj9\" (UID: \"73526536-c600-49b3-b73d-2897a05ce69e\") " pod="openstack/swift-proxy-6579b964d7-f7kj9" Dec 10 11:16:40 crc kubenswrapper[4780]: I1210 11:16:40.835577 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/73526536-c600-49b3-b73d-2897a05ce69e-run-httpd\") pod \"swift-proxy-6579b964d7-f7kj9\" (UID: \"73526536-c600-49b3-b73d-2897a05ce69e\") " pod="openstack/swift-proxy-6579b964d7-f7kj9" Dec 10 11:16:40 crc kubenswrapper[4780]: I1210 11:16:40.836368 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/73526536-c600-49b3-b73d-2897a05ce69e-run-httpd\") pod \"swift-proxy-6579b964d7-f7kj9\" (UID: \"73526536-c600-49b3-b73d-2897a05ce69e\") " pod="openstack/swift-proxy-6579b964d7-f7kj9" Dec 10 11:16:40 crc kubenswrapper[4780]: I1210 11:16:40.837097 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/73526536-c600-49b3-b73d-2897a05ce69e-log-httpd\") pod \"swift-proxy-6579b964d7-f7kj9\" (UID: \"73526536-c600-49b3-b73d-2897a05ce69e\") " pod="openstack/swift-proxy-6579b964d7-f7kj9" Dec 10 11:16:40 crc kubenswrapper[4780]: I1210 11:16:40.843509 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/73526536-c600-49b3-b73d-2897a05ce69e-etc-swift\") pod \"swift-proxy-6579b964d7-f7kj9\" (UID: \"73526536-c600-49b3-b73d-2897a05ce69e\") " pod="openstack/swift-proxy-6579b964d7-f7kj9" Dec 10 11:16:40 crc kubenswrapper[4780]: I1210 11:16:40.846900 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/73526536-c600-49b3-b73d-2897a05ce69e-combined-ca-bundle\") pod \"swift-proxy-6579b964d7-f7kj9\" (UID: \"73526536-c600-49b3-b73d-2897a05ce69e\") " pod="openstack/swift-proxy-6579b964d7-f7kj9" Dec 10 11:16:40 crc kubenswrapper[4780]: I1210 11:16:40.893004 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/73526536-c600-49b3-b73d-2897a05ce69e-config-data\") pod \"swift-proxy-6579b964d7-f7kj9\" (UID: 
\"73526536-c600-49b3-b73d-2897a05ce69e\") " pod="openstack/swift-proxy-6579b964d7-f7kj9" Dec 10 11:16:40 crc kubenswrapper[4780]: I1210 11:16:40.893467 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/73526536-c600-49b3-b73d-2897a05ce69e-internal-tls-certs\") pod \"swift-proxy-6579b964d7-f7kj9\" (UID: \"73526536-c600-49b3-b73d-2897a05ce69e\") " pod="openstack/swift-proxy-6579b964d7-f7kj9" Dec 10 11:16:40 crc kubenswrapper[4780]: I1210 11:16:40.894339 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-785d8bcb8c-jppm6"] Dec 10 11:16:40 crc kubenswrapper[4780]: I1210 11:16:40.894971 4780 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-785d8bcb8c-jppm6" podUID="4cf4f40c-d9e0-46ea-bfa6-cfa15257d19a" containerName="dnsmasq-dns" containerID="cri-o://381eef57163efa671b512b67a68dd98bb6df98d41dec1d5227db4ca11e682322" gracePeriod=10 Dec 10 11:16:40 crc kubenswrapper[4780]: I1210 11:16:40.920382 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tktdj\" (UniqueName: \"kubernetes.io/projected/73526536-c600-49b3-b73d-2897a05ce69e-kube-api-access-tktdj\") pod \"swift-proxy-6579b964d7-f7kj9\" (UID: \"73526536-c600-49b3-b73d-2897a05ce69e\") " pod="openstack/swift-proxy-6579b964d7-f7kj9" Dec 10 11:16:40 crc kubenswrapper[4780]: I1210 11:16:40.930729 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/73526536-c600-49b3-b73d-2897a05ce69e-public-tls-certs\") pod \"swift-proxy-6579b964d7-f7kj9\" (UID: \"73526536-c600-49b3-b73d-2897a05ce69e\") " pod="openstack/swift-proxy-6579b964d7-f7kj9" Dec 10 11:16:40 crc kubenswrapper[4780]: I1210 11:16:40.996545 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-proxy-6579b964d7-f7kj9" Dec 10 11:16:41 crc kubenswrapper[4780]: I1210 11:16:41.025728 4780 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/cinder-scheduler-0" Dec 10 11:16:41 crc kubenswrapper[4780]: I1210 11:16:41.182034 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-scheduler-0"] Dec 10 11:16:41 crc kubenswrapper[4780]: I1210 11:16:41.632178 4780 generic.go:334] "Generic (PLEG): container finished" podID="4cf4f40c-d9e0-46ea-bfa6-cfa15257d19a" containerID="381eef57163efa671b512b67a68dd98bb6df98d41dec1d5227db4ca11e682322" exitCode=0 Dec 10 11:16:41 crc kubenswrapper[4780]: I1210 11:16:41.632578 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-785d8bcb8c-jppm6" event={"ID":"4cf4f40c-d9e0-46ea-bfa6-cfa15257d19a","Type":"ContainerDied","Data":"381eef57163efa671b512b67a68dd98bb6df98d41dec1d5227db4ca11e682322"} Dec 10 11:16:41 crc kubenswrapper[4780]: I1210 11:16:41.635007 4780 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-scheduler-0" podUID="bbd2dcb7-ece1-4a2f-83ec-a4aa131720ad" containerName="cinder-scheduler" containerID="cri-o://ab820b77c0b60a6dcff19329e78086c21314c501f2e40f7c34cdaf1d264ff34c" gracePeriod=30 Dec 10 11:16:41 crc kubenswrapper[4780]: I1210 11:16:41.638718 4780 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-scheduler-0" podUID="bbd2dcb7-ece1-4a2f-83ec-a4aa131720ad" containerName="probe" containerID="cri-o://9ac6cd54caff2b52e8650e6f398b5b05baaad8aee031bff01f3cb589b7092393" gracePeriod=30 Dec 10 11:16:41 crc kubenswrapper[4780]: I1210 11:16:41.884679 4780 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-785d8bcb8c-jppm6" Dec 10 11:16:42 crc kubenswrapper[4780]: I1210 11:16:42.000912 4780 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="378bdac2-e552-48df-8bac-fd4300d016e5" path="/var/lib/kubelet/pods/378bdac2-e552-48df-8bac-fd4300d016e5/volumes" Dec 10 11:16:42 crc kubenswrapper[4780]: I1210 11:16:42.037484 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/4cf4f40c-d9e0-46ea-bfa6-cfa15257d19a-ovsdbserver-nb\") pod \"4cf4f40c-d9e0-46ea-bfa6-cfa15257d19a\" (UID: \"4cf4f40c-d9e0-46ea-bfa6-cfa15257d19a\") " Dec 10 11:16:42 crc kubenswrapper[4780]: I1210 11:16:42.038416 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/4cf4f40c-d9e0-46ea-bfa6-cfa15257d19a-ovsdbserver-sb\") pod \"4cf4f40c-d9e0-46ea-bfa6-cfa15257d19a\" (UID: \"4cf4f40c-d9e0-46ea-bfa6-cfa15257d19a\") " Dec 10 11:16:42 crc kubenswrapper[4780]: I1210 11:16:42.038466 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fvdh4\" (UniqueName: \"kubernetes.io/projected/4cf4f40c-d9e0-46ea-bfa6-cfa15257d19a-kube-api-access-fvdh4\") pod \"4cf4f40c-d9e0-46ea-bfa6-cfa15257d19a\" (UID: \"4cf4f40c-d9e0-46ea-bfa6-cfa15257d19a\") " Dec 10 11:16:42 crc kubenswrapper[4780]: I1210 11:16:42.038534 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4cf4f40c-d9e0-46ea-bfa6-cfa15257d19a-config\") pod \"4cf4f40c-d9e0-46ea-bfa6-cfa15257d19a\" (UID: \"4cf4f40c-d9e0-46ea-bfa6-cfa15257d19a\") " Dec 10 11:16:42 crc kubenswrapper[4780]: I1210 11:16:42.038555 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/4cf4f40c-d9e0-46ea-bfa6-cfa15257d19a-dns-swift-storage-0\") pod \"4cf4f40c-d9e0-46ea-bfa6-cfa15257d19a\" (UID: \"4cf4f40c-d9e0-46ea-bfa6-cfa15257d19a\") " Dec 10 11:16:42 crc kubenswrapper[4780]: I1210 11:16:42.038610 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/4cf4f40c-d9e0-46ea-bfa6-cfa15257d19a-dns-svc\") pod \"4cf4f40c-d9e0-46ea-bfa6-cfa15257d19a\" (UID: \"4cf4f40c-d9e0-46ea-bfa6-cfa15257d19a\") " Dec 10 11:16:42 crc kubenswrapper[4780]: I1210 11:16:42.086583 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4cf4f40c-d9e0-46ea-bfa6-cfa15257d19a-kube-api-access-fvdh4" (OuterVolumeSpecName: "kube-api-access-fvdh4") pod "4cf4f40c-d9e0-46ea-bfa6-cfa15257d19a" (UID: "4cf4f40c-d9e0-46ea-bfa6-cfa15257d19a"). InnerVolumeSpecName "kube-api-access-fvdh4". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:16:42 crc kubenswrapper[4780]: I1210 11:16:42.152829 4780 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fvdh4\" (UniqueName: \"kubernetes.io/projected/4cf4f40c-d9e0-46ea-bfa6-cfa15257d19a-kube-api-access-fvdh4\") on node \"crc\" DevicePath \"\"" Dec 10 11:16:42 crc kubenswrapper[4780]: I1210 11:16:42.179322 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-proxy-6579b964d7-f7kj9"] Dec 10 11:16:42 crc kubenswrapper[4780]: I1210 11:16:42.207909 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4cf4f40c-d9e0-46ea-bfa6-cfa15257d19a-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "4cf4f40c-d9e0-46ea-bfa6-cfa15257d19a" (UID: "4cf4f40c-d9e0-46ea-bfa6-cfa15257d19a"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 11:16:42 crc kubenswrapper[4780]: I1210 11:16:42.220087 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4cf4f40c-d9e0-46ea-bfa6-cfa15257d19a-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "4cf4f40c-d9e0-46ea-bfa6-cfa15257d19a" (UID: "4cf4f40c-d9e0-46ea-bfa6-cfa15257d19a"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 11:16:42 crc kubenswrapper[4780]: I1210 11:16:42.232169 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4cf4f40c-d9e0-46ea-bfa6-cfa15257d19a-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "4cf4f40c-d9e0-46ea-bfa6-cfa15257d19a" (UID: "4cf4f40c-d9e0-46ea-bfa6-cfa15257d19a"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 11:16:42 crc kubenswrapper[4780]: I1210 11:16:42.244793 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4cf4f40c-d9e0-46ea-bfa6-cfa15257d19a-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "4cf4f40c-d9e0-46ea-bfa6-cfa15257d19a" (UID: "4cf4f40c-d9e0-46ea-bfa6-cfa15257d19a"). InnerVolumeSpecName "ovsdbserver-nb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 11:16:42 crc kubenswrapper[4780]: I1210 11:16:42.257283 4780 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/4cf4f40c-d9e0-46ea-bfa6-cfa15257d19a-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Dec 10 11:16:42 crc kubenswrapper[4780]: I1210 11:16:42.257334 4780 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/4cf4f40c-d9e0-46ea-bfa6-cfa15257d19a-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Dec 10 11:16:42 crc kubenswrapper[4780]: I1210 11:16:42.257351 4780 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/4cf4f40c-d9e0-46ea-bfa6-cfa15257d19a-dns-svc\") on node \"crc\" DevicePath \"\"" Dec 10 11:16:42 crc kubenswrapper[4780]: I1210 11:16:42.257363 4780 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/4cf4f40c-d9e0-46ea-bfa6-cfa15257d19a-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Dec 10 11:16:42 crc kubenswrapper[4780]: I1210 11:16:42.258732 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4cf4f40c-d9e0-46ea-bfa6-cfa15257d19a-config" (OuterVolumeSpecName: "config") pod "4cf4f40c-d9e0-46ea-bfa6-cfa15257d19a" (UID: "4cf4f40c-d9e0-46ea-bfa6-cfa15257d19a"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 11:16:42 crc kubenswrapper[4780]: I1210 11:16:42.360667 4780 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4cf4f40c-d9e0-46ea-bfa6-cfa15257d19a-config\") on node \"crc\" DevicePath \"\"" Dec 10 11:16:42 crc kubenswrapper[4780]: I1210 11:16:42.523504 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/neutron-5dd58fdf76-hn7kd" Dec 10 11:16:42 crc kubenswrapper[4780]: I1210 11:16:42.682563 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-6579b964d7-f7kj9" event={"ID":"73526536-c600-49b3-b73d-2897a05ce69e","Type":"ContainerStarted","Data":"ea5dd0f2843ad679d6441619099234a8665c2bd073f0ddc7fd9680f56dd5de69"} Dec 10 11:16:42 crc kubenswrapper[4780]: I1210 11:16:42.692246 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-785d8bcb8c-jppm6" event={"ID":"4cf4f40c-d9e0-46ea-bfa6-cfa15257d19a","Type":"ContainerDied","Data":"8774574c49525d5aacd5ff747b8efa04e35ecaebebcd709854bb7b53445749f0"} Dec 10 11:16:42 crc kubenswrapper[4780]: I1210 11:16:42.692338 4780 scope.go:117] "RemoveContainer" containerID="381eef57163efa671b512b67a68dd98bb6df98d41dec1d5227db4ca11e682322" Dec 10 11:16:42 crc kubenswrapper[4780]: I1210 11:16:42.692574 4780 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-785d8bcb8c-jppm6" Dec 10 11:16:42 crc kubenswrapper[4780]: I1210 11:16:42.757483 4780 scope.go:117] "RemoveContainer" containerID="88f1eb62beda2093547d81dd9a04f959fee0e9d6a5b4ddc6da0d6599b1d33956" Dec 10 11:16:42 crc kubenswrapper[4780]: I1210 11:16:42.775998 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-785d8bcb8c-jppm6"] Dec 10 11:16:42 crc kubenswrapper[4780]: I1210 11:16:42.805113 4780 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-785d8bcb8c-jppm6"] Dec 10 11:16:43 crc kubenswrapper[4780]: I1210 11:16:43.737450 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-6579b964d7-f7kj9" event={"ID":"73526536-c600-49b3-b73d-2897a05ce69e","Type":"ContainerStarted","Data":"3c1dfdfe819b6ad189c4edbbbce1234b2545a0cacf0e860779bc6110700b7c2e"} Dec 10 11:16:43 crc kubenswrapper[4780]: I1210 11:16:43.757505 4780 generic.go:334] "Generic (PLEG): container finished" podID="bbd2dcb7-ece1-4a2f-83ec-a4aa131720ad" containerID="9ac6cd54caff2b52e8650e6f398b5b05baaad8aee031bff01f3cb589b7092393" exitCode=0 Dec 10 11:16:43 crc kubenswrapper[4780]: I1210 11:16:43.758179 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"bbd2dcb7-ece1-4a2f-83ec-a4aa131720ad","Type":"ContainerDied","Data":"9ac6cd54caff2b52e8650e6f398b5b05baaad8aee031bff01f3cb589b7092393"} Dec 10 11:16:43 crc kubenswrapper[4780]: I1210 11:16:43.902852 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Dec 10 11:16:43 crc kubenswrapper[4780]: I1210 11:16:43.903293 4780 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="1f099496-ee6c-44ff-9c42-c6584b8fb2ad" containerName="ceilometer-central-agent" containerID="cri-o://a9abfaf819bc7a6dd38ec6a5932abeb0842946e7c626f84e70b57f864190f47e" gracePeriod=30 Dec 10 11:16:43 crc kubenswrapper[4780]: I1210 11:16:43.904013 4780 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="1f099496-ee6c-44ff-9c42-c6584b8fb2ad" containerName="ceilometer-notification-agent" containerID="cri-o://b4fa9fbc70f5848ae91210eb2c0d2674c29b75dde7c0216f7c8d756f517045f2" gracePeriod=30 Dec 10 11:16:43 crc kubenswrapper[4780]: I1210 11:16:43.904079 4780 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="1f099496-ee6c-44ff-9c42-c6584b8fb2ad" containerName="sg-core" containerID="cri-o://e95636e8f3a430b662ce91c03ec6e1e291e4540c649d4504ea274b5088557cbb" gracePeriod=30 Dec 10 11:16:43 crc kubenswrapper[4780]: I1210 11:16:43.904110 4780 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="1f099496-ee6c-44ff-9c42-c6584b8fb2ad" containerName="proxy-httpd" containerID="cri-o://7326f7d0eb9117d311b8a0fce5df5cce919e314e9848cc272e29de1558568644" gracePeriod=30 Dec 10 11:16:43 crc kubenswrapper[4780]: I1210 11:16:43.983627 4780 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4cf4f40c-d9e0-46ea-bfa6-cfa15257d19a" path="/var/lib/kubelet/pods/4cf4f40c-d9e0-46ea-bfa6-cfa15257d19a/volumes" Dec 10 11:16:44 crc kubenswrapper[4780]: I1210 11:16:44.790197 4780 generic.go:334] "Generic (PLEG): container finished" podID="1f099496-ee6c-44ff-9c42-c6584b8fb2ad" containerID="e95636e8f3a430b662ce91c03ec6e1e291e4540c649d4504ea274b5088557cbb" exitCode=2 Dec 10 11:16:44 crc kubenswrapper[4780]: I1210 
11:16:44.792109 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"1f099496-ee6c-44ff-9c42-c6584b8fb2ad","Type":"ContainerDied","Data":"e95636e8f3a430b662ce91c03ec6e1e291e4540c649d4504ea274b5088557cbb"} Dec 10 11:16:45 crc kubenswrapper[4780]: I1210 11:16:45.809343 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-6579b964d7-f7kj9" event={"ID":"73526536-c600-49b3-b73d-2897a05ce69e","Type":"ContainerStarted","Data":"6f96fedd93f7b6dafbcfc6c65fa6b73fb6b57ca97160557c8051b4e5a32f9a56"} Dec 10 11:16:45 crc kubenswrapper[4780]: I1210 11:16:45.810520 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/swift-proxy-6579b964d7-f7kj9" Dec 10 11:16:45 crc kubenswrapper[4780]: I1210 11:16:45.810559 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/swift-proxy-6579b964d7-f7kj9" Dec 10 11:16:45 crc kubenswrapper[4780]: I1210 11:16:45.818475 4780 generic.go:334] "Generic (PLEG): container finished" podID="1f099496-ee6c-44ff-9c42-c6584b8fb2ad" containerID="7326f7d0eb9117d311b8a0fce5df5cce919e314e9848cc272e29de1558568644" exitCode=0 Dec 10 11:16:45 crc kubenswrapper[4780]: I1210 11:16:45.818836 4780 generic.go:334] "Generic (PLEG): container finished" podID="1f099496-ee6c-44ff-9c42-c6584b8fb2ad" containerID="a9abfaf819bc7a6dd38ec6a5932abeb0842946e7c626f84e70b57f864190f47e" exitCode=0 Dec 10 11:16:45 crc kubenswrapper[4780]: I1210 11:16:45.818563 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"1f099496-ee6c-44ff-9c42-c6584b8fb2ad","Type":"ContainerDied","Data":"7326f7d0eb9117d311b8a0fce5df5cce919e314e9848cc272e29de1558568644"} Dec 10 11:16:45 crc kubenswrapper[4780]: I1210 11:16:45.819043 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"1f099496-ee6c-44ff-9c42-c6584b8fb2ad","Type":"ContainerDied","Data":"a9abfaf819bc7a6dd38ec6a5932abeb0842946e7c626f84e70b57f864190f47e"} Dec 10 11:16:45 crc kubenswrapper[4780]: I1210 11:16:45.822687 4780 generic.go:334] "Generic (PLEG): container finished" podID="bbd2dcb7-ece1-4a2f-83ec-a4aa131720ad" containerID="ab820b77c0b60a6dcff19329e78086c21314c501f2e40f7c34cdaf1d264ff34c" exitCode=0 Dec 10 11:16:45 crc kubenswrapper[4780]: I1210 11:16:45.823079 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"bbd2dcb7-ece1-4a2f-83ec-a4aa131720ad","Type":"ContainerDied","Data":"ab820b77c0b60a6dcff19329e78086c21314c501f2e40f7c34cdaf1d264ff34c"} Dec 10 11:16:45 crc kubenswrapper[4780]: I1210 11:16:45.868819 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/swift-proxy-6579b964d7-f7kj9" podStartSLOduration=5.868781254 podStartE2EDuration="5.868781254s" podCreationTimestamp="2025-12-10 11:16:40 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 11:16:45.848219748 +0000 UTC m=+1910.701613201" watchObservedRunningTime="2025-12-10 11:16:45.868781254 +0000 UTC m=+1910.722174697" Dec 10 11:16:45 crc kubenswrapper[4780]: I1210 11:16:45.994825 4780 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/cinder-api-0" podUID="61b91736-8f72-474a-ab9a-8071b34b8458" containerName="cinder-api" probeResult="failure" output="Get \"http://10.217.0.206:8776/healthcheck\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Dec 10 11:16:47 crc 
kubenswrapper[4780]: I1210 11:16:47.859405 4780 generic.go:334] "Generic (PLEG): container finished" podID="1f099496-ee6c-44ff-9c42-c6584b8fb2ad" containerID="b4fa9fbc70f5848ae91210eb2c0d2674c29b75dde7c0216f7c8d756f517045f2" exitCode=0 Dec 10 11:16:47 crc kubenswrapper[4780]: I1210 11:16:47.859967 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"1f099496-ee6c-44ff-9c42-c6584b8fb2ad","Type":"ContainerDied","Data":"b4fa9fbc70f5848ae91210eb2c0d2674c29b75dde7c0216f7c8d756f517045f2"} Dec 10 11:16:47 crc kubenswrapper[4780]: I1210 11:16:47.961122 4780 scope.go:117] "RemoveContainer" containerID="90f38aa24e74e78a263a82b742bebf91991ff25f2212114904e33abbbd82de16" Dec 10 11:16:47 crc kubenswrapper[4780]: E1210 11:16:47.961433 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xhdr5_openshift-machine-config-operator(6bf1dca1-b191-4796-b326-baac53e84045)\"" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" podUID="6bf1dca1-b191-4796-b326-baac53e84045" Dec 10 11:16:48 crc kubenswrapper[4780]: I1210 11:16:48.090582 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/neutron-5ccbff869-dj8wl" Dec 10 11:16:48 crc kubenswrapper[4780]: I1210 11:16:48.287692 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-5dd58fdf76-hn7kd"] Dec 10 11:16:48 crc kubenswrapper[4780]: I1210 11:16:48.288487 4780 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/neutron-5dd58fdf76-hn7kd" podUID="0744f766-309b-4632-a522-ba8d51a5fa80" containerName="neutron-api" containerID="cri-o://6b66df0c1a3d7d39c43b559fb877ad8ff5d4b2ba89f2344ba205907ddd7c597c" gracePeriod=30 Dec 10 11:16:48 crc kubenswrapper[4780]: I1210 11:16:48.288667 4780 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/neutron-5dd58fdf76-hn7kd" podUID="0744f766-309b-4632-a522-ba8d51a5fa80" containerName="neutron-httpd" containerID="cri-o://6414a1e874a5a81e8a8ed5be1dc1dce97b0ed571884db3a456d15ce58ce8f03d" gracePeriod=30 Dec 10 11:16:48 crc kubenswrapper[4780]: I1210 11:16:48.547324 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/cinder-api-0" Dec 10 11:16:48 crc kubenswrapper[4780]: I1210 11:16:48.894854 4780 generic.go:334] "Generic (PLEG): container finished" podID="0744f766-309b-4632-a522-ba8d51a5fa80" containerID="6414a1e874a5a81e8a8ed5be1dc1dce97b0ed571884db3a456d15ce58ce8f03d" exitCode=0 Dec 10 11:16:48 crc kubenswrapper[4780]: I1210 11:16:48.895080 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-5dd58fdf76-hn7kd" event={"ID":"0744f766-309b-4632-a522-ba8d51a5fa80","Type":"ContainerDied","Data":"6414a1e874a5a81e8a8ed5be1dc1dce97b0ed571884db3a456d15ce58ce8f03d"} Dec 10 11:16:51 crc kubenswrapper[4780]: I1210 11:16:51.090514 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/swift-proxy-6579b964d7-f7kj9" Dec 10 11:16:51 crc kubenswrapper[4780]: I1210 11:16:51.096841 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/swift-proxy-6579b964d7-f7kj9" Dec 10 11:16:53 crc kubenswrapper[4780]: I1210 11:16:53.124833 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-engine-f7d475897-pp967"] Dec 10 11:16:53 crc kubenswrapper[4780]: E1210 
11:16:53.126186 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4cf4f40c-d9e0-46ea-bfa6-cfa15257d19a" containerName="dnsmasq-dns" Dec 10 11:16:53 crc kubenswrapper[4780]: I1210 11:16:53.126216 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="4cf4f40c-d9e0-46ea-bfa6-cfa15257d19a" containerName="dnsmasq-dns" Dec 10 11:16:53 crc kubenswrapper[4780]: E1210 11:16:53.126270 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4cf4f40c-d9e0-46ea-bfa6-cfa15257d19a" containerName="init" Dec 10 11:16:53 crc kubenswrapper[4780]: I1210 11:16:53.126280 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="4cf4f40c-d9e0-46ea-bfa6-cfa15257d19a" containerName="init" Dec 10 11:16:53 crc kubenswrapper[4780]: I1210 11:16:53.126691 4780 memory_manager.go:354] "RemoveStaleState removing state" podUID="4cf4f40c-d9e0-46ea-bfa6-cfa15257d19a" containerName="dnsmasq-dns" Dec 10 11:16:53 crc kubenswrapper[4780]: I1210 11:16:53.164649 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-engine-f7d475897-pp967" Dec 10 11:16:53 crc kubenswrapper[4780]: I1210 11:16:53.174385 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"heat-config-data" Dec 10 11:16:53 crc kubenswrapper[4780]: I1210 11:16:53.175650 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"heat-heat-dockercfg-9plgw" Dec 10 11:16:53 crc kubenswrapper[4780]: I1210 11:16:53.177150 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-engine-f7d475897-pp967"] Dec 10 11:16:53 crc kubenswrapper[4780]: I1210 11:16:53.180573 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"heat-engine-config-data" Dec 10 11:16:53 crc kubenswrapper[4780]: I1210 11:16:53.226609 4780 generic.go:334] "Generic (PLEG): container finished" podID="0744f766-309b-4632-a522-ba8d51a5fa80" containerID="6b66df0c1a3d7d39c43b559fb877ad8ff5d4b2ba89f2344ba205907ddd7c597c" exitCode=0 Dec 10 11:16:53 crc kubenswrapper[4780]: I1210 11:16:53.226686 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-5dd58fdf76-hn7kd" event={"ID":"0744f766-309b-4632-a522-ba8d51a5fa80","Type":"ContainerDied","Data":"6b66df0c1a3d7d39c43b559fb877ad8ff5d4b2ba89f2344ba205907ddd7c597c"} Dec 10 11:16:53 crc kubenswrapper[4780]: I1210 11:16:53.269775 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/068aca35-6c22-4164-bf63-481836e82331-combined-ca-bundle\") pod \"heat-engine-f7d475897-pp967\" (UID: \"068aca35-6c22-4164-bf63-481836e82331\") " pod="openstack/heat-engine-f7d475897-pp967" Dec 10 11:16:53 crc kubenswrapper[4780]: I1210 11:16:53.270449 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/068aca35-6c22-4164-bf63-481836e82331-config-data-custom\") pod \"heat-engine-f7d475897-pp967\" (UID: \"068aca35-6c22-4164-bf63-481836e82331\") " pod="openstack/heat-engine-f7d475897-pp967" Dec 10 11:16:53 crc kubenswrapper[4780]: I1210 11:16:53.270743 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9h5xt\" (UniqueName: \"kubernetes.io/projected/068aca35-6c22-4164-bf63-481836e82331-kube-api-access-9h5xt\") pod \"heat-engine-f7d475897-pp967\" (UID: \"068aca35-6c22-4164-bf63-481836e82331\") " 
pod="openstack/heat-engine-f7d475897-pp967" Dec 10 11:16:53 crc kubenswrapper[4780]: I1210 11:16:53.270907 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/068aca35-6c22-4164-bf63-481836e82331-config-data\") pod \"heat-engine-f7d475897-pp967\" (UID: \"068aca35-6c22-4164-bf63-481836e82331\") " pod="openstack/heat-engine-f7d475897-pp967" Dec 10 11:16:53 crc kubenswrapper[4780]: I1210 11:16:53.286063 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-cfnapi-54f5bc9f87-vp45v"] Dec 10 11:16:53 crc kubenswrapper[4780]: I1210 11:16:53.288326 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-cfnapi-54f5bc9f87-vp45v" Dec 10 11:16:53 crc kubenswrapper[4780]: I1210 11:16:53.298153 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"heat-cfnapi-config-data" Dec 10 11:16:53 crc kubenswrapper[4780]: I1210 11:16:53.338702 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-7756b9d78c-2nwlx"] Dec 10 11:16:53 crc kubenswrapper[4780]: I1210 11:16:53.352266 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7756b9d78c-2nwlx" Dec 10 11:16:53 crc kubenswrapper[4780]: I1210 11:16:53.379537 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/19201430-5ecc-4a0b-ad28-5cdfff8d037a-config-data-custom\") pod \"heat-cfnapi-54f5bc9f87-vp45v\" (UID: \"19201430-5ecc-4a0b-ad28-5cdfff8d037a\") " pod="openstack/heat-cfnapi-54f5bc9f87-vp45v" Dec 10 11:16:53 crc kubenswrapper[4780]: I1210 11:16:53.379744 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/19201430-5ecc-4a0b-ad28-5cdfff8d037a-combined-ca-bundle\") pod \"heat-cfnapi-54f5bc9f87-vp45v\" (UID: \"19201430-5ecc-4a0b-ad28-5cdfff8d037a\") " pod="openstack/heat-cfnapi-54f5bc9f87-vp45v" Dec 10 11:16:53 crc kubenswrapper[4780]: I1210 11:16:53.379860 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/068aca35-6c22-4164-bf63-481836e82331-combined-ca-bundle\") pod \"heat-engine-f7d475897-pp967\" (UID: \"068aca35-6c22-4164-bf63-481836e82331\") " pod="openstack/heat-engine-f7d475897-pp967" Dec 10 11:16:53 crc kubenswrapper[4780]: I1210 11:16:53.379970 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/068aca35-6c22-4164-bf63-481836e82331-config-data-custom\") pod \"heat-engine-f7d475897-pp967\" (UID: \"068aca35-6c22-4164-bf63-481836e82331\") " pod="openstack/heat-engine-f7d475897-pp967" Dec 10 11:16:53 crc kubenswrapper[4780]: I1210 11:16:53.380002 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/19201430-5ecc-4a0b-ad28-5cdfff8d037a-config-data\") pod \"heat-cfnapi-54f5bc9f87-vp45v\" (UID: \"19201430-5ecc-4a0b-ad28-5cdfff8d037a\") " pod="openstack/heat-cfnapi-54f5bc9f87-vp45v" Dec 10 11:16:53 crc kubenswrapper[4780]: I1210 11:16:53.380208 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9h5xt\" (UniqueName: 
\"kubernetes.io/projected/068aca35-6c22-4164-bf63-481836e82331-kube-api-access-9h5xt\") pod \"heat-engine-f7d475897-pp967\" (UID: \"068aca35-6c22-4164-bf63-481836e82331\") " pod="openstack/heat-engine-f7d475897-pp967" Dec 10 11:16:53 crc kubenswrapper[4780]: I1210 11:16:53.380292 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5dbgc\" (UniqueName: \"kubernetes.io/projected/19201430-5ecc-4a0b-ad28-5cdfff8d037a-kube-api-access-5dbgc\") pod \"heat-cfnapi-54f5bc9f87-vp45v\" (UID: \"19201430-5ecc-4a0b-ad28-5cdfff8d037a\") " pod="openstack/heat-cfnapi-54f5bc9f87-vp45v" Dec 10 11:16:53 crc kubenswrapper[4780]: I1210 11:16:53.380401 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/068aca35-6c22-4164-bf63-481836e82331-config-data\") pod \"heat-engine-f7d475897-pp967\" (UID: \"068aca35-6c22-4164-bf63-481836e82331\") " pod="openstack/heat-engine-f7d475897-pp967" Dec 10 11:16:53 crc kubenswrapper[4780]: I1210 11:16:53.398340 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/068aca35-6c22-4164-bf63-481836e82331-combined-ca-bundle\") pod \"heat-engine-f7d475897-pp967\" (UID: \"068aca35-6c22-4164-bf63-481836e82331\") " pod="openstack/heat-engine-f7d475897-pp967" Dec 10 11:16:53 crc kubenswrapper[4780]: I1210 11:16:53.398577 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/068aca35-6c22-4164-bf63-481836e82331-config-data-custom\") pod \"heat-engine-f7d475897-pp967\" (UID: \"068aca35-6c22-4164-bf63-481836e82331\") " pod="openstack/heat-engine-f7d475897-pp967" Dec 10 11:16:53 crc kubenswrapper[4780]: I1210 11:16:53.427579 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-cfnapi-54f5bc9f87-vp45v"] Dec 10 11:16:53 crc kubenswrapper[4780]: I1210 11:16:53.431544 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/068aca35-6c22-4164-bf63-481836e82331-config-data\") pod \"heat-engine-f7d475897-pp967\" (UID: \"068aca35-6c22-4164-bf63-481836e82331\") " pod="openstack/heat-engine-f7d475897-pp967" Dec 10 11:16:53 crc kubenswrapper[4780]: I1210 11:16:53.466035 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-api-7bc68bff5f-xvzgg"] Dec 10 11:16:53 crc kubenswrapper[4780]: I1210 11:16:53.468519 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-api-7bc68bff5f-xvzgg" Dec 10 11:16:53 crc kubenswrapper[4780]: I1210 11:16:53.471271 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9h5xt\" (UniqueName: \"kubernetes.io/projected/068aca35-6c22-4164-bf63-481836e82331-kube-api-access-9h5xt\") pod \"heat-engine-f7d475897-pp967\" (UID: \"068aca35-6c22-4164-bf63-481836e82331\") " pod="openstack/heat-engine-f7d475897-pp967" Dec 10 11:16:53 crc kubenswrapper[4780]: I1210 11:16:53.482416 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"heat-api-config-data" Dec 10 11:16:53 crc kubenswrapper[4780]: I1210 11:16:53.485305 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5dbgc\" (UniqueName: \"kubernetes.io/projected/19201430-5ecc-4a0b-ad28-5cdfff8d037a-kube-api-access-5dbgc\") pod \"heat-cfnapi-54f5bc9f87-vp45v\" (UID: \"19201430-5ecc-4a0b-ad28-5cdfff8d037a\") " pod="openstack/heat-cfnapi-54f5bc9f87-vp45v" Dec 10 11:16:53 crc kubenswrapper[4780]: I1210 11:16:53.485413 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8400fa88-5e91-417f-9495-12e8efcf25d0-config\") pod \"dnsmasq-dns-7756b9d78c-2nwlx\" (UID: \"8400fa88-5e91-417f-9495-12e8efcf25d0\") " pod="openstack/dnsmasq-dns-7756b9d78c-2nwlx" Dec 10 11:16:53 crc kubenswrapper[4780]: I1210 11:16:53.485539 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/8400fa88-5e91-417f-9495-12e8efcf25d0-ovsdbserver-sb\") pod \"dnsmasq-dns-7756b9d78c-2nwlx\" (UID: \"8400fa88-5e91-417f-9495-12e8efcf25d0\") " pod="openstack/dnsmasq-dns-7756b9d78c-2nwlx" Dec 10 11:16:53 crc kubenswrapper[4780]: I1210 11:16:53.492981 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/19201430-5ecc-4a0b-ad28-5cdfff8d037a-config-data-custom\") pod \"heat-cfnapi-54f5bc9f87-vp45v\" (UID: \"19201430-5ecc-4a0b-ad28-5cdfff8d037a\") " pod="openstack/heat-cfnapi-54f5bc9f87-vp45v" Dec 10 11:16:53 crc kubenswrapper[4780]: I1210 11:16:53.493903 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/19201430-5ecc-4a0b-ad28-5cdfff8d037a-combined-ca-bundle\") pod \"heat-cfnapi-54f5bc9f87-vp45v\" (UID: \"19201430-5ecc-4a0b-ad28-5cdfff8d037a\") " pod="openstack/heat-cfnapi-54f5bc9f87-vp45v" Dec 10 11:16:53 crc kubenswrapper[4780]: I1210 11:16:53.494196 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pzq76\" (UniqueName: \"kubernetes.io/projected/8400fa88-5e91-417f-9495-12e8efcf25d0-kube-api-access-pzq76\") pod \"dnsmasq-dns-7756b9d78c-2nwlx\" (UID: \"8400fa88-5e91-417f-9495-12e8efcf25d0\") " pod="openstack/dnsmasq-dns-7756b9d78c-2nwlx" Dec 10 11:16:53 crc kubenswrapper[4780]: I1210 11:16:53.494300 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/8400fa88-5e91-417f-9495-12e8efcf25d0-ovsdbserver-nb\") pod \"dnsmasq-dns-7756b9d78c-2nwlx\" (UID: \"8400fa88-5e91-417f-9495-12e8efcf25d0\") " pod="openstack/dnsmasq-dns-7756b9d78c-2nwlx" Dec 10 11:16:53 crc kubenswrapper[4780]: I1210 11:16:53.494344 4780 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/19201430-5ecc-4a0b-ad28-5cdfff8d037a-config-data\") pod \"heat-cfnapi-54f5bc9f87-vp45v\" (UID: \"19201430-5ecc-4a0b-ad28-5cdfff8d037a\") " pod="openstack/heat-cfnapi-54f5bc9f87-vp45v" Dec 10 11:16:53 crc kubenswrapper[4780]: I1210 11:16:53.494380 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/8400fa88-5e91-417f-9495-12e8efcf25d0-dns-svc\") pod \"dnsmasq-dns-7756b9d78c-2nwlx\" (UID: \"8400fa88-5e91-417f-9495-12e8efcf25d0\") " pod="openstack/dnsmasq-dns-7756b9d78c-2nwlx" Dec 10 11:16:53 crc kubenswrapper[4780]: I1210 11:16:53.494727 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/8400fa88-5e91-417f-9495-12e8efcf25d0-dns-swift-storage-0\") pod \"dnsmasq-dns-7756b9d78c-2nwlx\" (UID: \"8400fa88-5e91-417f-9495-12e8efcf25d0\") " pod="openstack/dnsmasq-dns-7756b9d78c-2nwlx" Dec 10 11:16:53 crc kubenswrapper[4780]: I1210 11:16:53.508404 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/19201430-5ecc-4a0b-ad28-5cdfff8d037a-config-data\") pod \"heat-cfnapi-54f5bc9f87-vp45v\" (UID: \"19201430-5ecc-4a0b-ad28-5cdfff8d037a\") " pod="openstack/heat-cfnapi-54f5bc9f87-vp45v" Dec 10 11:16:53 crc kubenswrapper[4780]: I1210 11:16:53.510687 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/19201430-5ecc-4a0b-ad28-5cdfff8d037a-config-data-custom\") pod \"heat-cfnapi-54f5bc9f87-vp45v\" (UID: \"19201430-5ecc-4a0b-ad28-5cdfff8d037a\") " pod="openstack/heat-cfnapi-54f5bc9f87-vp45v" Dec 10 11:16:53 crc kubenswrapper[4780]: I1210 11:16:53.510891 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-api-7bc68bff5f-xvzgg"] Dec 10 11:16:53 crc kubenswrapper[4780]: I1210 11:16:53.526405 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/19201430-5ecc-4a0b-ad28-5cdfff8d037a-combined-ca-bundle\") pod \"heat-cfnapi-54f5bc9f87-vp45v\" (UID: \"19201430-5ecc-4a0b-ad28-5cdfff8d037a\") " pod="openstack/heat-cfnapi-54f5bc9f87-vp45v" Dec 10 11:16:53 crc kubenswrapper[4780]: I1210 11:16:53.532793 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5dbgc\" (UniqueName: \"kubernetes.io/projected/19201430-5ecc-4a0b-ad28-5cdfff8d037a-kube-api-access-5dbgc\") pod \"heat-cfnapi-54f5bc9f87-vp45v\" (UID: \"19201430-5ecc-4a0b-ad28-5cdfff8d037a\") " pod="openstack/heat-cfnapi-54f5bc9f87-vp45v" Dec 10 11:16:53 crc kubenswrapper[4780]: I1210 11:16:53.544009 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-engine-f7d475897-pp967" Dec 10 11:16:53 crc kubenswrapper[4780]: I1210 11:16:53.597386 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/77c41a0f-d539-496a-85f5-f6aec31747a9-combined-ca-bundle\") pod \"heat-api-7bc68bff5f-xvzgg\" (UID: \"77c41a0f-d539-496a-85f5-f6aec31747a9\") " pod="openstack/heat-api-7bc68bff5f-xvzgg" Dec 10 11:16:53 crc kubenswrapper[4780]: I1210 11:16:53.604941 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-7756b9d78c-2nwlx"] Dec 10 11:16:53 crc kubenswrapper[4780]: I1210 11:16:53.611653 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/77c41a0f-d539-496a-85f5-f6aec31747a9-config-data\") pod \"heat-api-7bc68bff5f-xvzgg\" (UID: \"77c41a0f-d539-496a-85f5-f6aec31747a9\") " pod="openstack/heat-api-7bc68bff5f-xvzgg" Dec 10 11:16:53 crc kubenswrapper[4780]: I1210 11:16:53.611876 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pzq76\" (UniqueName: \"kubernetes.io/projected/8400fa88-5e91-417f-9495-12e8efcf25d0-kube-api-access-pzq76\") pod \"dnsmasq-dns-7756b9d78c-2nwlx\" (UID: \"8400fa88-5e91-417f-9495-12e8efcf25d0\") " pod="openstack/dnsmasq-dns-7756b9d78c-2nwlx" Dec 10 11:16:53 crc kubenswrapper[4780]: I1210 11:16:53.611984 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/8400fa88-5e91-417f-9495-12e8efcf25d0-ovsdbserver-nb\") pod \"dnsmasq-dns-7756b9d78c-2nwlx\" (UID: \"8400fa88-5e91-417f-9495-12e8efcf25d0\") " pod="openstack/dnsmasq-dns-7756b9d78c-2nwlx" Dec 10 11:16:53 crc kubenswrapper[4780]: I1210 11:16:53.612042 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/8400fa88-5e91-417f-9495-12e8efcf25d0-dns-svc\") pod \"dnsmasq-dns-7756b9d78c-2nwlx\" (UID: \"8400fa88-5e91-417f-9495-12e8efcf25d0\") " pod="openstack/dnsmasq-dns-7756b9d78c-2nwlx" Dec 10 11:16:53 crc kubenswrapper[4780]: I1210 11:16:53.612360 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/8400fa88-5e91-417f-9495-12e8efcf25d0-dns-swift-storage-0\") pod \"dnsmasq-dns-7756b9d78c-2nwlx\" (UID: \"8400fa88-5e91-417f-9495-12e8efcf25d0\") " pod="openstack/dnsmasq-dns-7756b9d78c-2nwlx" Dec 10 11:16:53 crc kubenswrapper[4780]: I1210 11:16:53.612428 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7nztz\" (UniqueName: \"kubernetes.io/projected/77c41a0f-d539-496a-85f5-f6aec31747a9-kube-api-access-7nztz\") pod \"heat-api-7bc68bff5f-xvzgg\" (UID: \"77c41a0f-d539-496a-85f5-f6aec31747a9\") " pod="openstack/heat-api-7bc68bff5f-xvzgg" Dec 10 11:16:53 crc kubenswrapper[4780]: I1210 11:16:53.612464 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8400fa88-5e91-417f-9495-12e8efcf25d0-config\") pod \"dnsmasq-dns-7756b9d78c-2nwlx\" (UID: \"8400fa88-5e91-417f-9495-12e8efcf25d0\") " pod="openstack/dnsmasq-dns-7756b9d78c-2nwlx" Dec 10 11:16:53 crc kubenswrapper[4780]: I1210 11:16:53.612571 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" 
(UniqueName: \"kubernetes.io/configmap/8400fa88-5e91-417f-9495-12e8efcf25d0-ovsdbserver-sb\") pod \"dnsmasq-dns-7756b9d78c-2nwlx\" (UID: \"8400fa88-5e91-417f-9495-12e8efcf25d0\") " pod="openstack/dnsmasq-dns-7756b9d78c-2nwlx" Dec 10 11:16:53 crc kubenswrapper[4780]: I1210 11:16:53.612684 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/77c41a0f-d539-496a-85f5-f6aec31747a9-config-data-custom\") pod \"heat-api-7bc68bff5f-xvzgg\" (UID: \"77c41a0f-d539-496a-85f5-f6aec31747a9\") " pod="openstack/heat-api-7bc68bff5f-xvzgg" Dec 10 11:16:53 crc kubenswrapper[4780]: I1210 11:16:53.614617 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/8400fa88-5e91-417f-9495-12e8efcf25d0-dns-svc\") pod \"dnsmasq-dns-7756b9d78c-2nwlx\" (UID: \"8400fa88-5e91-417f-9495-12e8efcf25d0\") " pod="openstack/dnsmasq-dns-7756b9d78c-2nwlx" Dec 10 11:16:53 crc kubenswrapper[4780]: I1210 11:16:53.616672 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/8400fa88-5e91-417f-9495-12e8efcf25d0-ovsdbserver-nb\") pod \"dnsmasq-dns-7756b9d78c-2nwlx\" (UID: \"8400fa88-5e91-417f-9495-12e8efcf25d0\") " pod="openstack/dnsmasq-dns-7756b9d78c-2nwlx" Dec 10 11:16:53 crc kubenswrapper[4780]: I1210 11:16:53.619258 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/8400fa88-5e91-417f-9495-12e8efcf25d0-ovsdbserver-sb\") pod \"dnsmasq-dns-7756b9d78c-2nwlx\" (UID: \"8400fa88-5e91-417f-9495-12e8efcf25d0\") " pod="openstack/dnsmasq-dns-7756b9d78c-2nwlx" Dec 10 11:16:53 crc kubenswrapper[4780]: I1210 11:16:53.624334 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8400fa88-5e91-417f-9495-12e8efcf25d0-config\") pod \"dnsmasq-dns-7756b9d78c-2nwlx\" (UID: \"8400fa88-5e91-417f-9495-12e8efcf25d0\") " pod="openstack/dnsmasq-dns-7756b9d78c-2nwlx" Dec 10 11:16:53 crc kubenswrapper[4780]: I1210 11:16:53.625013 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/8400fa88-5e91-417f-9495-12e8efcf25d0-dns-swift-storage-0\") pod \"dnsmasq-dns-7756b9d78c-2nwlx\" (UID: \"8400fa88-5e91-417f-9495-12e8efcf25d0\") " pod="openstack/dnsmasq-dns-7756b9d78c-2nwlx" Dec 10 11:16:53 crc kubenswrapper[4780]: I1210 11:16:53.633452 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-cfnapi-54f5bc9f87-vp45v" Dec 10 11:16:53 crc kubenswrapper[4780]: I1210 11:16:53.678374 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pzq76\" (UniqueName: \"kubernetes.io/projected/8400fa88-5e91-417f-9495-12e8efcf25d0-kube-api-access-pzq76\") pod \"dnsmasq-dns-7756b9d78c-2nwlx\" (UID: \"8400fa88-5e91-417f-9495-12e8efcf25d0\") " pod="openstack/dnsmasq-dns-7756b9d78c-2nwlx" Dec 10 11:16:53 crc kubenswrapper[4780]: I1210 11:16:53.718511 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7nztz\" (UniqueName: \"kubernetes.io/projected/77c41a0f-d539-496a-85f5-f6aec31747a9-kube-api-access-7nztz\") pod \"heat-api-7bc68bff5f-xvzgg\" (UID: \"77c41a0f-d539-496a-85f5-f6aec31747a9\") " pod="openstack/heat-api-7bc68bff5f-xvzgg" Dec 10 11:16:53 crc kubenswrapper[4780]: I1210 11:16:53.718627 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/77c41a0f-d539-496a-85f5-f6aec31747a9-config-data-custom\") pod \"heat-api-7bc68bff5f-xvzgg\" (UID: \"77c41a0f-d539-496a-85f5-f6aec31747a9\") " pod="openstack/heat-api-7bc68bff5f-xvzgg" Dec 10 11:16:53 crc kubenswrapper[4780]: I1210 11:16:53.718740 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/77c41a0f-d539-496a-85f5-f6aec31747a9-combined-ca-bundle\") pod \"heat-api-7bc68bff5f-xvzgg\" (UID: \"77c41a0f-d539-496a-85f5-f6aec31747a9\") " pod="openstack/heat-api-7bc68bff5f-xvzgg" Dec 10 11:16:53 crc kubenswrapper[4780]: I1210 11:16:53.718841 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/77c41a0f-d539-496a-85f5-f6aec31747a9-config-data\") pod \"heat-api-7bc68bff5f-xvzgg\" (UID: \"77c41a0f-d539-496a-85f5-f6aec31747a9\") " pod="openstack/heat-api-7bc68bff5f-xvzgg" Dec 10 11:16:53 crc kubenswrapper[4780]: I1210 11:16:53.733520 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-7756b9d78c-2nwlx" Dec 10 11:16:53 crc kubenswrapper[4780]: I1210 11:16:53.741446 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/77c41a0f-d539-496a-85f5-f6aec31747a9-config-data\") pod \"heat-api-7bc68bff5f-xvzgg\" (UID: \"77c41a0f-d539-496a-85f5-f6aec31747a9\") " pod="openstack/heat-api-7bc68bff5f-xvzgg" Dec 10 11:16:53 crc kubenswrapper[4780]: I1210 11:16:53.767139 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/77c41a0f-d539-496a-85f5-f6aec31747a9-combined-ca-bundle\") pod \"heat-api-7bc68bff5f-xvzgg\" (UID: \"77c41a0f-d539-496a-85f5-f6aec31747a9\") " pod="openstack/heat-api-7bc68bff5f-xvzgg" Dec 10 11:16:53 crc kubenswrapper[4780]: I1210 11:16:53.789072 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/77c41a0f-d539-496a-85f5-f6aec31747a9-config-data-custom\") pod \"heat-api-7bc68bff5f-xvzgg\" (UID: \"77c41a0f-d539-496a-85f5-f6aec31747a9\") " pod="openstack/heat-api-7bc68bff5f-xvzgg" Dec 10 11:16:53 crc kubenswrapper[4780]: I1210 11:16:53.800366 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7nztz\" (UniqueName: \"kubernetes.io/projected/77c41a0f-d539-496a-85f5-f6aec31747a9-kube-api-access-7nztz\") pod \"heat-api-7bc68bff5f-xvzgg\" (UID: \"77c41a0f-d539-496a-85f5-f6aec31747a9\") " pod="openstack/heat-api-7bc68bff5f-xvzgg" Dec 10 11:16:53 crc kubenswrapper[4780]: I1210 11:16:53.845194 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-db-create-wdmbj"] Dec 10 11:16:53 crc kubenswrapper[4780]: I1210 11:16:53.854514 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-db-create-wdmbj" Dec 10 11:16:53 crc kubenswrapper[4780]: I1210 11:16:53.865538 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-api-7bc68bff5f-xvzgg" Dec 10 11:16:53 crc kubenswrapper[4780]: I1210 11:16:53.879858 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-db-create-wdmbj"] Dec 10 11:16:53 crc kubenswrapper[4780]: I1210 11:16:53.930126 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a58ed76c-f42e-41ca-8e79-3b656701cdea-operator-scripts\") pod \"nova-api-db-create-wdmbj\" (UID: \"a58ed76c-f42e-41ca-8e79-3b656701cdea\") " pod="openstack/nova-api-db-create-wdmbj" Dec 10 11:16:53 crc kubenswrapper[4780]: I1210 11:16:53.930278 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pgxnw\" (UniqueName: \"kubernetes.io/projected/a58ed76c-f42e-41ca-8e79-3b656701cdea-kube-api-access-pgxnw\") pod \"nova-api-db-create-wdmbj\" (UID: \"a58ed76c-f42e-41ca-8e79-3b656701cdea\") " pod="openstack/nova-api-db-create-wdmbj" Dec 10 11:16:54 crc kubenswrapper[4780]: I1210 11:16:54.062138 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pgxnw\" (UniqueName: \"kubernetes.io/projected/a58ed76c-f42e-41ca-8e79-3b656701cdea-kube-api-access-pgxnw\") pod \"nova-api-db-create-wdmbj\" (UID: \"a58ed76c-f42e-41ca-8e79-3b656701cdea\") " pod="openstack/nova-api-db-create-wdmbj" Dec 10 11:16:54 crc kubenswrapper[4780]: I1210 11:16:54.081823 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a58ed76c-f42e-41ca-8e79-3b656701cdea-operator-scripts\") pod \"nova-api-db-create-wdmbj\" (UID: \"a58ed76c-f42e-41ca-8e79-3b656701cdea\") " pod="openstack/nova-api-db-create-wdmbj" Dec 10 11:16:54 crc kubenswrapper[4780]: I1210 11:16:54.089231 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a58ed76c-f42e-41ca-8e79-3b656701cdea-operator-scripts\") pod \"nova-api-db-create-wdmbj\" (UID: \"a58ed76c-f42e-41ca-8e79-3b656701cdea\") " pod="openstack/nova-api-db-create-wdmbj" Dec 10 11:16:54 crc kubenswrapper[4780]: I1210 11:16:54.103617 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-db-create-kjc6l"] Dec 10 11:16:54 crc kubenswrapper[4780]: I1210 11:16:54.161312 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-db-create-kjc6l" Dec 10 11:16:54 crc kubenswrapper[4780]: I1210 11:16:54.201499 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pgxnw\" (UniqueName: \"kubernetes.io/projected/a58ed76c-f42e-41ca-8e79-3b656701cdea-kube-api-access-pgxnw\") pod \"nova-api-db-create-wdmbj\" (UID: \"a58ed76c-f42e-41ca-8e79-3b656701cdea\") " pod="openstack/nova-api-db-create-wdmbj" Dec 10 11:16:54 crc kubenswrapper[4780]: I1210 11:16:54.261392 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-db-create-wdmbj" Dec 10 11:16:54 crc kubenswrapper[4780]: I1210 11:16:54.349748 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fzfbs\" (UniqueName: \"kubernetes.io/projected/f8ddd440-8c9e-4d62-8efa-b5c8dd6bc990-kube-api-access-fzfbs\") pod \"nova-cell0-db-create-kjc6l\" (UID: \"f8ddd440-8c9e-4d62-8efa-b5c8dd6bc990\") " pod="openstack/nova-cell0-db-create-kjc6l" Dec 10 11:16:54 crc kubenswrapper[4780]: I1210 11:16:54.350204 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f8ddd440-8c9e-4d62-8efa-b5c8dd6bc990-operator-scripts\") pod \"nova-cell0-db-create-kjc6l\" (UID: \"f8ddd440-8c9e-4d62-8efa-b5c8dd6bc990\") " pod="openstack/nova-cell0-db-create-kjc6l" Dec 10 11:16:54 crc kubenswrapper[4780]: I1210 11:16:54.361202 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-122d-account-create-update-tj4vk"] Dec 10 11:16:54 crc kubenswrapper[4780]: I1210 11:16:54.364152 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-122d-account-create-update-tj4vk" Dec 10 11:16:54 crc kubenswrapper[4780]: I1210 11:16:54.373502 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-db-secret" Dec 10 11:16:54 crc kubenswrapper[4780]: I1210 11:16:54.416435 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-db-create-kjc6l"] Dec 10 11:16:54 crc kubenswrapper[4780]: I1210 11:16:54.430484 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-122d-account-create-update-tj4vk"] Dec 10 11:16:54 crc kubenswrapper[4780]: I1210 11:16:54.445263 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-db-create-tw2jn"] Dec 10 11:16:54 crc kubenswrapper[4780]: I1210 11:16:54.462675 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fzfbs\" (UniqueName: \"kubernetes.io/projected/f8ddd440-8c9e-4d62-8efa-b5c8dd6bc990-kube-api-access-fzfbs\") pod \"nova-cell0-db-create-kjc6l\" (UID: \"f8ddd440-8c9e-4d62-8efa-b5c8dd6bc990\") " pod="openstack/nova-cell0-db-create-kjc6l" Dec 10 11:16:54 crc kubenswrapper[4780]: I1210 11:16:54.463007 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2l87b\" (UniqueName: \"kubernetes.io/projected/b1f2fac2-0797-448c-b4d8-98ebd4eff159-kube-api-access-2l87b\") pod \"nova-api-122d-account-create-update-tj4vk\" (UID: \"b1f2fac2-0797-448c-b4d8-98ebd4eff159\") " pod="openstack/nova-api-122d-account-create-update-tj4vk" Dec 10 11:16:54 crc kubenswrapper[4780]: I1210 11:16:54.463305 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f8ddd440-8c9e-4d62-8efa-b5c8dd6bc990-operator-scripts\") pod \"nova-cell0-db-create-kjc6l\" (UID: \"f8ddd440-8c9e-4d62-8efa-b5c8dd6bc990\") " pod="openstack/nova-cell0-db-create-kjc6l" Dec 10 11:16:54 crc kubenswrapper[4780]: I1210 11:16:54.467957 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-db-create-tw2jn"] Dec 10 11:16:54 crc kubenswrapper[4780]: I1210 11:16:54.468076 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: 
\"kubernetes.io/configmap/f8ddd440-8c9e-4d62-8efa-b5c8dd6bc990-operator-scripts\") pod \"nova-cell0-db-create-kjc6l\" (UID: \"f8ddd440-8c9e-4d62-8efa-b5c8dd6bc990\") " pod="openstack/nova-cell0-db-create-kjc6l" Dec 10 11:16:54 crc kubenswrapper[4780]: I1210 11:16:54.468097 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b1f2fac2-0797-448c-b4d8-98ebd4eff159-operator-scripts\") pod \"nova-api-122d-account-create-update-tj4vk\" (UID: \"b1f2fac2-0797-448c-b4d8-98ebd4eff159\") " pod="openstack/nova-api-122d-account-create-update-tj4vk" Dec 10 11:16:54 crc kubenswrapper[4780]: I1210 11:16:54.474743 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-db-create-tw2jn" Dec 10 11:16:54 crc kubenswrapper[4780]: I1210 11:16:54.506997 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-2c72-account-create-update-22zbp"] Dec 10 11:16:54 crc kubenswrapper[4780]: I1210 11:16:54.509250 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fzfbs\" (UniqueName: \"kubernetes.io/projected/f8ddd440-8c9e-4d62-8efa-b5c8dd6bc990-kube-api-access-fzfbs\") pod \"nova-cell0-db-create-kjc6l\" (UID: \"f8ddd440-8c9e-4d62-8efa-b5c8dd6bc990\") " pod="openstack/nova-cell0-db-create-kjc6l" Dec 10 11:16:54 crc kubenswrapper[4780]: I1210 11:16:54.517523 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-2c72-account-create-update-22zbp" Dec 10 11:16:54 crc kubenswrapper[4780]: I1210 11:16:54.534743 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-2c72-account-create-update-22zbp"] Dec 10 11:16:54 crc kubenswrapper[4780]: I1210 11:16:54.535650 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-db-secret" Dec 10 11:16:55 crc kubenswrapper[4780]: I1210 11:16:54.997883 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-db-create-kjc6l" Dec 10 11:16:55 crc kubenswrapper[4780]: I1210 11:16:55.009068 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d9a943a1-6945-4117-a123-5c96d85b4e77-operator-scripts\") pod \"nova-cell0-2c72-account-create-update-22zbp\" (UID: \"d9a943a1-6945-4117-a123-5c96d85b4e77\") " pod="openstack/nova-cell0-2c72-account-create-update-22zbp" Dec 10 11:16:55 crc kubenswrapper[4780]: I1210 11:16:55.009287 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/87bfb484-2111-41a3-99d5-52e8db80f098-operator-scripts\") pod \"nova-cell1-db-create-tw2jn\" (UID: \"87bfb484-2111-41a3-99d5-52e8db80f098\") " pod="openstack/nova-cell1-db-create-tw2jn" Dec 10 11:16:55 crc kubenswrapper[4780]: I1210 11:16:55.009509 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w8dxp\" (UniqueName: \"kubernetes.io/projected/87bfb484-2111-41a3-99d5-52e8db80f098-kube-api-access-w8dxp\") pod \"nova-cell1-db-create-tw2jn\" (UID: \"87bfb484-2111-41a3-99d5-52e8db80f098\") " pod="openstack/nova-cell1-db-create-tw2jn" Dec 10 11:16:55 crc kubenswrapper[4780]: I1210 11:16:55.009566 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b1f2fac2-0797-448c-b4d8-98ebd4eff159-operator-scripts\") pod \"nova-api-122d-account-create-update-tj4vk\" (UID: \"b1f2fac2-0797-448c-b4d8-98ebd4eff159\") " pod="openstack/nova-api-122d-account-create-update-tj4vk" Dec 10 11:16:55 crc kubenswrapper[4780]: I1210 11:16:55.009606 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-c4sd4\" (UniqueName: \"kubernetes.io/projected/d9a943a1-6945-4117-a123-5c96d85b4e77-kube-api-access-c4sd4\") pod \"nova-cell0-2c72-account-create-update-22zbp\" (UID: \"d9a943a1-6945-4117-a123-5c96d85b4e77\") " pod="openstack/nova-cell0-2c72-account-create-update-22zbp" Dec 10 11:16:55 crc kubenswrapper[4780]: I1210 11:16:55.009742 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2l87b\" (UniqueName: \"kubernetes.io/projected/b1f2fac2-0797-448c-b4d8-98ebd4eff159-kube-api-access-2l87b\") pod \"nova-api-122d-account-create-update-tj4vk\" (UID: \"b1f2fac2-0797-448c-b4d8-98ebd4eff159\") " pod="openstack/nova-api-122d-account-create-update-tj4vk" Dec 10 11:16:55 crc kubenswrapper[4780]: I1210 11:16:55.013162 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b1f2fac2-0797-448c-b4d8-98ebd4eff159-operator-scripts\") pod \"nova-api-122d-account-create-update-tj4vk\" (UID: \"b1f2fac2-0797-448c-b4d8-98ebd4eff159\") " pod="openstack/nova-api-122d-account-create-update-tj4vk" Dec 10 11:16:55 crc kubenswrapper[4780]: I1210 11:16:55.045575 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-9ebb-account-create-update-nb7g9"] Dec 10 11:16:55 crc kubenswrapper[4780]: I1210 11:16:55.056543 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-9ebb-account-create-update-nb7g9" Dec 10 11:16:55 crc kubenswrapper[4780]: I1210 11:16:55.067970 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-db-secret" Dec 10 11:16:55 crc kubenswrapper[4780]: I1210 11:16:55.072209 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-9ebb-account-create-update-nb7g9"] Dec 10 11:16:55 crc kubenswrapper[4780]: I1210 11:16:55.079699 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2l87b\" (UniqueName: \"kubernetes.io/projected/b1f2fac2-0797-448c-b4d8-98ebd4eff159-kube-api-access-2l87b\") pod \"nova-api-122d-account-create-update-tj4vk\" (UID: \"b1f2fac2-0797-448c-b4d8-98ebd4eff159\") " pod="openstack/nova-api-122d-account-create-update-tj4vk" Dec 10 11:16:55 crc kubenswrapper[4780]: I1210 11:16:55.111785 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w8dxp\" (UniqueName: \"kubernetes.io/projected/87bfb484-2111-41a3-99d5-52e8db80f098-kube-api-access-w8dxp\") pod \"nova-cell1-db-create-tw2jn\" (UID: \"87bfb484-2111-41a3-99d5-52e8db80f098\") " pod="openstack/nova-cell1-db-create-tw2jn" Dec 10 11:16:55 crc kubenswrapper[4780]: I1210 11:16:55.112446 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-c4sd4\" (UniqueName: \"kubernetes.io/projected/d9a943a1-6945-4117-a123-5c96d85b4e77-kube-api-access-c4sd4\") pod \"nova-cell0-2c72-account-create-update-22zbp\" (UID: \"d9a943a1-6945-4117-a123-5c96d85b4e77\") " pod="openstack/nova-cell0-2c72-account-create-update-22zbp" Dec 10 11:16:55 crc kubenswrapper[4780]: I1210 11:16:55.113141 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d9a943a1-6945-4117-a123-5c96d85b4e77-operator-scripts\") pod \"nova-cell0-2c72-account-create-update-22zbp\" (UID: \"d9a943a1-6945-4117-a123-5c96d85b4e77\") " pod="openstack/nova-cell0-2c72-account-create-update-22zbp" Dec 10 11:16:55 crc kubenswrapper[4780]: I1210 11:16:55.113419 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/87bfb484-2111-41a3-99d5-52e8db80f098-operator-scripts\") pod \"nova-cell1-db-create-tw2jn\" (UID: \"87bfb484-2111-41a3-99d5-52e8db80f098\") " pod="openstack/nova-cell1-db-create-tw2jn" Dec 10 11:16:55 crc kubenswrapper[4780]: I1210 11:16:55.116790 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d9a943a1-6945-4117-a123-5c96d85b4e77-operator-scripts\") pod \"nova-cell0-2c72-account-create-update-22zbp\" (UID: \"d9a943a1-6945-4117-a123-5c96d85b4e77\") " pod="openstack/nova-cell0-2c72-account-create-update-22zbp" Dec 10 11:16:55 crc kubenswrapper[4780]: I1210 11:16:55.118558 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/87bfb484-2111-41a3-99d5-52e8db80f098-operator-scripts\") pod \"nova-cell1-db-create-tw2jn\" (UID: \"87bfb484-2111-41a3-99d5-52e8db80f098\") " pod="openstack/nova-cell1-db-create-tw2jn" Dec 10 11:16:55 crc kubenswrapper[4780]: I1210 11:16:55.143149 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-c4sd4\" (UniqueName: \"kubernetes.io/projected/d9a943a1-6945-4117-a123-5c96d85b4e77-kube-api-access-c4sd4\") 
pod \"nova-cell0-2c72-account-create-update-22zbp\" (UID: \"d9a943a1-6945-4117-a123-5c96d85b4e77\") " pod="openstack/nova-cell0-2c72-account-create-update-22zbp" Dec 10 11:16:55 crc kubenswrapper[4780]: I1210 11:16:55.168981 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-w8dxp\" (UniqueName: \"kubernetes.io/projected/87bfb484-2111-41a3-99d5-52e8db80f098-kube-api-access-w8dxp\") pod \"nova-cell1-db-create-tw2jn\" (UID: \"87bfb484-2111-41a3-99d5-52e8db80f098\") " pod="openstack/nova-cell1-db-create-tw2jn" Dec 10 11:16:55 crc kubenswrapper[4780]: I1210 11:16:55.217449 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/00fa7636-13e7-49b7-8ce0-dac88eab63d7-operator-scripts\") pod \"nova-cell1-9ebb-account-create-update-nb7g9\" (UID: \"00fa7636-13e7-49b7-8ce0-dac88eab63d7\") " pod="openstack/nova-cell1-9ebb-account-create-update-nb7g9" Dec 10 11:16:55 crc kubenswrapper[4780]: I1210 11:16:55.217723 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-x22fh\" (UniqueName: \"kubernetes.io/projected/00fa7636-13e7-49b7-8ce0-dac88eab63d7-kube-api-access-x22fh\") pod \"nova-cell1-9ebb-account-create-update-nb7g9\" (UID: \"00fa7636-13e7-49b7-8ce0-dac88eab63d7\") " pod="openstack/nova-cell1-9ebb-account-create-update-nb7g9" Dec 10 11:16:55 crc kubenswrapper[4780]: I1210 11:16:55.299843 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-122d-account-create-update-tj4vk" Dec 10 11:16:55 crc kubenswrapper[4780]: I1210 11:16:55.324658 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-x22fh\" (UniqueName: \"kubernetes.io/projected/00fa7636-13e7-49b7-8ce0-dac88eab63d7-kube-api-access-x22fh\") pod \"nova-cell1-9ebb-account-create-update-nb7g9\" (UID: \"00fa7636-13e7-49b7-8ce0-dac88eab63d7\") " pod="openstack/nova-cell1-9ebb-account-create-update-nb7g9" Dec 10 11:16:55 crc kubenswrapper[4780]: I1210 11:16:55.324853 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/00fa7636-13e7-49b7-8ce0-dac88eab63d7-operator-scripts\") pod \"nova-cell1-9ebb-account-create-update-nb7g9\" (UID: \"00fa7636-13e7-49b7-8ce0-dac88eab63d7\") " pod="openstack/nova-cell1-9ebb-account-create-update-nb7g9" Dec 10 11:16:55 crc kubenswrapper[4780]: I1210 11:16:55.325973 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/00fa7636-13e7-49b7-8ce0-dac88eab63d7-operator-scripts\") pod \"nova-cell1-9ebb-account-create-update-nb7g9\" (UID: \"00fa7636-13e7-49b7-8ce0-dac88eab63d7\") " pod="openstack/nova-cell1-9ebb-account-create-update-nb7g9" Dec 10 11:16:55 crc kubenswrapper[4780]: I1210 11:16:55.335740 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-db-create-tw2jn" Dec 10 11:16:55 crc kubenswrapper[4780]: I1210 11:16:55.351719 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-2c72-account-create-update-22zbp" Dec 10 11:16:55 crc kubenswrapper[4780]: I1210 11:16:55.368777 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-x22fh\" (UniqueName: \"kubernetes.io/projected/00fa7636-13e7-49b7-8ce0-dac88eab63d7-kube-api-access-x22fh\") pod \"nova-cell1-9ebb-account-create-update-nb7g9\" (UID: \"00fa7636-13e7-49b7-8ce0-dac88eab63d7\") " pod="openstack/nova-cell1-9ebb-account-create-update-nb7g9" Dec 10 11:16:55 crc kubenswrapper[4780]: I1210 11:16:55.608082 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-9ebb-account-create-update-nb7g9" Dec 10 11:16:56 crc kubenswrapper[4780]: I1210 11:16:56.107472 4780 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/cinder-api-0" podUID="61b91736-8f72-474a-ab9a-8071b34b8458" containerName="cinder-api" probeResult="failure" output="Get \"http://10.217.0.206:8776/healthcheck\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Dec 10 11:17:00 crc kubenswrapper[4780]: I1210 11:17:00.958848 4780 scope.go:117] "RemoveContainer" containerID="90f38aa24e74e78a263a82b742bebf91991ff25f2212114904e33abbbd82de16" Dec 10 11:17:00 crc kubenswrapper[4780]: E1210 11:17:00.959786 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xhdr5_openshift-machine-config-operator(6bf1dca1-b191-4796-b326-baac53e84045)\"" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" podUID="6bf1dca1-b191-4796-b326-baac53e84045" Dec 10 11:17:01 crc kubenswrapper[4780]: I1210 11:17:01.169762 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-engine-86d9dd567b-q6nsm"] Dec 10 11:17:01 crc kubenswrapper[4780]: I1210 11:17:01.172690 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-engine-86d9dd567b-q6nsm" Dec 10 11:17:01 crc kubenswrapper[4780]: I1210 11:17:01.194047 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-api-7c6599c744-svfvf"] Dec 10 11:17:01 crc kubenswrapper[4780]: I1210 11:17:01.196502 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-api-7c6599c744-svfvf" Dec 10 11:17:01 crc kubenswrapper[4780]: I1210 11:17:01.235966 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-cfnapi-7788568699-swzvp"] Dec 10 11:17:01 crc kubenswrapper[4780]: I1210 11:17:01.239013 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-cfnapi-7788568699-swzvp" Dec 10 11:17:01 crc kubenswrapper[4780]: I1210 11:17:01.268519 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/85332002-7a9a-4738-a4cd-5b66c34658b2-config-data\") pod \"heat-engine-86d9dd567b-q6nsm\" (UID: \"85332002-7a9a-4738-a4cd-5b66c34658b2\") " pod="openstack/heat-engine-86d9dd567b-q6nsm" Dec 10 11:17:01 crc kubenswrapper[4780]: I1210 11:17:01.268740 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qfms7\" (UniqueName: \"kubernetes.io/projected/85332002-7a9a-4738-a4cd-5b66c34658b2-kube-api-access-qfms7\") pod \"heat-engine-86d9dd567b-q6nsm\" (UID: \"85332002-7a9a-4738-a4cd-5b66c34658b2\") " pod="openstack/heat-engine-86d9dd567b-q6nsm" Dec 10 11:17:01 crc kubenswrapper[4780]: I1210 11:17:01.269044 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/c09a9d73-c35e-40da-b545-ebf1b8724896-config-data-custom\") pod \"heat-api-7c6599c744-svfvf\" (UID: \"c09a9d73-c35e-40da-b545-ebf1b8724896\") " pod="openstack/heat-api-7c6599c744-svfvf" Dec 10 11:17:01 crc kubenswrapper[4780]: I1210 11:17:01.269187 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/85332002-7a9a-4738-a4cd-5b66c34658b2-combined-ca-bundle\") pod \"heat-engine-86d9dd567b-q6nsm\" (UID: \"85332002-7a9a-4738-a4cd-5b66c34658b2\") " pod="openstack/heat-engine-86d9dd567b-q6nsm" Dec 10 11:17:01 crc kubenswrapper[4780]: I1210 11:17:01.269274 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c09a9d73-c35e-40da-b545-ebf1b8724896-combined-ca-bundle\") pod \"heat-api-7c6599c744-svfvf\" (UID: \"c09a9d73-c35e-40da-b545-ebf1b8724896\") " pod="openstack/heat-api-7c6599c744-svfvf" Dec 10 11:17:01 crc kubenswrapper[4780]: I1210 11:17:01.269416 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-95k4d\" (UniqueName: \"kubernetes.io/projected/c09a9d73-c35e-40da-b545-ebf1b8724896-kube-api-access-95k4d\") pod \"heat-api-7c6599c744-svfvf\" (UID: \"c09a9d73-c35e-40da-b545-ebf1b8724896\") " pod="openstack/heat-api-7c6599c744-svfvf" Dec 10 11:17:01 crc kubenswrapper[4780]: I1210 11:17:01.269474 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/85332002-7a9a-4738-a4cd-5b66c34658b2-config-data-custom\") pod \"heat-engine-86d9dd567b-q6nsm\" (UID: \"85332002-7a9a-4738-a4cd-5b66c34658b2\") " pod="openstack/heat-engine-86d9dd567b-q6nsm" Dec 10 11:17:01 crc kubenswrapper[4780]: I1210 11:17:01.277368 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c09a9d73-c35e-40da-b545-ebf1b8724896-config-data\") pod \"heat-api-7c6599c744-svfvf\" (UID: \"c09a9d73-c35e-40da-b545-ebf1b8724896\") " pod="openstack/heat-api-7c6599c744-svfvf" Dec 10 11:17:01 crc kubenswrapper[4780]: I1210 11:17:01.305450 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-api-7c6599c744-svfvf"] Dec 10 11:17:01 crc kubenswrapper[4780]: I1210 
11:17:01.356766 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-engine-86d9dd567b-q6nsm"] Dec 10 11:17:01 crc kubenswrapper[4780]: I1210 11:17:01.385830 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5gmgq\" (UniqueName: \"kubernetes.io/projected/2df2ca18-27cb-48e0-842c-f346f4703a37-kube-api-access-5gmgq\") pod \"heat-cfnapi-7788568699-swzvp\" (UID: \"2df2ca18-27cb-48e0-842c-f346f4703a37\") " pod="openstack/heat-cfnapi-7788568699-swzvp" Dec 10 11:17:01 crc kubenswrapper[4780]: I1210 11:17:01.385947 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2df2ca18-27cb-48e0-842c-f346f4703a37-config-data\") pod \"heat-cfnapi-7788568699-swzvp\" (UID: \"2df2ca18-27cb-48e0-842c-f346f4703a37\") " pod="openstack/heat-cfnapi-7788568699-swzvp" Dec 10 11:17:01 crc kubenswrapper[4780]: I1210 11:17:01.385986 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c09a9d73-c35e-40da-b545-ebf1b8724896-config-data\") pod \"heat-api-7c6599c744-svfvf\" (UID: \"c09a9d73-c35e-40da-b545-ebf1b8724896\") " pod="openstack/heat-api-7c6599c744-svfvf" Dec 10 11:17:01 crc kubenswrapper[4780]: I1210 11:17:01.386083 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/85332002-7a9a-4738-a4cd-5b66c34658b2-config-data\") pod \"heat-engine-86d9dd567b-q6nsm\" (UID: \"85332002-7a9a-4738-a4cd-5b66c34658b2\") " pod="openstack/heat-engine-86d9dd567b-q6nsm" Dec 10 11:17:01 crc kubenswrapper[4780]: I1210 11:17:01.388840 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qfms7\" (UniqueName: \"kubernetes.io/projected/85332002-7a9a-4738-a4cd-5b66c34658b2-kube-api-access-qfms7\") pod \"heat-engine-86d9dd567b-q6nsm\" (UID: \"85332002-7a9a-4738-a4cd-5b66c34658b2\") " pod="openstack/heat-engine-86d9dd567b-q6nsm" Dec 10 11:17:01 crc kubenswrapper[4780]: I1210 11:17:01.389014 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2df2ca18-27cb-48e0-842c-f346f4703a37-combined-ca-bundle\") pod \"heat-cfnapi-7788568699-swzvp\" (UID: \"2df2ca18-27cb-48e0-842c-f346f4703a37\") " pod="openstack/heat-cfnapi-7788568699-swzvp" Dec 10 11:17:01 crc kubenswrapper[4780]: I1210 11:17:01.389626 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/c09a9d73-c35e-40da-b545-ebf1b8724896-config-data-custom\") pod \"heat-api-7c6599c744-svfvf\" (UID: \"c09a9d73-c35e-40da-b545-ebf1b8724896\") " pod="openstack/heat-api-7c6599c744-svfvf" Dec 10 11:17:01 crc kubenswrapper[4780]: I1210 11:17:01.389857 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/85332002-7a9a-4738-a4cd-5b66c34658b2-combined-ca-bundle\") pod \"heat-engine-86d9dd567b-q6nsm\" (UID: \"85332002-7a9a-4738-a4cd-5b66c34658b2\") " pod="openstack/heat-engine-86d9dd567b-q6nsm" Dec 10 11:17:01 crc kubenswrapper[4780]: I1210 11:17:01.390017 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c09a9d73-c35e-40da-b545-ebf1b8724896-combined-ca-bundle\") 
pod \"heat-api-7c6599c744-svfvf\" (UID: \"c09a9d73-c35e-40da-b545-ebf1b8724896\") " pod="openstack/heat-api-7c6599c744-svfvf" Dec 10 11:17:01 crc kubenswrapper[4780]: I1210 11:17:01.390063 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/2df2ca18-27cb-48e0-842c-f346f4703a37-config-data-custom\") pod \"heat-cfnapi-7788568699-swzvp\" (UID: \"2df2ca18-27cb-48e0-842c-f346f4703a37\") " pod="openstack/heat-cfnapi-7788568699-swzvp" Dec 10 11:17:01 crc kubenswrapper[4780]: I1210 11:17:01.390267 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-95k4d\" (UniqueName: \"kubernetes.io/projected/c09a9d73-c35e-40da-b545-ebf1b8724896-kube-api-access-95k4d\") pod \"heat-api-7c6599c744-svfvf\" (UID: \"c09a9d73-c35e-40da-b545-ebf1b8724896\") " pod="openstack/heat-api-7c6599c744-svfvf" Dec 10 11:17:01 crc kubenswrapper[4780]: I1210 11:17:01.390329 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/85332002-7a9a-4738-a4cd-5b66c34658b2-config-data-custom\") pod \"heat-engine-86d9dd567b-q6nsm\" (UID: \"85332002-7a9a-4738-a4cd-5b66c34658b2\") " pod="openstack/heat-engine-86d9dd567b-q6nsm" Dec 10 11:17:01 crc kubenswrapper[4780]: I1210 11:17:01.392590 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-cfnapi-7788568699-swzvp"] Dec 10 11:17:01 crc kubenswrapper[4780]: I1210 11:17:01.400282 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c09a9d73-c35e-40da-b545-ebf1b8724896-combined-ca-bundle\") pod \"heat-api-7c6599c744-svfvf\" (UID: \"c09a9d73-c35e-40da-b545-ebf1b8724896\") " pod="openstack/heat-api-7c6599c744-svfvf" Dec 10 11:17:01 crc kubenswrapper[4780]: I1210 11:17:01.404645 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/85332002-7a9a-4738-a4cd-5b66c34658b2-config-data-custom\") pod \"heat-engine-86d9dd567b-q6nsm\" (UID: \"85332002-7a9a-4738-a4cd-5b66c34658b2\") " pod="openstack/heat-engine-86d9dd567b-q6nsm" Dec 10 11:17:01 crc kubenswrapper[4780]: I1210 11:17:01.405095 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/c09a9d73-c35e-40da-b545-ebf1b8724896-config-data-custom\") pod \"heat-api-7c6599c744-svfvf\" (UID: \"c09a9d73-c35e-40da-b545-ebf1b8724896\") " pod="openstack/heat-api-7c6599c744-svfvf" Dec 10 11:17:01 crc kubenswrapper[4780]: I1210 11:17:01.411231 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/85332002-7a9a-4738-a4cd-5b66c34658b2-config-data\") pod \"heat-engine-86d9dd567b-q6nsm\" (UID: \"85332002-7a9a-4738-a4cd-5b66c34658b2\") " pod="openstack/heat-engine-86d9dd567b-q6nsm" Dec 10 11:17:01 crc kubenswrapper[4780]: I1210 11:17:01.415758 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qfms7\" (UniqueName: \"kubernetes.io/projected/85332002-7a9a-4738-a4cd-5b66c34658b2-kube-api-access-qfms7\") pod \"heat-engine-86d9dd567b-q6nsm\" (UID: \"85332002-7a9a-4738-a4cd-5b66c34658b2\") " pod="openstack/heat-engine-86d9dd567b-q6nsm" Dec 10 11:17:01 crc kubenswrapper[4780]: I1210 11:17:01.416800 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-95k4d\" (UniqueName: \"kubernetes.io/projected/c09a9d73-c35e-40da-b545-ebf1b8724896-kube-api-access-95k4d\") pod \"heat-api-7c6599c744-svfvf\" (UID: \"c09a9d73-c35e-40da-b545-ebf1b8724896\") " pod="openstack/heat-api-7c6599c744-svfvf" Dec 10 11:17:01 crc kubenswrapper[4780]: I1210 11:17:01.427599 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c09a9d73-c35e-40da-b545-ebf1b8724896-config-data\") pod \"heat-api-7c6599c744-svfvf\" (UID: \"c09a9d73-c35e-40da-b545-ebf1b8724896\") " pod="openstack/heat-api-7c6599c744-svfvf" Dec 10 11:17:01 crc kubenswrapper[4780]: I1210 11:17:01.454778 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/85332002-7a9a-4738-a4cd-5b66c34658b2-combined-ca-bundle\") pod \"heat-engine-86d9dd567b-q6nsm\" (UID: \"85332002-7a9a-4738-a4cd-5b66c34658b2\") " pod="openstack/heat-engine-86d9dd567b-q6nsm" Dec 10 11:17:01 crc kubenswrapper[4780]: I1210 11:17:01.497724 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/2df2ca18-27cb-48e0-842c-f346f4703a37-config-data-custom\") pod \"heat-cfnapi-7788568699-swzvp\" (UID: \"2df2ca18-27cb-48e0-842c-f346f4703a37\") " pod="openstack/heat-cfnapi-7788568699-swzvp" Dec 10 11:17:01 crc kubenswrapper[4780]: I1210 11:17:01.498404 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5gmgq\" (UniqueName: \"kubernetes.io/projected/2df2ca18-27cb-48e0-842c-f346f4703a37-kube-api-access-5gmgq\") pod \"heat-cfnapi-7788568699-swzvp\" (UID: \"2df2ca18-27cb-48e0-842c-f346f4703a37\") " pod="openstack/heat-cfnapi-7788568699-swzvp" Dec 10 11:17:01 crc kubenswrapper[4780]: I1210 11:17:01.498448 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2df2ca18-27cb-48e0-842c-f346f4703a37-config-data\") pod \"heat-cfnapi-7788568699-swzvp\" (UID: \"2df2ca18-27cb-48e0-842c-f346f4703a37\") " pod="openstack/heat-cfnapi-7788568699-swzvp" Dec 10 11:17:01 crc kubenswrapper[4780]: I1210 11:17:01.498571 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2df2ca18-27cb-48e0-842c-f346f4703a37-combined-ca-bundle\") pod \"heat-cfnapi-7788568699-swzvp\" (UID: \"2df2ca18-27cb-48e0-842c-f346f4703a37\") " pod="openstack/heat-cfnapi-7788568699-swzvp" Dec 10 11:17:01 crc kubenswrapper[4780]: I1210 11:17:01.510435 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2df2ca18-27cb-48e0-842c-f346f4703a37-combined-ca-bundle\") pod \"heat-cfnapi-7788568699-swzvp\" (UID: \"2df2ca18-27cb-48e0-842c-f346f4703a37\") " pod="openstack/heat-cfnapi-7788568699-swzvp" Dec 10 11:17:01 crc kubenswrapper[4780]: I1210 11:17:01.511232 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2df2ca18-27cb-48e0-842c-f346f4703a37-config-data\") pod \"heat-cfnapi-7788568699-swzvp\" (UID: \"2df2ca18-27cb-48e0-842c-f346f4703a37\") " pod="openstack/heat-cfnapi-7788568699-swzvp" Dec 10 11:17:01 crc kubenswrapper[4780]: I1210 11:17:01.521447 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-engine-86d9dd567b-q6nsm" Dec 10 11:17:01 crc kubenswrapper[4780]: I1210 11:17:01.522818 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/2df2ca18-27cb-48e0-842c-f346f4703a37-config-data-custom\") pod \"heat-cfnapi-7788568699-swzvp\" (UID: \"2df2ca18-27cb-48e0-842c-f346f4703a37\") " pod="openstack/heat-cfnapi-7788568699-swzvp" Dec 10 11:17:01 crc kubenswrapper[4780]: I1210 11:17:01.531428 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5gmgq\" (UniqueName: \"kubernetes.io/projected/2df2ca18-27cb-48e0-842c-f346f4703a37-kube-api-access-5gmgq\") pod \"heat-cfnapi-7788568699-swzvp\" (UID: \"2df2ca18-27cb-48e0-842c-f346f4703a37\") " pod="openstack/heat-cfnapi-7788568699-swzvp" Dec 10 11:17:01 crc kubenswrapper[4780]: I1210 11:17:01.550414 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-api-7c6599c744-svfvf" Dec 10 11:17:01 crc kubenswrapper[4780]: I1210 11:17:01.583450 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-cfnapi-7788568699-swzvp" Dec 10 11:17:01 crc kubenswrapper[4780]: I1210 11:17:01.878111 4780 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Dec 10 11:17:01 crc kubenswrapper[4780]: I1210 11:17:01.899373 4780 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Dec 10 11:17:02 crc kubenswrapper[4780]: I1210 11:17:02.310183 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1f099496-ee6c-44ff-9c42-c6584b8fb2ad-scripts\") pod \"1f099496-ee6c-44ff-9c42-c6584b8fb2ad\" (UID: \"1f099496-ee6c-44ff-9c42-c6584b8fb2ad\") " Dec 10 11:17:02 crc kubenswrapper[4780]: I1210 11:17:02.312244 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1f099496-ee6c-44ff-9c42-c6584b8fb2ad-run-httpd\") pod \"1f099496-ee6c-44ff-9c42-c6584b8fb2ad\" (UID: \"1f099496-ee6c-44ff-9c42-c6584b8fb2ad\") " Dec 10 11:17:02 crc kubenswrapper[4780]: I1210 11:17:02.312380 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1f099496-ee6c-44ff-9c42-c6584b8fb2ad-config-data\") pod \"1f099496-ee6c-44ff-9c42-c6584b8fb2ad\" (UID: \"1f099496-ee6c-44ff-9c42-c6584b8fb2ad\") " Dec 10 11:17:02 crc kubenswrapper[4780]: I1210 11:17:02.312564 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1f099496-ee6c-44ff-9c42-c6584b8fb2ad-combined-ca-bundle\") pod \"1f099496-ee6c-44ff-9c42-c6584b8fb2ad\" (UID: \"1f099496-ee6c-44ff-9c42-c6584b8fb2ad\") " Dec 10 11:17:02 crc kubenswrapper[4780]: I1210 11:17:02.312592 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/bbd2dcb7-ece1-4a2f-83ec-a4aa131720ad-etc-machine-id\") pod \"bbd2dcb7-ece1-4a2f-83ec-a4aa131720ad\" (UID: \"bbd2dcb7-ece1-4a2f-83ec-a4aa131720ad\") " Dec 10 11:17:02 crc kubenswrapper[4780]: I1210 11:17:02.312699 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5rm5l\" (UniqueName: 
\"kubernetes.io/projected/1f099496-ee6c-44ff-9c42-c6584b8fb2ad-kube-api-access-5rm5l\") pod \"1f099496-ee6c-44ff-9c42-c6584b8fb2ad\" (UID: \"1f099496-ee6c-44ff-9c42-c6584b8fb2ad\") " Dec 10 11:17:02 crc kubenswrapper[4780]: I1210 11:17:02.312792 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/1f099496-ee6c-44ff-9c42-c6584b8fb2ad-sg-core-conf-yaml\") pod \"1f099496-ee6c-44ff-9c42-c6584b8fb2ad\" (UID: \"1f099496-ee6c-44ff-9c42-c6584b8fb2ad\") " Dec 10 11:17:02 crc kubenswrapper[4780]: I1210 11:17:02.312819 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1f099496-ee6c-44ff-9c42-c6584b8fb2ad-log-httpd\") pod \"1f099496-ee6c-44ff-9c42-c6584b8fb2ad\" (UID: \"1f099496-ee6c-44ff-9c42-c6584b8fb2ad\") " Dec 10 11:17:02 crc kubenswrapper[4780]: I1210 11:17:02.313031 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bbd2dcb7-ece1-4a2f-83ec-a4aa131720ad-config-data\") pod \"bbd2dcb7-ece1-4a2f-83ec-a4aa131720ad\" (UID: \"bbd2dcb7-ece1-4a2f-83ec-a4aa131720ad\") " Dec 10 11:17:02 crc kubenswrapper[4780]: I1210 11:17:02.313078 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/bbd2dcb7-ece1-4a2f-83ec-a4aa131720ad-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "bbd2dcb7-ece1-4a2f-83ec-a4aa131720ad" (UID: "bbd2dcb7-ece1-4a2f-83ec-a4aa131720ad"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 10 11:17:02 crc kubenswrapper[4780]: I1210 11:17:02.313323 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1f099496-ee6c-44ff-9c42-c6584b8fb2ad-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "1f099496-ee6c-44ff-9c42-c6584b8fb2ad" (UID: "1f099496-ee6c-44ff-9c42-c6584b8fb2ad"). InnerVolumeSpecName "run-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 11:17:02 crc kubenswrapper[4780]: I1210 11:17:02.320332 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gtqss\" (UniqueName: \"kubernetes.io/projected/bbd2dcb7-ece1-4a2f-83ec-a4aa131720ad-kube-api-access-gtqss\") pod \"bbd2dcb7-ece1-4a2f-83ec-a4aa131720ad\" (UID: \"bbd2dcb7-ece1-4a2f-83ec-a4aa131720ad\") " Dec 10 11:17:02 crc kubenswrapper[4780]: I1210 11:17:02.320499 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/bbd2dcb7-ece1-4a2f-83ec-a4aa131720ad-scripts\") pod \"bbd2dcb7-ece1-4a2f-83ec-a4aa131720ad\" (UID: \"bbd2dcb7-ece1-4a2f-83ec-a4aa131720ad\") " Dec 10 11:17:02 crc kubenswrapper[4780]: I1210 11:17:02.320574 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bbd2dcb7-ece1-4a2f-83ec-a4aa131720ad-combined-ca-bundle\") pod \"bbd2dcb7-ece1-4a2f-83ec-a4aa131720ad\" (UID: \"bbd2dcb7-ece1-4a2f-83ec-a4aa131720ad\") " Dec 10 11:17:02 crc kubenswrapper[4780]: I1210 11:17:02.320611 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/bbd2dcb7-ece1-4a2f-83ec-a4aa131720ad-config-data-custom\") pod \"bbd2dcb7-ece1-4a2f-83ec-a4aa131720ad\" (UID: \"bbd2dcb7-ece1-4a2f-83ec-a4aa131720ad\") " Dec 10 11:17:02 crc kubenswrapper[4780]: I1210 11:17:02.324634 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1f099496-ee6c-44ff-9c42-c6584b8fb2ad-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "1f099496-ee6c-44ff-9c42-c6584b8fb2ad" (UID: "1f099496-ee6c-44ff-9c42-c6584b8fb2ad"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 11:17:02 crc kubenswrapper[4780]: I1210 11:17:02.329377 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1f099496-ee6c-44ff-9c42-c6584b8fb2ad-scripts" (OuterVolumeSpecName: "scripts") pod "1f099496-ee6c-44ff-9c42-c6584b8fb2ad" (UID: "1f099496-ee6c-44ff-9c42-c6584b8fb2ad"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:17:02 crc kubenswrapper[4780]: I1210 11:17:02.331304 4780 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/bbd2dcb7-ece1-4a2f-83ec-a4aa131720ad-etc-machine-id\") on node \"crc\" DevicePath \"\"" Dec 10 11:17:02 crc kubenswrapper[4780]: I1210 11:17:02.331346 4780 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1f099496-ee6c-44ff-9c42-c6584b8fb2ad-log-httpd\") on node \"crc\" DevicePath \"\"" Dec 10 11:17:02 crc kubenswrapper[4780]: I1210 11:17:02.331358 4780 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1f099496-ee6c-44ff-9c42-c6584b8fb2ad-scripts\") on node \"crc\" DevicePath \"\"" Dec 10 11:17:02 crc kubenswrapper[4780]: I1210 11:17:02.331368 4780 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1f099496-ee6c-44ff-9c42-c6584b8fb2ad-run-httpd\") on node \"crc\" DevicePath \"\"" Dec 10 11:17:02 crc kubenswrapper[4780]: I1210 11:17:02.346545 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bbd2dcb7-ece1-4a2f-83ec-a4aa131720ad-kube-api-access-gtqss" (OuterVolumeSpecName: "kube-api-access-gtqss") pod "bbd2dcb7-ece1-4a2f-83ec-a4aa131720ad" (UID: "bbd2dcb7-ece1-4a2f-83ec-a4aa131720ad"). InnerVolumeSpecName "kube-api-access-gtqss". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:17:02 crc kubenswrapper[4780]: I1210 11:17:02.356677 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bbd2dcb7-ece1-4a2f-83ec-a4aa131720ad-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "bbd2dcb7-ece1-4a2f-83ec-a4aa131720ad" (UID: "bbd2dcb7-ece1-4a2f-83ec-a4aa131720ad"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:17:02 crc kubenswrapper[4780]: I1210 11:17:02.358388 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bbd2dcb7-ece1-4a2f-83ec-a4aa131720ad-scripts" (OuterVolumeSpecName: "scripts") pod "bbd2dcb7-ece1-4a2f-83ec-a4aa131720ad" (UID: "bbd2dcb7-ece1-4a2f-83ec-a4aa131720ad"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:17:02 crc kubenswrapper[4780]: I1210 11:17:02.359489 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1f099496-ee6c-44ff-9c42-c6584b8fb2ad-kube-api-access-5rm5l" (OuterVolumeSpecName: "kube-api-access-5rm5l") pod "1f099496-ee6c-44ff-9c42-c6584b8fb2ad" (UID: "1f099496-ee6c-44ff-9c42-c6584b8fb2ad"). InnerVolumeSpecName "kube-api-access-5rm5l". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:17:02 crc kubenswrapper[4780]: I1210 11:17:02.458643 4780 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5rm5l\" (UniqueName: \"kubernetes.io/projected/1f099496-ee6c-44ff-9c42-c6584b8fb2ad-kube-api-access-5rm5l\") on node \"crc\" DevicePath \"\"" Dec 10 11:17:02 crc kubenswrapper[4780]: I1210 11:17:02.459180 4780 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gtqss\" (UniqueName: \"kubernetes.io/projected/bbd2dcb7-ece1-4a2f-83ec-a4aa131720ad-kube-api-access-gtqss\") on node \"crc\" DevicePath \"\"" Dec 10 11:17:02 crc kubenswrapper[4780]: I1210 11:17:02.459194 4780 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/bbd2dcb7-ece1-4a2f-83ec-a4aa131720ad-scripts\") on node \"crc\" DevicePath \"\"" Dec 10 11:17:02 crc kubenswrapper[4780]: I1210 11:17:02.459207 4780 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/bbd2dcb7-ece1-4a2f-83ec-a4aa131720ad-config-data-custom\") on node \"crc\" DevicePath \"\"" Dec 10 11:17:02 crc kubenswrapper[4780]: I1210 11:17:02.492365 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1f099496-ee6c-44ff-9c42-c6584b8fb2ad-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "1f099496-ee6c-44ff-9c42-c6584b8fb2ad" (UID: "1f099496-ee6c-44ff-9c42-c6584b8fb2ad"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:17:02 crc kubenswrapper[4780]: E1210 11:17:02.530759 4780 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-openstackclient:current-podified" Dec 10 11:17:02 crc kubenswrapper[4780]: E1210 11:17:02.531017 4780 kuberuntime_manager.go:1274] "Unhandled Error" err="container 
&Container{Name:openstackclient,Image:quay.io/podified-antelope-centos9/openstack-openstackclient:current-podified,Command:[/bin/sleep],Args:[infinity],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n5fch549h5f9h9dh94h594hddh5f4h5f5h5d6h569h67fh694h85h84h58dh5c7hc8h687h8fh696h5d7h685h5bfh57fhb9h77hf8hdbh9dh5cbh55cq,ValueFrom:nil,},EnvVar{Name:OS_CLOUD,Value:default,ValueFrom:nil,},EnvVar{Name:PROMETHEUS_CA_CERT,Value:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,ValueFrom:nil,},EnvVar{Name:PROMETHEUS_HOST,Value:metric-storage-prometheus.openstack.svc,ValueFrom:nil,},EnvVar{Name:PROMETHEUS_PORT,Value:9090,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:openstack-config,ReadOnly:false,MountPath:/home/cloud-admin/.config/openstack/clouds.yaml,SubPath:clouds.yaml,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:openstack-config-secret,ReadOnly:false,MountPath:/home/cloud-admin/.config/openstack/secure.yaml,SubPath:secure.yaml,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:openstack-config-secret,ReadOnly:false,MountPath:/home/cloud-admin/cloudrc,SubPath:cloudrc,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-4bvhk,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42401,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:*42401,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod openstackclient_openstack(e74ddef3-dfb9-4409-9920-1cad0dc2492c): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Dec 10 11:17:02 crc kubenswrapper[4780]: E1210 11:17:02.536616 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"openstackclient\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/openstackclient" podUID="e74ddef3-dfb9-4409-9920-1cad0dc2492c" Dec 10 11:17:02 crc kubenswrapper[4780]: I1210 11:17:02.565121 4780 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/1f099496-ee6c-44ff-9c42-c6584b8fb2ad-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Dec 10 11:17:02 crc kubenswrapper[4780]: I1210 11:17:02.708730 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bbd2dcb7-ece1-4a2f-83ec-a4aa131720ad-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "bbd2dcb7-ece1-4a2f-83ec-a4aa131720ad" (UID: "bbd2dcb7-ece1-4a2f-83ec-a4aa131720ad"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:17:02 crc kubenswrapper[4780]: I1210 11:17:02.843104 4780 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bbd2dcb7-ece1-4a2f-83ec-a4aa131720ad-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 10 11:17:02 crc kubenswrapper[4780]: I1210 11:17:02.846408 4780 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Dec 10 11:17:02 crc kubenswrapper[4780]: I1210 11:17:02.846690 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"1f099496-ee6c-44ff-9c42-c6584b8fb2ad","Type":"ContainerDied","Data":"dbd23b877d5a4d2609f06c17387931552678f5268574b1a3447ea07aa49ab53a"} Dec 10 11:17:02 crc kubenswrapper[4780]: I1210 11:17:02.846746 4780 scope.go:117] "RemoveContainer" containerID="7326f7d0eb9117d311b8a0fce5df5cce919e314e9848cc272e29de1558568644" Dec 10 11:17:02 crc kubenswrapper[4780]: I1210 11:17:02.893802 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"bbd2dcb7-ece1-4a2f-83ec-a4aa131720ad","Type":"ContainerDied","Data":"e1b4906b04e6d4fd4d1a30cae73bc876f479a149c6bc55cf14ee49614a2fd1b3"} Dec 10 11:17:02 crc kubenswrapper[4780]: I1210 11:17:02.894041 4780 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Dec 10 11:17:02 crc kubenswrapper[4780]: I1210 11:17:02.894058 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1f099496-ee6c-44ff-9c42-c6584b8fb2ad-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "1f099496-ee6c-44ff-9c42-c6584b8fb2ad" (UID: "1f099496-ee6c-44ff-9c42-c6584b8fb2ad"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:17:02 crc kubenswrapper[4780]: I1210 11:17:02.911853 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bbd2dcb7-ece1-4a2f-83ec-a4aa131720ad-config-data" (OuterVolumeSpecName: "config-data") pod "bbd2dcb7-ece1-4a2f-83ec-a4aa131720ad" (UID: "bbd2dcb7-ece1-4a2f-83ec-a4aa131720ad"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:17:02 crc kubenswrapper[4780]: E1210 11:17:02.914195 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"openstackclient\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-openstackclient:current-podified\\\"\"" pod="openstack/openstackclient" podUID="e74ddef3-dfb9-4409-9920-1cad0dc2492c" Dec 10 11:17:02 crc kubenswrapper[4780]: I1210 11:17:02.955882 4780 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bbd2dcb7-ece1-4a2f-83ec-a4aa131720ad-config-data\") on node \"crc\" DevicePath \"\"" Dec 10 11:17:02 crc kubenswrapper[4780]: I1210 11:17:02.955968 4780 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1f099496-ee6c-44ff-9c42-c6584b8fb2ad-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 10 11:17:03 crc kubenswrapper[4780]: I1210 11:17:03.111238 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1f099496-ee6c-44ff-9c42-c6584b8fb2ad-config-data" (OuterVolumeSpecName: "config-data") pod "1f099496-ee6c-44ff-9c42-c6584b8fb2ad" (UID: "1f099496-ee6c-44ff-9c42-c6584b8fb2ad"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:17:03 crc kubenswrapper[4780]: I1210 11:17:03.163198 4780 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1f099496-ee6c-44ff-9c42-c6584b8fb2ad-config-data\") on node \"crc\" DevicePath \"\"" Dec 10 11:17:03 crc kubenswrapper[4780]: I1210 11:17:03.164645 4780 scope.go:117] "RemoveContainer" containerID="e95636e8f3a430b662ce91c03ec6e1e291e4540c649d4504ea274b5088557cbb" Dec 10 11:17:03 crc kubenswrapper[4780]: I1210 11:17:03.455610 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Dec 10 11:17:03 crc kubenswrapper[4780]: I1210 11:17:03.464757 4780 scope.go:117] "RemoveContainer" containerID="b4fa9fbc70f5848ae91210eb2c0d2674c29b75dde7c0216f7c8d756f517045f2" Dec 10 11:17:03 crc kubenswrapper[4780]: I1210 11:17:03.476846 4780 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Dec 10 11:17:03 crc kubenswrapper[4780]: I1210 11:17:03.511362 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-scheduler-0"] Dec 10 11:17:03 crc kubenswrapper[4780]: I1210 11:17:03.534444 4780 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-scheduler-0"] Dec 10 11:17:03 crc kubenswrapper[4780]: I1210 11:17:03.563575 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Dec 10 11:17:03 crc kubenswrapper[4780]: E1210 11:17:03.564757 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bbd2dcb7-ece1-4a2f-83ec-a4aa131720ad" containerName="probe" Dec 10 11:17:03 crc kubenswrapper[4780]: I1210 11:17:03.564780 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="bbd2dcb7-ece1-4a2f-83ec-a4aa131720ad" containerName="probe" Dec 10 11:17:03 crc kubenswrapper[4780]: E1210 11:17:03.564833 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1f099496-ee6c-44ff-9c42-c6584b8fb2ad" containerName="sg-core" Dec 10 11:17:03 crc kubenswrapper[4780]: I1210 11:17:03.564840 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="1f099496-ee6c-44ff-9c42-c6584b8fb2ad" containerName="sg-core" Dec 10 11:17:03 crc 
kubenswrapper[4780]: E1210 11:17:03.564858 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1f099496-ee6c-44ff-9c42-c6584b8fb2ad" containerName="ceilometer-central-agent" Dec 10 11:17:03 crc kubenswrapper[4780]: I1210 11:17:03.564865 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="1f099496-ee6c-44ff-9c42-c6584b8fb2ad" containerName="ceilometer-central-agent" Dec 10 11:17:03 crc kubenswrapper[4780]: E1210 11:17:03.564874 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1f099496-ee6c-44ff-9c42-c6584b8fb2ad" containerName="proxy-httpd" Dec 10 11:17:03 crc kubenswrapper[4780]: I1210 11:17:03.564880 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="1f099496-ee6c-44ff-9c42-c6584b8fb2ad" containerName="proxy-httpd" Dec 10 11:17:03 crc kubenswrapper[4780]: E1210 11:17:03.564906 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bbd2dcb7-ece1-4a2f-83ec-a4aa131720ad" containerName="cinder-scheduler" Dec 10 11:17:03 crc kubenswrapper[4780]: I1210 11:17:03.564930 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="bbd2dcb7-ece1-4a2f-83ec-a4aa131720ad" containerName="cinder-scheduler" Dec 10 11:17:03 crc kubenswrapper[4780]: E1210 11:17:03.564959 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1f099496-ee6c-44ff-9c42-c6584b8fb2ad" containerName="ceilometer-notification-agent" Dec 10 11:17:03 crc kubenswrapper[4780]: I1210 11:17:03.564969 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="1f099496-ee6c-44ff-9c42-c6584b8fb2ad" containerName="ceilometer-notification-agent" Dec 10 11:17:03 crc kubenswrapper[4780]: I1210 11:17:03.565373 4780 memory_manager.go:354] "RemoveStaleState removing state" podUID="bbd2dcb7-ece1-4a2f-83ec-a4aa131720ad" containerName="probe" Dec 10 11:17:03 crc kubenswrapper[4780]: I1210 11:17:03.565390 4780 memory_manager.go:354] "RemoveStaleState removing state" podUID="1f099496-ee6c-44ff-9c42-c6584b8fb2ad" containerName="ceilometer-central-agent" Dec 10 11:17:03 crc kubenswrapper[4780]: I1210 11:17:03.565406 4780 memory_manager.go:354] "RemoveStaleState removing state" podUID="1f099496-ee6c-44ff-9c42-c6584b8fb2ad" containerName="ceilometer-notification-agent" Dec 10 11:17:03 crc kubenswrapper[4780]: I1210 11:17:03.565432 4780 memory_manager.go:354] "RemoveStaleState removing state" podUID="bbd2dcb7-ece1-4a2f-83ec-a4aa131720ad" containerName="cinder-scheduler" Dec 10 11:17:03 crc kubenswrapper[4780]: I1210 11:17:03.565441 4780 memory_manager.go:354] "RemoveStaleState removing state" podUID="1f099496-ee6c-44ff-9c42-c6584b8fb2ad" containerName="sg-core" Dec 10 11:17:03 crc kubenswrapper[4780]: I1210 11:17:03.565453 4780 memory_manager.go:354] "RemoveStaleState removing state" podUID="1f099496-ee6c-44ff-9c42-c6584b8fb2ad" containerName="proxy-httpd" Dec 10 11:17:03 crc kubenswrapper[4780]: I1210 11:17:03.573731 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Dec 10 11:17:03 crc kubenswrapper[4780]: I1210 11:17:03.577494 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Dec 10 11:17:03 crc kubenswrapper[4780]: I1210 11:17:03.577718 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Dec 10 11:17:03 crc kubenswrapper[4780]: I1210 11:17:03.591515 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-scheduler-0"] Dec 10 11:17:03 crc kubenswrapper[4780]: I1210 11:17:03.602885 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Dec 10 11:17:03 crc kubenswrapper[4780]: I1210 11:17:03.607046 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scheduler-config-data" Dec 10 11:17:03 crc kubenswrapper[4780]: I1210 11:17:03.657779 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Dec 10 11:17:03 crc kubenswrapper[4780]: I1210 11:17:03.687672 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/462cb03c-f171-4b9e-8bf2-b63ec19e6a06-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"462cb03c-f171-4b9e-8bf2-b63ec19e6a06\") " pod="openstack/ceilometer-0" Dec 10 11:17:03 crc kubenswrapper[4780]: I1210 11:17:03.687770 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-v2r4x\" (UniqueName: \"kubernetes.io/projected/274acc05-0f10-48e5-8fb8-44bc1ddca126-kube-api-access-v2r4x\") pod \"cinder-scheduler-0\" (UID: \"274acc05-0f10-48e5-8fb8-44bc1ddca126\") " pod="openstack/cinder-scheduler-0" Dec 10 11:17:03 crc kubenswrapper[4780]: I1210 11:17:03.687836 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/274acc05-0f10-48e5-8fb8-44bc1ddca126-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"274acc05-0f10-48e5-8fb8-44bc1ddca126\") " pod="openstack/cinder-scheduler-0" Dec 10 11:17:03 crc kubenswrapper[4780]: I1210 11:17:03.688357 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wvkcc\" (UniqueName: \"kubernetes.io/projected/462cb03c-f171-4b9e-8bf2-b63ec19e6a06-kube-api-access-wvkcc\") pod \"ceilometer-0\" (UID: \"462cb03c-f171-4b9e-8bf2-b63ec19e6a06\") " pod="openstack/ceilometer-0" Dec 10 11:17:03 crc kubenswrapper[4780]: I1210 11:17:03.688417 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/274acc05-0f10-48e5-8fb8-44bc1ddca126-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"274acc05-0f10-48e5-8fb8-44bc1ddca126\") " pod="openstack/cinder-scheduler-0" Dec 10 11:17:03 crc kubenswrapper[4780]: I1210 11:17:03.688451 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/462cb03c-f171-4b9e-8bf2-b63ec19e6a06-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"462cb03c-f171-4b9e-8bf2-b63ec19e6a06\") " pod="openstack/ceilometer-0" Dec 10 11:17:03 crc kubenswrapper[4780]: I1210 11:17:03.688529 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" 
(UniqueName: \"kubernetes.io/empty-dir/462cb03c-f171-4b9e-8bf2-b63ec19e6a06-log-httpd\") pod \"ceilometer-0\" (UID: \"462cb03c-f171-4b9e-8bf2-b63ec19e6a06\") " pod="openstack/ceilometer-0" Dec 10 11:17:03 crc kubenswrapper[4780]: I1210 11:17:03.689054 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/462cb03c-f171-4b9e-8bf2-b63ec19e6a06-scripts\") pod \"ceilometer-0\" (UID: \"462cb03c-f171-4b9e-8bf2-b63ec19e6a06\") " pod="openstack/ceilometer-0" Dec 10 11:17:03 crc kubenswrapper[4780]: I1210 11:17:03.689336 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/462cb03c-f171-4b9e-8bf2-b63ec19e6a06-run-httpd\") pod \"ceilometer-0\" (UID: \"462cb03c-f171-4b9e-8bf2-b63ec19e6a06\") " pod="openstack/ceilometer-0" Dec 10 11:17:03 crc kubenswrapper[4780]: I1210 11:17:03.689571 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/274acc05-0f10-48e5-8fb8-44bc1ddca126-config-data\") pod \"cinder-scheduler-0\" (UID: \"274acc05-0f10-48e5-8fb8-44bc1ddca126\") " pod="openstack/cinder-scheduler-0" Dec 10 11:17:03 crc kubenswrapper[4780]: I1210 11:17:03.689603 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/274acc05-0f10-48e5-8fb8-44bc1ddca126-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"274acc05-0f10-48e5-8fb8-44bc1ddca126\") " pod="openstack/cinder-scheduler-0" Dec 10 11:17:03 crc kubenswrapper[4780]: I1210 11:17:03.689645 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/274acc05-0f10-48e5-8fb8-44bc1ddca126-scripts\") pod \"cinder-scheduler-0\" (UID: \"274acc05-0f10-48e5-8fb8-44bc1ddca126\") " pod="openstack/cinder-scheduler-0" Dec 10 11:17:03 crc kubenswrapper[4780]: I1210 11:17:03.689749 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/462cb03c-f171-4b9e-8bf2-b63ec19e6a06-config-data\") pod \"ceilometer-0\" (UID: \"462cb03c-f171-4b9e-8bf2-b63ec19e6a06\") " pod="openstack/ceilometer-0" Dec 10 11:17:03 crc kubenswrapper[4780]: I1210 11:17:03.698084 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Dec 10 11:17:03 crc kubenswrapper[4780]: I1210 11:17:03.754258 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-db-create-wdmbj"] Dec 10 11:17:03 crc kubenswrapper[4780]: I1210 11:17:03.794184 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/274acc05-0f10-48e5-8fb8-44bc1ddca126-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"274acc05-0f10-48e5-8fb8-44bc1ddca126\") " pod="openstack/cinder-scheduler-0" Dec 10 11:17:03 crc kubenswrapper[4780]: I1210 11:17:03.794251 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/462cb03c-f171-4b9e-8bf2-b63ec19e6a06-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"462cb03c-f171-4b9e-8bf2-b63ec19e6a06\") " pod="openstack/ceilometer-0" Dec 10 11:17:03 crc kubenswrapper[4780]: I1210 11:17:03.794314 4780 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/462cb03c-f171-4b9e-8bf2-b63ec19e6a06-log-httpd\") pod \"ceilometer-0\" (UID: \"462cb03c-f171-4b9e-8bf2-b63ec19e6a06\") " pod="openstack/ceilometer-0" Dec 10 11:17:03 crc kubenswrapper[4780]: I1210 11:17:03.794433 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/462cb03c-f171-4b9e-8bf2-b63ec19e6a06-scripts\") pod \"ceilometer-0\" (UID: \"462cb03c-f171-4b9e-8bf2-b63ec19e6a06\") " pod="openstack/ceilometer-0" Dec 10 11:17:03 crc kubenswrapper[4780]: I1210 11:17:03.794673 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/462cb03c-f171-4b9e-8bf2-b63ec19e6a06-run-httpd\") pod \"ceilometer-0\" (UID: \"462cb03c-f171-4b9e-8bf2-b63ec19e6a06\") " pod="openstack/ceilometer-0" Dec 10 11:17:03 crc kubenswrapper[4780]: I1210 11:17:03.794806 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/274acc05-0f10-48e5-8fb8-44bc1ddca126-config-data\") pod \"cinder-scheduler-0\" (UID: \"274acc05-0f10-48e5-8fb8-44bc1ddca126\") " pod="openstack/cinder-scheduler-0" Dec 10 11:17:03 crc kubenswrapper[4780]: I1210 11:17:03.794836 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/274acc05-0f10-48e5-8fb8-44bc1ddca126-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"274acc05-0f10-48e5-8fb8-44bc1ddca126\") " pod="openstack/cinder-scheduler-0" Dec 10 11:17:03 crc kubenswrapper[4780]: I1210 11:17:03.794867 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/274acc05-0f10-48e5-8fb8-44bc1ddca126-scripts\") pod \"cinder-scheduler-0\" (UID: \"274acc05-0f10-48e5-8fb8-44bc1ddca126\") " pod="openstack/cinder-scheduler-0" Dec 10 11:17:03 crc kubenswrapper[4780]: I1210 11:17:03.794975 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/462cb03c-f171-4b9e-8bf2-b63ec19e6a06-config-data\") pod \"ceilometer-0\" (UID: \"462cb03c-f171-4b9e-8bf2-b63ec19e6a06\") " pod="openstack/ceilometer-0" Dec 10 11:17:03 crc kubenswrapper[4780]: I1210 11:17:03.795102 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/462cb03c-f171-4b9e-8bf2-b63ec19e6a06-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"462cb03c-f171-4b9e-8bf2-b63ec19e6a06\") " pod="openstack/ceilometer-0" Dec 10 11:17:03 crc kubenswrapper[4780]: I1210 11:17:03.795142 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-v2r4x\" (UniqueName: \"kubernetes.io/projected/274acc05-0f10-48e5-8fb8-44bc1ddca126-kube-api-access-v2r4x\") pod \"cinder-scheduler-0\" (UID: \"274acc05-0f10-48e5-8fb8-44bc1ddca126\") " pod="openstack/cinder-scheduler-0" Dec 10 11:17:03 crc kubenswrapper[4780]: I1210 11:17:03.795198 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/274acc05-0f10-48e5-8fb8-44bc1ddca126-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"274acc05-0f10-48e5-8fb8-44bc1ddca126\") " pod="openstack/cinder-scheduler-0" Dec 10 11:17:03 crc kubenswrapper[4780]: 
I1210 11:17:03.795292 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wvkcc\" (UniqueName: \"kubernetes.io/projected/462cb03c-f171-4b9e-8bf2-b63ec19e6a06-kube-api-access-wvkcc\") pod \"ceilometer-0\" (UID: \"462cb03c-f171-4b9e-8bf2-b63ec19e6a06\") " pod="openstack/ceilometer-0" Dec 10 11:17:03 crc kubenswrapper[4780]: I1210 11:17:03.809554 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/274acc05-0f10-48e5-8fb8-44bc1ddca126-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"274acc05-0f10-48e5-8fb8-44bc1ddca126\") " pod="openstack/cinder-scheduler-0" Dec 10 11:17:03 crc kubenswrapper[4780]: I1210 11:17:03.882829 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/462cb03c-f171-4b9e-8bf2-b63ec19e6a06-log-httpd\") pod \"ceilometer-0\" (UID: \"462cb03c-f171-4b9e-8bf2-b63ec19e6a06\") " pod="openstack/ceilometer-0" Dec 10 11:17:03 crc kubenswrapper[4780]: I1210 11:17:03.910588 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/462cb03c-f171-4b9e-8bf2-b63ec19e6a06-run-httpd\") pod \"ceilometer-0\" (UID: \"462cb03c-f171-4b9e-8bf2-b63ec19e6a06\") " pod="openstack/ceilometer-0" Dec 10 11:17:03 crc kubenswrapper[4780]: I1210 11:17:03.920543 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/274acc05-0f10-48e5-8fb8-44bc1ddca126-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"274acc05-0f10-48e5-8fb8-44bc1ddca126\") " pod="openstack/cinder-scheduler-0" Dec 10 11:17:03 crc kubenswrapper[4780]: I1210 11:17:03.925326 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/274acc05-0f10-48e5-8fb8-44bc1ddca126-config-data\") pod \"cinder-scheduler-0\" (UID: \"274acc05-0f10-48e5-8fb8-44bc1ddca126\") " pod="openstack/cinder-scheduler-0" Dec 10 11:17:03 crc kubenswrapper[4780]: I1210 11:17:03.931048 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/462cb03c-f171-4b9e-8bf2-b63ec19e6a06-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"462cb03c-f171-4b9e-8bf2-b63ec19e6a06\") " pod="openstack/ceilometer-0" Dec 10 11:17:03 crc kubenswrapper[4780]: I1210 11:17:03.936712 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/462cb03c-f171-4b9e-8bf2-b63ec19e6a06-config-data\") pod \"ceilometer-0\" (UID: \"462cb03c-f171-4b9e-8bf2-b63ec19e6a06\") " pod="openstack/ceilometer-0" Dec 10 11:17:03 crc kubenswrapper[4780]: I1210 11:17:03.946689 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/274acc05-0f10-48e5-8fb8-44bc1ddca126-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"274acc05-0f10-48e5-8fb8-44bc1ddca126\") " pod="openstack/cinder-scheduler-0" Dec 10 11:17:03 crc kubenswrapper[4780]: I1210 11:17:03.961612 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-v2r4x\" (UniqueName: \"kubernetes.io/projected/274acc05-0f10-48e5-8fb8-44bc1ddca126-kube-api-access-v2r4x\") pod \"cinder-scheduler-0\" (UID: \"274acc05-0f10-48e5-8fb8-44bc1ddca126\") " pod="openstack/cinder-scheduler-0" Dec 10 11:17:03 crc 
kubenswrapper[4780]: I1210 11:17:03.964406 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/462cb03c-f171-4b9e-8bf2-b63ec19e6a06-scripts\") pod \"ceilometer-0\" (UID: \"462cb03c-f171-4b9e-8bf2-b63ec19e6a06\") " pod="openstack/ceilometer-0" Dec 10 11:17:03 crc kubenswrapper[4780]: I1210 11:17:03.965883 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/462cb03c-f171-4b9e-8bf2-b63ec19e6a06-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"462cb03c-f171-4b9e-8bf2-b63ec19e6a06\") " pod="openstack/ceilometer-0" Dec 10 11:17:03 crc kubenswrapper[4780]: I1210 11:17:03.966527 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/274acc05-0f10-48e5-8fb8-44bc1ddca126-scripts\") pod \"cinder-scheduler-0\" (UID: \"274acc05-0f10-48e5-8fb8-44bc1ddca126\") " pod="openstack/cinder-scheduler-0" Dec 10 11:17:03 crc kubenswrapper[4780]: I1210 11:17:03.980617 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wvkcc\" (UniqueName: \"kubernetes.io/projected/462cb03c-f171-4b9e-8bf2-b63ec19e6a06-kube-api-access-wvkcc\") pod \"ceilometer-0\" (UID: \"462cb03c-f171-4b9e-8bf2-b63ec19e6a06\") " pod="openstack/ceilometer-0" Dec 10 11:17:04 crc kubenswrapper[4780]: I1210 11:17:04.055504 4780 generic.go:334] "Generic (PLEG): container finished" podID="61b91736-8f72-474a-ab9a-8071b34b8458" containerID="42f10a63308c54591ad505ace74d1d8c57eacc83cfc28a457b5dea8002c02559" exitCode=137 Dec 10 11:17:04 crc kubenswrapper[4780]: W1210 11:17:04.076114 4780 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda58ed76c_f42e_41ca_8e79_3b656701cdea.slice/crio-bac2c8aeda3379777cc017d734c3a05f2953e676c697bfda6a15ea2587123e23 WatchSource:0}: Error finding container bac2c8aeda3379777cc017d734c3a05f2953e676c697bfda6a15ea2587123e23: Status 404 returned error can't find the container with id bac2c8aeda3379777cc017d734c3a05f2953e676c697bfda6a15ea2587123e23 Dec 10 11:17:04 crc kubenswrapper[4780]: I1210 11:17:04.157905 4780 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1f099496-ee6c-44ff-9c42-c6584b8fb2ad" path="/var/lib/kubelet/pods/1f099496-ee6c-44ff-9c42-c6584b8fb2ad/volumes" Dec 10 11:17:04 crc kubenswrapper[4780]: I1210 11:17:04.177011 4780 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bbd2dcb7-ece1-4a2f-83ec-a4aa131720ad" path="/var/lib/kubelet/pods/bbd2dcb7-ece1-4a2f-83ec-a4aa131720ad/volumes" Dec 10 11:17:04 crc kubenswrapper[4780]: I1210 11:17:04.178741 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"61b91736-8f72-474a-ab9a-8071b34b8458","Type":"ContainerDied","Data":"42f10a63308c54591ad505ace74d1d8c57eacc83cfc28a457b5dea8002c02559"} Dec 10 11:17:04 crc kubenswrapper[4780]: I1210 11:17:04.447697 4780 scope.go:117] "RemoveContainer" containerID="a9abfaf819bc7a6dd38ec6a5932abeb0842946e7c626f84e70b57f864190f47e" Dec 10 11:17:04 crc kubenswrapper[4780]: I1210 11:17:04.469318 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Dec 10 11:17:04 crc kubenswrapper[4780]: I1210 11:17:04.494669 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-scheduler-0" Dec 10 11:17:04 crc kubenswrapper[4780]: I1210 11:17:04.507985 4780 scope.go:117] "RemoveContainer" containerID="9ac6cd54caff2b52e8650e6f398b5b05baaad8aee031bff01f3cb589b7092393" Dec 10 11:17:04 crc kubenswrapper[4780]: I1210 11:17:04.692617 4780 scope.go:117] "RemoveContainer" containerID="ab820b77c0b60a6dcff19329e78086c21314c501f2e40f7c34cdaf1d264ff34c" Dec 10 11:17:04 crc kubenswrapper[4780]: I1210 11:17:04.972706 4780 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-5dd58fdf76-hn7kd" Dec 10 11:17:05 crc kubenswrapper[4780]: I1210 11:17:05.075264 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/0744f766-309b-4632-a522-ba8d51a5fa80-httpd-config\") pod \"0744f766-309b-4632-a522-ba8d51a5fa80\" (UID: \"0744f766-309b-4632-a522-ba8d51a5fa80\") " Dec 10 11:17:05 crc kubenswrapper[4780]: I1210 11:17:05.075399 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0744f766-309b-4632-a522-ba8d51a5fa80-combined-ca-bundle\") pod \"0744f766-309b-4632-a522-ba8d51a5fa80\" (UID: \"0744f766-309b-4632-a522-ba8d51a5fa80\") " Dec 10 11:17:05 crc kubenswrapper[4780]: I1210 11:17:05.075491 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/0744f766-309b-4632-a522-ba8d51a5fa80-ovndb-tls-certs\") pod \"0744f766-309b-4632-a522-ba8d51a5fa80\" (UID: \"0744f766-309b-4632-a522-ba8d51a5fa80\") " Dec 10 11:17:05 crc kubenswrapper[4780]: I1210 11:17:05.075536 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-899t9\" (UniqueName: \"kubernetes.io/projected/0744f766-309b-4632-a522-ba8d51a5fa80-kube-api-access-899t9\") pod \"0744f766-309b-4632-a522-ba8d51a5fa80\" (UID: \"0744f766-309b-4632-a522-ba8d51a5fa80\") " Dec 10 11:17:05 crc kubenswrapper[4780]: I1210 11:17:05.075566 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/0744f766-309b-4632-a522-ba8d51a5fa80-config\") pod \"0744f766-309b-4632-a522-ba8d51a5fa80\" (UID: \"0744f766-309b-4632-a522-ba8d51a5fa80\") " Dec 10 11:17:05 crc kubenswrapper[4780]: I1210 11:17:05.087661 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0744f766-309b-4632-a522-ba8d51a5fa80-kube-api-access-899t9" (OuterVolumeSpecName: "kube-api-access-899t9") pod "0744f766-309b-4632-a522-ba8d51a5fa80" (UID: "0744f766-309b-4632-a522-ba8d51a5fa80"). InnerVolumeSpecName "kube-api-access-899t9". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:17:05 crc kubenswrapper[4780]: I1210 11:17:05.089367 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0744f766-309b-4632-a522-ba8d51a5fa80-httpd-config" (OuterVolumeSpecName: "httpd-config") pod "0744f766-309b-4632-a522-ba8d51a5fa80" (UID: "0744f766-309b-4632-a522-ba8d51a5fa80"). InnerVolumeSpecName "httpd-config". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:17:05 crc kubenswrapper[4780]: I1210 11:17:05.109182 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-5dd58fdf76-hn7kd" event={"ID":"0744f766-309b-4632-a522-ba8d51a5fa80","Type":"ContainerDied","Data":"de146baaac0a79072ff6fa2acb84e65759346b833a2a7d10a945694f6629ebe3"} Dec 10 11:17:05 crc kubenswrapper[4780]: I1210 11:17:05.109248 4780 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-5dd58fdf76-hn7kd" Dec 10 11:17:05 crc kubenswrapper[4780]: I1210 11:17:05.109258 4780 scope.go:117] "RemoveContainer" containerID="6414a1e874a5a81e8a8ed5be1dc1dce97b0ed571884db3a456d15ce58ce8f03d" Dec 10 11:17:05 crc kubenswrapper[4780]: I1210 11:17:05.117935 4780 generic.go:334] "Generic (PLEG): container finished" podID="a58ed76c-f42e-41ca-8e79-3b656701cdea" containerID="048524975a1a80f19a60acb0a5ce2cd364087a155f6c9082cc77617ae2fabac8" exitCode=0 Dec 10 11:17:05 crc kubenswrapper[4780]: I1210 11:17:05.118054 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-wdmbj" event={"ID":"a58ed76c-f42e-41ca-8e79-3b656701cdea","Type":"ContainerDied","Data":"048524975a1a80f19a60acb0a5ce2cd364087a155f6c9082cc77617ae2fabac8"} Dec 10 11:17:05 crc kubenswrapper[4780]: I1210 11:17:05.118088 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-wdmbj" event={"ID":"a58ed76c-f42e-41ca-8e79-3b656701cdea","Type":"ContainerStarted","Data":"bac2c8aeda3379777cc017d734c3a05f2953e676c697bfda6a15ea2587123e23"} Dec 10 11:17:05 crc kubenswrapper[4780]: I1210 11:17:05.168413 4780 scope.go:117] "RemoveContainer" containerID="6b66df0c1a3d7d39c43b559fb877ad8ff5d4b2ba89f2344ba205907ddd7c597c" Dec 10 11:17:05 crc kubenswrapper[4780]: I1210 11:17:05.223323 4780 reconciler_common.go:293] "Volume detached for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/0744f766-309b-4632-a522-ba8d51a5fa80-httpd-config\") on node \"crc\" DevicePath \"\"" Dec 10 11:17:05 crc kubenswrapper[4780]: I1210 11:17:05.223389 4780 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-899t9\" (UniqueName: \"kubernetes.io/projected/0744f766-309b-4632-a522-ba8d51a5fa80-kube-api-access-899t9\") on node \"crc\" DevicePath \"\"" Dec 10 11:17:05 crc kubenswrapper[4780]: I1210 11:17:05.344616 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0744f766-309b-4632-a522-ba8d51a5fa80-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "0744f766-309b-4632-a522-ba8d51a5fa80" (UID: "0744f766-309b-4632-a522-ba8d51a5fa80"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:17:05 crc kubenswrapper[4780]: I1210 11:17:05.349379 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0744f766-309b-4632-a522-ba8d51a5fa80-config" (OuterVolumeSpecName: "config") pod "0744f766-309b-4632-a522-ba8d51a5fa80" (UID: "0744f766-309b-4632-a522-ba8d51a5fa80"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:17:05 crc kubenswrapper[4780]: I1210 11:17:05.352542 4780 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/secret/0744f766-309b-4632-a522-ba8d51a5fa80-config\") on node \"crc\" DevicePath \"\"" Dec 10 11:17:05 crc kubenswrapper[4780]: I1210 11:17:05.352596 4780 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0744f766-309b-4632-a522-ba8d51a5fa80-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 10 11:17:05 crc kubenswrapper[4780]: I1210 11:17:05.379270 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0744f766-309b-4632-a522-ba8d51a5fa80-ovndb-tls-certs" (OuterVolumeSpecName: "ovndb-tls-certs") pod "0744f766-309b-4632-a522-ba8d51a5fa80" (UID: "0744f766-309b-4632-a522-ba8d51a5fa80"). InnerVolumeSpecName "ovndb-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:17:05 crc kubenswrapper[4780]: I1210 11:17:05.435533 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-api-7bc68bff5f-xvzgg"] Dec 10 11:17:05 crc kubenswrapper[4780]: I1210 11:17:05.455687 4780 reconciler_common.go:293] "Volume detached for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/0744f766-309b-4632-a522-ba8d51a5fa80-ovndb-tls-certs\") on node \"crc\" DevicePath \"\"" Dec 10 11:17:05 crc kubenswrapper[4780]: I1210 11:17:05.517576 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-api-5bffc7b484-bhjz4"] Dec 10 11:17:05 crc kubenswrapper[4780]: E1210 11:17:05.522037 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0744f766-309b-4632-a522-ba8d51a5fa80" containerName="neutron-api" Dec 10 11:17:05 crc kubenswrapper[4780]: I1210 11:17:05.522078 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="0744f766-309b-4632-a522-ba8d51a5fa80" containerName="neutron-api" Dec 10 11:17:05 crc kubenswrapper[4780]: E1210 11:17:05.522141 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0744f766-309b-4632-a522-ba8d51a5fa80" containerName="neutron-httpd" Dec 10 11:17:05 crc kubenswrapper[4780]: I1210 11:17:05.522151 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="0744f766-309b-4632-a522-ba8d51a5fa80" containerName="neutron-httpd" Dec 10 11:17:05 crc kubenswrapper[4780]: I1210 11:17:05.522636 4780 memory_manager.go:354] "RemoveStaleState removing state" podUID="0744f766-309b-4632-a522-ba8d51a5fa80" containerName="neutron-httpd" Dec 10 11:17:05 crc kubenswrapper[4780]: I1210 11:17:05.522653 4780 memory_manager.go:354] "RemoveStaleState removing state" podUID="0744f766-309b-4632-a522-ba8d51a5fa80" containerName="neutron-api" Dec 10 11:17:05 crc kubenswrapper[4780]: I1210 11:17:05.526325 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-api-5bffc7b484-bhjz4" Dec 10 11:17:05 crc kubenswrapper[4780]: I1210 11:17:05.535954 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-heat-api-internal-svc" Dec 10 11:17:05 crc kubenswrapper[4780]: I1210 11:17:05.536146 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-heat-api-public-svc" Dec 10 11:17:05 crc kubenswrapper[4780]: I1210 11:17:05.536805 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-cfnapi-54f5bc9f87-vp45v"] Dec 10 11:17:05 crc kubenswrapper[4780]: I1210 11:17:05.622781 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-api-5bffc7b484-bhjz4"] Dec 10 11:17:05 crc kubenswrapper[4780]: I1210 11:17:05.674249 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-swd59\" (UniqueName: \"kubernetes.io/projected/43cf9913-8179-4d01-a9d8-40ae5078b366-kube-api-access-swd59\") pod \"heat-api-5bffc7b484-bhjz4\" (UID: \"43cf9913-8179-4d01-a9d8-40ae5078b366\") " pod="openstack/heat-api-5bffc7b484-bhjz4" Dec 10 11:17:05 crc kubenswrapper[4780]: I1210 11:17:05.679584 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/43cf9913-8179-4d01-a9d8-40ae5078b366-internal-tls-certs\") pod \"heat-api-5bffc7b484-bhjz4\" (UID: \"43cf9913-8179-4d01-a9d8-40ae5078b366\") " pod="openstack/heat-api-5bffc7b484-bhjz4" Dec 10 11:17:05 crc kubenswrapper[4780]: I1210 11:17:05.679740 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/43cf9913-8179-4d01-a9d8-40ae5078b366-combined-ca-bundle\") pod \"heat-api-5bffc7b484-bhjz4\" (UID: \"43cf9913-8179-4d01-a9d8-40ae5078b366\") " pod="openstack/heat-api-5bffc7b484-bhjz4" Dec 10 11:17:05 crc kubenswrapper[4780]: I1210 11:17:05.679993 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/43cf9913-8179-4d01-a9d8-40ae5078b366-config-data\") pod \"heat-api-5bffc7b484-bhjz4\" (UID: \"43cf9913-8179-4d01-a9d8-40ae5078b366\") " pod="openstack/heat-api-5bffc7b484-bhjz4" Dec 10 11:17:05 crc kubenswrapper[4780]: I1210 11:17:05.680242 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/43cf9913-8179-4d01-a9d8-40ae5078b366-public-tls-certs\") pod \"heat-api-5bffc7b484-bhjz4\" (UID: \"43cf9913-8179-4d01-a9d8-40ae5078b366\") " pod="openstack/heat-api-5bffc7b484-bhjz4" Dec 10 11:17:05 crc kubenswrapper[4780]: I1210 11:17:05.680406 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/43cf9913-8179-4d01-a9d8-40ae5078b366-config-data-custom\") pod \"heat-api-5bffc7b484-bhjz4\" (UID: \"43cf9913-8179-4d01-a9d8-40ae5078b366\") " pod="openstack/heat-api-5bffc7b484-bhjz4" Dec 10 11:17:05 crc kubenswrapper[4780]: I1210 11:17:05.686091 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-cfnapi-5fbd6d5fcb-7dcpq"] Dec 10 11:17:05 crc kubenswrapper[4780]: I1210 11:17:05.688560 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-cfnapi-5fbd6d5fcb-7dcpq" Dec 10 11:17:05 crc kubenswrapper[4780]: I1210 11:17:05.693713 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-heat-cfnapi-internal-svc" Dec 10 11:17:05 crc kubenswrapper[4780]: I1210 11:17:05.694094 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-heat-cfnapi-public-svc" Dec 10 11:17:05 crc kubenswrapper[4780]: I1210 11:17:05.716698 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-cfnapi-5fbd6d5fcb-7dcpq"] Dec 10 11:17:05 crc kubenswrapper[4780]: I1210 11:17:05.760877 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-5dd58fdf76-hn7kd"] Dec 10 11:17:05 crc kubenswrapper[4780]: I1210 11:17:05.791750 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/43cf9913-8179-4d01-a9d8-40ae5078b366-config-data-custom\") pod \"heat-api-5bffc7b484-bhjz4\" (UID: \"43cf9913-8179-4d01-a9d8-40ae5078b366\") " pod="openstack/heat-api-5bffc7b484-bhjz4" Dec 10 11:17:05 crc kubenswrapper[4780]: I1210 11:17:05.792081 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-swd59\" (UniqueName: \"kubernetes.io/projected/43cf9913-8179-4d01-a9d8-40ae5078b366-kube-api-access-swd59\") pod \"heat-api-5bffc7b484-bhjz4\" (UID: \"43cf9913-8179-4d01-a9d8-40ae5078b366\") " pod="openstack/heat-api-5bffc7b484-bhjz4" Dec 10 11:17:05 crc kubenswrapper[4780]: I1210 11:17:05.792120 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/43cf9913-8179-4d01-a9d8-40ae5078b366-internal-tls-certs\") pod \"heat-api-5bffc7b484-bhjz4\" (UID: \"43cf9913-8179-4d01-a9d8-40ae5078b366\") " pod="openstack/heat-api-5bffc7b484-bhjz4" Dec 10 11:17:05 crc kubenswrapper[4780]: I1210 11:17:05.792174 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/43cf9913-8179-4d01-a9d8-40ae5078b366-combined-ca-bundle\") pod \"heat-api-5bffc7b484-bhjz4\" (UID: \"43cf9913-8179-4d01-a9d8-40ae5078b366\") " pod="openstack/heat-api-5bffc7b484-bhjz4" Dec 10 11:17:05 crc kubenswrapper[4780]: I1210 11:17:05.792359 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/43cf9913-8179-4d01-a9d8-40ae5078b366-config-data\") pod \"heat-api-5bffc7b484-bhjz4\" (UID: \"43cf9913-8179-4d01-a9d8-40ae5078b366\") " pod="openstack/heat-api-5bffc7b484-bhjz4" Dec 10 11:17:05 crc kubenswrapper[4780]: I1210 11:17:05.792494 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/43cf9913-8179-4d01-a9d8-40ae5078b366-public-tls-certs\") pod \"heat-api-5bffc7b484-bhjz4\" (UID: \"43cf9913-8179-4d01-a9d8-40ae5078b366\") " pod="openstack/heat-api-5bffc7b484-bhjz4" Dec 10 11:17:05 crc kubenswrapper[4780]: I1210 11:17:05.824020 4780 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-5dd58fdf76-hn7kd"] Dec 10 11:17:05 crc kubenswrapper[4780]: I1210 11:17:05.839824 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/43cf9913-8179-4d01-a9d8-40ae5078b366-public-tls-certs\") pod \"heat-api-5bffc7b484-bhjz4\" (UID: \"43cf9913-8179-4d01-a9d8-40ae5078b366\") " 
pod="openstack/heat-api-5bffc7b484-bhjz4" Dec 10 11:17:05 crc kubenswrapper[4780]: I1210 11:17:05.841976 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/43cf9913-8179-4d01-a9d8-40ae5078b366-config-data-custom\") pod \"heat-api-5bffc7b484-bhjz4\" (UID: \"43cf9913-8179-4d01-a9d8-40ae5078b366\") " pod="openstack/heat-api-5bffc7b484-bhjz4" Dec 10 11:17:05 crc kubenswrapper[4780]: I1210 11:17:05.848773 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/43cf9913-8179-4d01-a9d8-40ae5078b366-combined-ca-bundle\") pod \"heat-api-5bffc7b484-bhjz4\" (UID: \"43cf9913-8179-4d01-a9d8-40ae5078b366\") " pod="openstack/heat-api-5bffc7b484-bhjz4" Dec 10 11:17:05 crc kubenswrapper[4780]: I1210 11:17:05.852290 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/43cf9913-8179-4d01-a9d8-40ae5078b366-config-data\") pod \"heat-api-5bffc7b484-bhjz4\" (UID: \"43cf9913-8179-4d01-a9d8-40ae5078b366\") " pod="openstack/heat-api-5bffc7b484-bhjz4" Dec 10 11:17:05 crc kubenswrapper[4780]: I1210 11:17:05.853261 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/43cf9913-8179-4d01-a9d8-40ae5078b366-internal-tls-certs\") pod \"heat-api-5bffc7b484-bhjz4\" (UID: \"43cf9913-8179-4d01-a9d8-40ae5078b366\") " pod="openstack/heat-api-5bffc7b484-bhjz4" Dec 10 11:17:05 crc kubenswrapper[4780]: I1210 11:17:05.859209 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-swd59\" (UniqueName: \"kubernetes.io/projected/43cf9913-8179-4d01-a9d8-40ae5078b366-kube-api-access-swd59\") pod \"heat-api-5bffc7b484-bhjz4\" (UID: \"43cf9913-8179-4d01-a9d8-40ae5078b366\") " pod="openstack/heat-api-5bffc7b484-bhjz4" Dec 10 11:17:05 crc kubenswrapper[4780]: I1210 11:17:05.896516 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/0e464f35-41ce-4f1e-b728-c12bfb04abb4-public-tls-certs\") pod \"heat-cfnapi-5fbd6d5fcb-7dcpq\" (UID: \"0e464f35-41ce-4f1e-b728-c12bfb04abb4\") " pod="openstack/heat-cfnapi-5fbd6d5fcb-7dcpq" Dec 10 11:17:05 crc kubenswrapper[4780]: I1210 11:17:05.896642 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0e464f35-41ce-4f1e-b728-c12bfb04abb4-combined-ca-bundle\") pod \"heat-cfnapi-5fbd6d5fcb-7dcpq\" (UID: \"0e464f35-41ce-4f1e-b728-c12bfb04abb4\") " pod="openstack/heat-cfnapi-5fbd6d5fcb-7dcpq" Dec 10 11:17:05 crc kubenswrapper[4780]: I1210 11:17:05.897319 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/0e464f35-41ce-4f1e-b728-c12bfb04abb4-config-data-custom\") pod \"heat-cfnapi-5fbd6d5fcb-7dcpq\" (UID: \"0e464f35-41ce-4f1e-b728-c12bfb04abb4\") " pod="openstack/heat-cfnapi-5fbd6d5fcb-7dcpq" Dec 10 11:17:05 crc kubenswrapper[4780]: I1210 11:17:05.897582 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0e464f35-41ce-4f1e-b728-c12bfb04abb4-config-data\") pod \"heat-cfnapi-5fbd6d5fcb-7dcpq\" (UID: \"0e464f35-41ce-4f1e-b728-c12bfb04abb4\") " 
pod="openstack/heat-cfnapi-5fbd6d5fcb-7dcpq" Dec 10 11:17:05 crc kubenswrapper[4780]: I1210 11:17:05.897751 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/0e464f35-41ce-4f1e-b728-c12bfb04abb4-internal-tls-certs\") pod \"heat-cfnapi-5fbd6d5fcb-7dcpq\" (UID: \"0e464f35-41ce-4f1e-b728-c12bfb04abb4\") " pod="openstack/heat-cfnapi-5fbd6d5fcb-7dcpq" Dec 10 11:17:05 crc kubenswrapper[4780]: I1210 11:17:05.897776 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4kblm\" (UniqueName: \"kubernetes.io/projected/0e464f35-41ce-4f1e-b728-c12bfb04abb4-kube-api-access-4kblm\") pod \"heat-cfnapi-5fbd6d5fcb-7dcpq\" (UID: \"0e464f35-41ce-4f1e-b728-c12bfb04abb4\") " pod="openstack/heat-cfnapi-5fbd6d5fcb-7dcpq" Dec 10 11:17:05 crc kubenswrapper[4780]: I1210 11:17:05.966771 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-api-5bffc7b484-bhjz4" Dec 10 11:17:06 crc kubenswrapper[4780]: I1210 11:17:06.003027 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0e464f35-41ce-4f1e-b728-c12bfb04abb4-config-data\") pod \"heat-cfnapi-5fbd6d5fcb-7dcpq\" (UID: \"0e464f35-41ce-4f1e-b728-c12bfb04abb4\") " pod="openstack/heat-cfnapi-5fbd6d5fcb-7dcpq" Dec 10 11:17:06 crc kubenswrapper[4780]: I1210 11:17:06.003162 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/0e464f35-41ce-4f1e-b728-c12bfb04abb4-internal-tls-certs\") pod \"heat-cfnapi-5fbd6d5fcb-7dcpq\" (UID: \"0e464f35-41ce-4f1e-b728-c12bfb04abb4\") " pod="openstack/heat-cfnapi-5fbd6d5fcb-7dcpq" Dec 10 11:17:06 crc kubenswrapper[4780]: I1210 11:17:06.003189 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4kblm\" (UniqueName: \"kubernetes.io/projected/0e464f35-41ce-4f1e-b728-c12bfb04abb4-kube-api-access-4kblm\") pod \"heat-cfnapi-5fbd6d5fcb-7dcpq\" (UID: \"0e464f35-41ce-4f1e-b728-c12bfb04abb4\") " pod="openstack/heat-cfnapi-5fbd6d5fcb-7dcpq" Dec 10 11:17:06 crc kubenswrapper[4780]: I1210 11:17:06.003311 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/0e464f35-41ce-4f1e-b728-c12bfb04abb4-public-tls-certs\") pod \"heat-cfnapi-5fbd6d5fcb-7dcpq\" (UID: \"0e464f35-41ce-4f1e-b728-c12bfb04abb4\") " pod="openstack/heat-cfnapi-5fbd6d5fcb-7dcpq" Dec 10 11:17:06 crc kubenswrapper[4780]: I1210 11:17:06.003352 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0e464f35-41ce-4f1e-b728-c12bfb04abb4-combined-ca-bundle\") pod \"heat-cfnapi-5fbd6d5fcb-7dcpq\" (UID: \"0e464f35-41ce-4f1e-b728-c12bfb04abb4\") " pod="openstack/heat-cfnapi-5fbd6d5fcb-7dcpq" Dec 10 11:17:06 crc kubenswrapper[4780]: I1210 11:17:06.003560 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/0e464f35-41ce-4f1e-b728-c12bfb04abb4-config-data-custom\") pod \"heat-cfnapi-5fbd6d5fcb-7dcpq\" (UID: \"0e464f35-41ce-4f1e-b728-c12bfb04abb4\") " pod="openstack/heat-cfnapi-5fbd6d5fcb-7dcpq" Dec 10 11:17:06 crc kubenswrapper[4780]: I1210 11:17:06.022512 4780 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" 
podUID="0744f766-309b-4632-a522-ba8d51a5fa80" path="/var/lib/kubelet/pods/0744f766-309b-4632-a522-ba8d51a5fa80/volumes" Dec 10 11:17:06 crc kubenswrapper[4780]: I1210 11:17:06.025556 4780 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Dec 10 11:17:06 crc kubenswrapper[4780]: I1210 11:17:06.032310 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/0e464f35-41ce-4f1e-b728-c12bfb04abb4-internal-tls-certs\") pod \"heat-cfnapi-5fbd6d5fcb-7dcpq\" (UID: \"0e464f35-41ce-4f1e-b728-c12bfb04abb4\") " pod="openstack/heat-cfnapi-5fbd6d5fcb-7dcpq" Dec 10 11:17:06 crc kubenswrapper[4780]: I1210 11:17:06.049076 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0e464f35-41ce-4f1e-b728-c12bfb04abb4-config-data\") pod \"heat-cfnapi-5fbd6d5fcb-7dcpq\" (UID: \"0e464f35-41ce-4f1e-b728-c12bfb04abb4\") " pod="openstack/heat-cfnapi-5fbd6d5fcb-7dcpq" Dec 10 11:17:06 crc kubenswrapper[4780]: I1210 11:17:06.067907 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/0e464f35-41ce-4f1e-b728-c12bfb04abb4-public-tls-certs\") pod \"heat-cfnapi-5fbd6d5fcb-7dcpq\" (UID: \"0e464f35-41ce-4f1e-b728-c12bfb04abb4\") " pod="openstack/heat-cfnapi-5fbd6d5fcb-7dcpq" Dec 10 11:17:06 crc kubenswrapper[4780]: I1210 11:17:06.077831 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-api-7bc68bff5f-xvzgg"] Dec 10 11:17:06 crc kubenswrapper[4780]: I1210 11:17:06.080941 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4kblm\" (UniqueName: \"kubernetes.io/projected/0e464f35-41ce-4f1e-b728-c12bfb04abb4-kube-api-access-4kblm\") pod \"heat-cfnapi-5fbd6d5fcb-7dcpq\" (UID: \"0e464f35-41ce-4f1e-b728-c12bfb04abb4\") " pod="openstack/heat-cfnapi-5fbd6d5fcb-7dcpq" Dec 10 11:17:06 crc kubenswrapper[4780]: I1210 11:17:06.095697 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0e464f35-41ce-4f1e-b728-c12bfb04abb4-combined-ca-bundle\") pod \"heat-cfnapi-5fbd6d5fcb-7dcpq\" (UID: \"0e464f35-41ce-4f1e-b728-c12bfb04abb4\") " pod="openstack/heat-cfnapi-5fbd6d5fcb-7dcpq" Dec 10 11:17:06 crc kubenswrapper[4780]: I1210 11:17:06.100944 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/0e464f35-41ce-4f1e-b728-c12bfb04abb4-config-data-custom\") pod \"heat-cfnapi-5fbd6d5fcb-7dcpq\" (UID: \"0e464f35-41ce-4f1e-b728-c12bfb04abb4\") " pod="openstack/heat-cfnapi-5fbd6d5fcb-7dcpq" Dec 10 11:17:06 crc kubenswrapper[4780]: I1210 11:17:06.106248 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/61b91736-8f72-474a-ab9a-8071b34b8458-scripts\") pod \"61b91736-8f72-474a-ab9a-8071b34b8458\" (UID: \"61b91736-8f72-474a-ab9a-8071b34b8458\") " Dec 10 11:17:06 crc kubenswrapper[4780]: I1210 11:17:06.107174 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/61b91736-8f72-474a-ab9a-8071b34b8458-logs\") pod \"61b91736-8f72-474a-ab9a-8071b34b8458\" (UID: \"61b91736-8f72-474a-ab9a-8071b34b8458\") " Dec 10 11:17:06 crc kubenswrapper[4780]: I1210 11:17:06.107481 4780 reconciler_common.go:159] 
"operationExecutor.UnmountVolume started for volume \"kube-api-access-6tsgd\" (UniqueName: \"kubernetes.io/projected/61b91736-8f72-474a-ab9a-8071b34b8458-kube-api-access-6tsgd\") pod \"61b91736-8f72-474a-ab9a-8071b34b8458\" (UID: \"61b91736-8f72-474a-ab9a-8071b34b8458\") " Dec 10 11:17:06 crc kubenswrapper[4780]: I1210 11:17:06.107551 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/61b91736-8f72-474a-ab9a-8071b34b8458-etc-machine-id\") pod \"61b91736-8f72-474a-ab9a-8071b34b8458\" (UID: \"61b91736-8f72-474a-ab9a-8071b34b8458\") " Dec 10 11:17:06 crc kubenswrapper[4780]: I1210 11:17:06.107581 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/61b91736-8f72-474a-ab9a-8071b34b8458-config-data\") pod \"61b91736-8f72-474a-ab9a-8071b34b8458\" (UID: \"61b91736-8f72-474a-ab9a-8071b34b8458\") " Dec 10 11:17:06 crc kubenswrapper[4780]: I1210 11:17:06.107604 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/61b91736-8f72-474a-ab9a-8071b34b8458-config-data-custom\") pod \"61b91736-8f72-474a-ab9a-8071b34b8458\" (UID: \"61b91736-8f72-474a-ab9a-8071b34b8458\") " Dec 10 11:17:06 crc kubenswrapper[4780]: I1210 11:17:06.107686 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/61b91736-8f72-474a-ab9a-8071b34b8458-combined-ca-bundle\") pod \"61b91736-8f72-474a-ab9a-8071b34b8458\" (UID: \"61b91736-8f72-474a-ab9a-8071b34b8458\") " Dec 10 11:17:06 crc kubenswrapper[4780]: I1210 11:17:06.113540 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/61b91736-8f72-474a-ab9a-8071b34b8458-logs" (OuterVolumeSpecName: "logs") pod "61b91736-8f72-474a-ab9a-8071b34b8458" (UID: "61b91736-8f72-474a-ab9a-8071b34b8458"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 11:17:06 crc kubenswrapper[4780]: I1210 11:17:06.126397 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/61b91736-8f72-474a-ab9a-8071b34b8458-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "61b91736-8f72-474a-ab9a-8071b34b8458" (UID: "61b91736-8f72-474a-ab9a-8071b34b8458"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 10 11:17:06 crc kubenswrapper[4780]: I1210 11:17:06.128783 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-7756b9d78c-2nwlx"] Dec 10 11:17:06 crc kubenswrapper[4780]: I1210 11:17:06.153442 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/61b91736-8f72-474a-ab9a-8071b34b8458-scripts" (OuterVolumeSpecName: "scripts") pod "61b91736-8f72-474a-ab9a-8071b34b8458" (UID: "61b91736-8f72-474a-ab9a-8071b34b8458"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:17:06 crc kubenswrapper[4780]: I1210 11:17:06.154770 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/61b91736-8f72-474a-ab9a-8071b34b8458-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "61b91736-8f72-474a-ab9a-8071b34b8458" (UID: "61b91736-8f72-474a-ab9a-8071b34b8458"). InnerVolumeSpecName "config-data-custom". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:17:06 crc kubenswrapper[4780]: I1210 11:17:06.180683 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/61b91736-8f72-474a-ab9a-8071b34b8458-kube-api-access-6tsgd" (OuterVolumeSpecName: "kube-api-access-6tsgd") pod "61b91736-8f72-474a-ab9a-8071b34b8458" (UID: "61b91736-8f72-474a-ab9a-8071b34b8458"). InnerVolumeSpecName "kube-api-access-6tsgd". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:17:06 crc kubenswrapper[4780]: I1210 11:17:06.211397 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-2c72-account-create-update-22zbp"] Dec 10 11:17:06 crc kubenswrapper[4780]: I1210 11:17:06.240264 4780 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/61b91736-8f72-474a-ab9a-8071b34b8458-etc-machine-id\") on node \"crc\" DevicePath \"\"" Dec 10 11:17:06 crc kubenswrapper[4780]: I1210 11:17:06.241755 4780 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/61b91736-8f72-474a-ab9a-8071b34b8458-config-data-custom\") on node \"crc\" DevicePath \"\"" Dec 10 11:17:06 crc kubenswrapper[4780]: I1210 11:17:06.241850 4780 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/61b91736-8f72-474a-ab9a-8071b34b8458-scripts\") on node \"crc\" DevicePath \"\"" Dec 10 11:17:06 crc kubenswrapper[4780]: I1210 11:17:06.241874 4780 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/61b91736-8f72-474a-ab9a-8071b34b8458-logs\") on node \"crc\" DevicePath \"\"" Dec 10 11:17:06 crc kubenswrapper[4780]: I1210 11:17:06.241890 4780 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6tsgd\" (UniqueName: \"kubernetes.io/projected/61b91736-8f72-474a-ab9a-8071b34b8458-kube-api-access-6tsgd\") on node \"crc\" DevicePath \"\"" Dec 10 11:17:06 crc kubenswrapper[4780]: W1210 11:17:06.252499 4780 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod068aca35_6c22_4164_bf63_481836e82331.slice/crio-7d313c19a96f3874a8722b95a742a756b706add0bf4b7f904bcaa178705b257e WatchSource:0}: Error finding container 7d313c19a96f3874a8722b95a742a756b706add0bf4b7f904bcaa178705b257e: Status 404 returned error can't find the container with id 7d313c19a96f3874a8722b95a742a756b706add0bf4b7f904bcaa178705b257e Dec 10 11:17:06 crc kubenswrapper[4780]: I1210 11:17:06.253743 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-122d-account-create-update-tj4vk"] Dec 10 11:17:06 crc kubenswrapper[4780]: I1210 11:17:06.257234 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-db-secret" Dec 10 11:17:06 crc kubenswrapper[4780]: I1210 11:17:06.259319 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-db-secret" Dec 10 11:17:06 crc kubenswrapper[4780]: I1210 11:17:06.273827 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-db-secret" Dec 10 11:17:06 crc kubenswrapper[4780]: I1210 11:17:06.274702 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-db-create-kjc6l"] Dec 10 11:17:06 crc kubenswrapper[4780]: I1210 11:17:06.277502 4780 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-api-0" Dec 10 11:17:06 crc kubenswrapper[4780]: I1210 11:17:06.278527 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"61b91736-8f72-474a-ab9a-8071b34b8458","Type":"ContainerDied","Data":"5017c513fe0dd1f7cfbcdb16d94d89056c2d210a35b0e283f12a6fa44dce4963"} Dec 10 11:17:06 crc kubenswrapper[4780]: I1210 11:17:06.278644 4780 scope.go:117] "RemoveContainer" containerID="42f10a63308c54591ad505ace74d1d8c57eacc83cfc28a457b5dea8002c02559" Dec 10 11:17:06 crc kubenswrapper[4780]: I1210 11:17:06.282817 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/61b91736-8f72-474a-ab9a-8071b34b8458-config-data" (OuterVolumeSpecName: "config-data") pod "61b91736-8f72-474a-ab9a-8071b34b8458" (UID: "61b91736-8f72-474a-ab9a-8071b34b8458"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:17:06 crc kubenswrapper[4780]: I1210 11:17:06.296887 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-9ebb-account-create-update-nb7g9"] Dec 10 11:17:06 crc kubenswrapper[4780]: I1210 11:17:06.354504 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-cfnapi-5fbd6d5fcb-7dcpq" Dec 10 11:17:06 crc kubenswrapper[4780]: I1210 11:17:06.638598 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/61b91736-8f72-474a-ab9a-8071b34b8458-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "61b91736-8f72-474a-ab9a-8071b34b8458" (UID: "61b91736-8f72-474a-ab9a-8071b34b8458"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:17:06 crc kubenswrapper[4780]: I1210 11:17:06.674351 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-cfnapi-54f5bc9f87-vp45v"] Dec 10 11:17:06 crc kubenswrapper[4780]: I1210 11:17:06.692705 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-engine-f7d475897-pp967"] Dec 10 11:17:06 crc kubenswrapper[4780]: I1210 11:17:06.693630 4780 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/61b91736-8f72-474a-ab9a-8071b34b8458-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 10 11:17:06 crc kubenswrapper[4780]: I1210 11:17:06.700784 4780 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/61b91736-8f72-474a-ab9a-8071b34b8458-config-data\") on node \"crc\" DevicePath \"\"" Dec 10 11:17:06 crc kubenswrapper[4780]: I1210 11:17:06.703522 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-db-create-tw2jn"] Dec 10 11:17:06 crc kubenswrapper[4780]: I1210 11:17:06.919163 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-api-7c6599c744-svfvf"] Dec 10 11:17:07 crc kubenswrapper[4780]: I1210 11:17:07.011855 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-engine-86d9dd567b-q6nsm"] Dec 10 11:17:07 crc kubenswrapper[4780]: I1210 11:17:07.092211 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-cfnapi-7788568699-swzvp"] Dec 10 11:17:07 crc kubenswrapper[4780]: I1210 11:17:07.327397 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-api-0"] Dec 10 11:17:07 crc kubenswrapper[4780]: I1210 11:17:07.375585 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/heat-engine-f7d475897-pp967" event={"ID":"068aca35-6c22-4164-bf63-481836e82331","Type":"ContainerStarted","Data":"7d313c19a96f3874a8722b95a742a756b706add0bf4b7f904bcaa178705b257e"} Dec 10 11:17:07 crc kubenswrapper[4780]: I1210 11:17:07.377063 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-54f5bc9f87-vp45v" event={"ID":"19201430-5ecc-4a0b-ad28-5cdfff8d037a","Type":"ContainerStarted","Data":"bef4f52749f8d014f7e3c3c3c0760e193426372dec7eb4e1491517da38add708"} Dec 10 11:17:07 crc kubenswrapper[4780]: I1210 11:17:07.378095 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-7c6599c744-svfvf" event={"ID":"c09a9d73-c35e-40da-b545-ebf1b8724896","Type":"ContainerStarted","Data":"8f1ccd9b52bbbba5c547f5427d4232e6c0732e02c21cedc81f16df55bf6e12cf"} Dec 10 11:17:07 crc kubenswrapper[4780]: I1210 11:17:07.380046 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-kjc6l" event={"ID":"f8ddd440-8c9e-4d62-8efa-b5c8dd6bc990","Type":"ContainerStarted","Data":"b089d4c2a526e5dcac3119b06baa712fcd2e3464c8a49a564e0c610c3ea234a4"} Dec 10 11:17:07 crc kubenswrapper[4780]: I1210 11:17:07.383605 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7756b9d78c-2nwlx" event={"ID":"8400fa88-5e91-417f-9495-12e8efcf25d0","Type":"ContainerStarted","Data":"6255be7cb5b4ebace0937ced07c7842aafc7591bf78cdddd92658507def18af0"} Dec 10 11:17:07 crc kubenswrapper[4780]: I1210 11:17:07.385472 4780 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-api-0"] Dec 10 11:17:07 crc kubenswrapper[4780]: I1210 11:17:07.392255 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-9ebb-account-create-update-nb7g9" event={"ID":"00fa7636-13e7-49b7-8ce0-dac88eab63d7","Type":"ContainerStarted","Data":"7df620b06709be841d39024bcf829926b84623eaca7f82ee368e929e001a2eec"} Dec 10 11:17:07 crc kubenswrapper[4780]: I1210 11:17:07.395706 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-2c72-account-create-update-22zbp" event={"ID":"d9a943a1-6945-4117-a123-5c96d85b4e77","Type":"ContainerStarted","Data":"98184f23707ca7f7f520c47b3348bb15b666dfb6a00b16b67b8d28c49bee677f"} Dec 10 11:17:07 crc kubenswrapper[4780]: I1210 11:17:07.403579 4780 scope.go:117] "RemoveContainer" containerID="7a0f17241623d0b7a10d412d7f9d4ed1f4ce00f6f86a018165d2d6c067871eac" Dec 10 11:17:07 crc kubenswrapper[4780]: I1210 11:17:07.417976 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-7bc68bff5f-xvzgg" event={"ID":"77c41a0f-d539-496a-85f5-f6aec31747a9","Type":"ContainerStarted","Data":"85a8773a4f2e26c958af39d8523e5408587485475860847326a643cc87187ca1"} Dec 10 11:17:07 crc kubenswrapper[4780]: I1210 11:17:07.430146 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-122d-account-create-update-tj4vk" event={"ID":"b1f2fac2-0797-448c-b4d8-98ebd4eff159","Type":"ContainerStarted","Data":"b67bed4b01f79b394fd2f8152b9fd7b3d46bbb73c4b247b0a879c59dedadf41b"} Dec 10 11:17:07 crc kubenswrapper[4780]: I1210 11:17:07.448134 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-engine-86d9dd567b-q6nsm" event={"ID":"85332002-7a9a-4738-a4cd-5b66c34658b2","Type":"ContainerStarted","Data":"364cec90bcbbbb89b0b88ab748156719a0afbed6ac04507526b688f963c247d8"} Dec 10 11:17:07 crc kubenswrapper[4780]: I1210 11:17:07.469588 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-api-0"] Dec 10 11:17:07 
crc kubenswrapper[4780]: E1210 11:17:07.470530 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="61b91736-8f72-474a-ab9a-8071b34b8458" containerName="cinder-api-log" Dec 10 11:17:07 crc kubenswrapper[4780]: I1210 11:17:07.470554 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="61b91736-8f72-474a-ab9a-8071b34b8458" containerName="cinder-api-log" Dec 10 11:17:07 crc kubenswrapper[4780]: E1210 11:17:07.470579 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="61b91736-8f72-474a-ab9a-8071b34b8458" containerName="cinder-api" Dec 10 11:17:07 crc kubenswrapper[4780]: I1210 11:17:07.470586 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="61b91736-8f72-474a-ab9a-8071b34b8458" containerName="cinder-api" Dec 10 11:17:07 crc kubenswrapper[4780]: I1210 11:17:07.470909 4780 memory_manager.go:354] "RemoveStaleState removing state" podUID="61b91736-8f72-474a-ab9a-8071b34b8458" containerName="cinder-api-log" Dec 10 11:17:07 crc kubenswrapper[4780]: I1210 11:17:07.470950 4780 memory_manager.go:354] "RemoveStaleState removing state" podUID="61b91736-8f72-474a-ab9a-8071b34b8458" containerName="cinder-api" Dec 10 11:17:07 crc kubenswrapper[4780]: I1210 11:17:07.472879 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Dec 10 11:17:07 crc kubenswrapper[4780]: I1210 11:17:07.474661 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-tw2jn" event={"ID":"87bfb484-2111-41a3-99d5-52e8db80f098","Type":"ContainerStarted","Data":"87ca209e1be0e82bc58e8cce24c0435fce40773f03deb46acf8f8065fdb2d97d"} Dec 10 11:17:07 crc kubenswrapper[4780]: I1210 11:17:07.479333 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-cinder-internal-svc" Dec 10 11:17:07 crc kubenswrapper[4780]: I1210 11:17:07.479800 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-api-config-data" Dec 10 11:17:07 crc kubenswrapper[4780]: I1210 11:17:07.486064 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-cinder-public-svc" Dec 10 11:17:07 crc kubenswrapper[4780]: I1210 11:17:07.496741 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-7788568699-swzvp" event={"ID":"2df2ca18-27cb-48e0-842c-f346f4703a37","Type":"ContainerStarted","Data":"a07d749e986e9b647877729ec5ad2da3e27dbbbe142da9c4a1045ddbc02ad75d"} Dec 10 11:17:07 crc kubenswrapper[4780]: I1210 11:17:07.516947 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Dec 10 11:17:07 crc kubenswrapper[4780]: I1210 11:17:07.561056 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5a0987c6-976b-4d4e-9456-4516cdaf53a0-logs\") pod \"cinder-api-0\" (UID: \"5a0987c6-976b-4d4e-9456-4516cdaf53a0\") " pod="openstack/cinder-api-0" Dec 10 11:17:07 crc kubenswrapper[4780]: I1210 11:17:07.561134 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/5a0987c6-976b-4d4e-9456-4516cdaf53a0-config-data-custom\") pod \"cinder-api-0\" (UID: \"5a0987c6-976b-4d4e-9456-4516cdaf53a0\") " pod="openstack/cinder-api-0" Dec 10 11:17:07 crc kubenswrapper[4780]: I1210 11:17:07.561387 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: 
\"kubernetes.io/secret/5a0987c6-976b-4d4e-9456-4516cdaf53a0-config-data\") pod \"cinder-api-0\" (UID: \"5a0987c6-976b-4d4e-9456-4516cdaf53a0\") " pod="openstack/cinder-api-0" Dec 10 11:17:07 crc kubenswrapper[4780]: I1210 11:17:07.561428 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5a0987c6-976b-4d4e-9456-4516cdaf53a0-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"5a0987c6-976b-4d4e-9456-4516cdaf53a0\") " pod="openstack/cinder-api-0" Dec 10 11:17:07 crc kubenswrapper[4780]: I1210 11:17:07.561471 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/5a0987c6-976b-4d4e-9456-4516cdaf53a0-public-tls-certs\") pod \"cinder-api-0\" (UID: \"5a0987c6-976b-4d4e-9456-4516cdaf53a0\") " pod="openstack/cinder-api-0" Dec 10 11:17:07 crc kubenswrapper[4780]: I1210 11:17:07.561681 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5a0987c6-976b-4d4e-9456-4516cdaf53a0-scripts\") pod \"cinder-api-0\" (UID: \"5a0987c6-976b-4d4e-9456-4516cdaf53a0\") " pod="openstack/cinder-api-0" Dec 10 11:17:07 crc kubenswrapper[4780]: I1210 11:17:07.561808 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zfngs\" (UniqueName: \"kubernetes.io/projected/5a0987c6-976b-4d4e-9456-4516cdaf53a0-kube-api-access-zfngs\") pod \"cinder-api-0\" (UID: \"5a0987c6-976b-4d4e-9456-4516cdaf53a0\") " pod="openstack/cinder-api-0" Dec 10 11:17:07 crc kubenswrapper[4780]: I1210 11:17:07.561913 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/5a0987c6-976b-4d4e-9456-4516cdaf53a0-etc-machine-id\") pod \"cinder-api-0\" (UID: \"5a0987c6-976b-4d4e-9456-4516cdaf53a0\") " pod="openstack/cinder-api-0" Dec 10 11:17:07 crc kubenswrapper[4780]: I1210 11:17:07.562052 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/5a0987c6-976b-4d4e-9456-4516cdaf53a0-internal-tls-certs\") pod \"cinder-api-0\" (UID: \"5a0987c6-976b-4d4e-9456-4516cdaf53a0\") " pod="openstack/cinder-api-0" Dec 10 11:17:07 crc kubenswrapper[4780]: I1210 11:17:07.665625 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/5a0987c6-976b-4d4e-9456-4516cdaf53a0-public-tls-certs\") pod \"cinder-api-0\" (UID: \"5a0987c6-976b-4d4e-9456-4516cdaf53a0\") " pod="openstack/cinder-api-0" Dec 10 11:17:07 crc kubenswrapper[4780]: I1210 11:17:07.672670 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5a0987c6-976b-4d4e-9456-4516cdaf53a0-scripts\") pod \"cinder-api-0\" (UID: \"5a0987c6-976b-4d4e-9456-4516cdaf53a0\") " pod="openstack/cinder-api-0" Dec 10 11:17:07 crc kubenswrapper[4780]: I1210 11:17:07.673141 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zfngs\" (UniqueName: \"kubernetes.io/projected/5a0987c6-976b-4d4e-9456-4516cdaf53a0-kube-api-access-zfngs\") pod \"cinder-api-0\" (UID: \"5a0987c6-976b-4d4e-9456-4516cdaf53a0\") " pod="openstack/cinder-api-0" Dec 10 11:17:07 crc kubenswrapper[4780]: I1210 
11:17:07.673412 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/5a0987c6-976b-4d4e-9456-4516cdaf53a0-public-tls-certs\") pod \"cinder-api-0\" (UID: \"5a0987c6-976b-4d4e-9456-4516cdaf53a0\") " pod="openstack/cinder-api-0" Dec 10 11:17:07 crc kubenswrapper[4780]: I1210 11:17:07.673417 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/5a0987c6-976b-4d4e-9456-4516cdaf53a0-etc-machine-id\") pod \"cinder-api-0\" (UID: \"5a0987c6-976b-4d4e-9456-4516cdaf53a0\") " pod="openstack/cinder-api-0" Dec 10 11:17:07 crc kubenswrapper[4780]: I1210 11:17:07.673774 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/5a0987c6-976b-4d4e-9456-4516cdaf53a0-internal-tls-certs\") pod \"cinder-api-0\" (UID: \"5a0987c6-976b-4d4e-9456-4516cdaf53a0\") " pod="openstack/cinder-api-0" Dec 10 11:17:07 crc kubenswrapper[4780]: I1210 11:17:07.673964 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/5a0987c6-976b-4d4e-9456-4516cdaf53a0-etc-machine-id\") pod \"cinder-api-0\" (UID: \"5a0987c6-976b-4d4e-9456-4516cdaf53a0\") " pod="openstack/cinder-api-0" Dec 10 11:17:07 crc kubenswrapper[4780]: I1210 11:17:07.674223 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5a0987c6-976b-4d4e-9456-4516cdaf53a0-logs\") pod \"cinder-api-0\" (UID: \"5a0987c6-976b-4d4e-9456-4516cdaf53a0\") " pod="openstack/cinder-api-0" Dec 10 11:17:07 crc kubenswrapper[4780]: I1210 11:17:07.674334 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/5a0987c6-976b-4d4e-9456-4516cdaf53a0-config-data-custom\") pod \"cinder-api-0\" (UID: \"5a0987c6-976b-4d4e-9456-4516cdaf53a0\") " pod="openstack/cinder-api-0" Dec 10 11:17:07 crc kubenswrapper[4780]: I1210 11:17:07.675086 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5a0987c6-976b-4d4e-9456-4516cdaf53a0-logs\") pod \"cinder-api-0\" (UID: \"5a0987c6-976b-4d4e-9456-4516cdaf53a0\") " pod="openstack/cinder-api-0" Dec 10 11:17:07 crc kubenswrapper[4780]: I1210 11:17:07.678342 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5a0987c6-976b-4d4e-9456-4516cdaf53a0-config-data\") pod \"cinder-api-0\" (UID: \"5a0987c6-976b-4d4e-9456-4516cdaf53a0\") " pod="openstack/cinder-api-0" Dec 10 11:17:07 crc kubenswrapper[4780]: I1210 11:17:07.678393 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5a0987c6-976b-4d4e-9456-4516cdaf53a0-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"5a0987c6-976b-4d4e-9456-4516cdaf53a0\") " pod="openstack/cinder-api-0" Dec 10 11:17:07 crc kubenswrapper[4780]: I1210 11:17:07.681361 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5a0987c6-976b-4d4e-9456-4516cdaf53a0-scripts\") pod \"cinder-api-0\" (UID: \"5a0987c6-976b-4d4e-9456-4516cdaf53a0\") " pod="openstack/cinder-api-0" Dec 10 11:17:07 crc kubenswrapper[4780]: I1210 11:17:07.684686 4780 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/5a0987c6-976b-4d4e-9456-4516cdaf53a0-internal-tls-certs\") pod \"cinder-api-0\" (UID: \"5a0987c6-976b-4d4e-9456-4516cdaf53a0\") " pod="openstack/cinder-api-0" Dec 10 11:17:07 crc kubenswrapper[4780]: I1210 11:17:07.684814 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5a0987c6-976b-4d4e-9456-4516cdaf53a0-config-data\") pod \"cinder-api-0\" (UID: \"5a0987c6-976b-4d4e-9456-4516cdaf53a0\") " pod="openstack/cinder-api-0" Dec 10 11:17:07 crc kubenswrapper[4780]: I1210 11:17:07.688408 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/5a0987c6-976b-4d4e-9456-4516cdaf53a0-config-data-custom\") pod \"cinder-api-0\" (UID: \"5a0987c6-976b-4d4e-9456-4516cdaf53a0\") " pod="openstack/cinder-api-0" Dec 10 11:17:07 crc kubenswrapper[4780]: I1210 11:17:07.712762 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5a0987c6-976b-4d4e-9456-4516cdaf53a0-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"5a0987c6-976b-4d4e-9456-4516cdaf53a0\") " pod="openstack/cinder-api-0" Dec 10 11:17:07 crc kubenswrapper[4780]: I1210 11:17:07.737248 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zfngs\" (UniqueName: \"kubernetes.io/projected/5a0987c6-976b-4d4e-9456-4516cdaf53a0-kube-api-access-zfngs\") pod \"cinder-api-0\" (UID: \"5a0987c6-976b-4d4e-9456-4516cdaf53a0\") " pod="openstack/cinder-api-0" Dec 10 11:17:07 crc kubenswrapper[4780]: I1210 11:17:07.829525 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-api-0" Dec 10 11:17:07 crc kubenswrapper[4780]: I1210 11:17:07.880061 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Dec 10 11:17:07 crc kubenswrapper[4780]: I1210 11:17:07.911080 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Dec 10 11:17:08 crc kubenswrapper[4780]: W1210 11:17:08.054537 4780 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod462cb03c_f171_4b9e_8bf2_b63ec19e6a06.slice/crio-8be6df67bad5e3411c1aa681adc407659a2fd48b0dc8eb50672639198f1564b3 WatchSource:0}: Error finding container 8be6df67bad5e3411c1aa681adc407659a2fd48b0dc8eb50672639198f1564b3: Status 404 returned error can't find the container with id 8be6df67bad5e3411c1aa681adc407659a2fd48b0dc8eb50672639198f1564b3 Dec 10 11:17:08 crc kubenswrapper[4780]: I1210 11:17:08.059333 4780 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="61b91736-8f72-474a-ab9a-8071b34b8458" path="/var/lib/kubelet/pods/61b91736-8f72-474a-ab9a-8071b34b8458/volumes" Dec 10 11:17:08 crc kubenswrapper[4780]: I1210 11:17:08.061066 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-cfnapi-5fbd6d5fcb-7dcpq"] Dec 10 11:17:08 crc kubenswrapper[4780]: I1210 11:17:08.061114 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-api-5bffc7b484-bhjz4"] Dec 10 11:17:08 crc kubenswrapper[4780]: W1210 11:17:08.082732 4780 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod274acc05_0f10_48e5_8fb8_44bc1ddca126.slice/crio-7f1e6de9a3e5dbe25aedd36f3ddd92588252768a5cf4258946c40df4e285e4b9 WatchSource:0}: Error finding container 7f1e6de9a3e5dbe25aedd36f3ddd92588252768a5cf4258946c40df4e285e4b9: Status 404 returned error can't find the container with id 7f1e6de9a3e5dbe25aedd36f3ddd92588252768a5cf4258946c40df4e285e4b9 Dec 10 11:17:08 crc kubenswrapper[4780]: I1210 11:17:08.386956 4780 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-db-create-wdmbj" Dec 10 11:17:08 crc kubenswrapper[4780]: I1210 11:17:08.446955 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a58ed76c-f42e-41ca-8e79-3b656701cdea-operator-scripts\") pod \"a58ed76c-f42e-41ca-8e79-3b656701cdea\" (UID: \"a58ed76c-f42e-41ca-8e79-3b656701cdea\") " Dec 10 11:17:08 crc kubenswrapper[4780]: I1210 11:17:08.447042 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pgxnw\" (UniqueName: \"kubernetes.io/projected/a58ed76c-f42e-41ca-8e79-3b656701cdea-kube-api-access-pgxnw\") pod \"a58ed76c-f42e-41ca-8e79-3b656701cdea\" (UID: \"a58ed76c-f42e-41ca-8e79-3b656701cdea\") " Dec 10 11:17:08 crc kubenswrapper[4780]: I1210 11:17:08.448239 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a58ed76c-f42e-41ca-8e79-3b656701cdea-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "a58ed76c-f42e-41ca-8e79-3b656701cdea" (UID: "a58ed76c-f42e-41ca-8e79-3b656701cdea"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 11:17:08 crc kubenswrapper[4780]: I1210 11:17:08.448398 4780 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a58ed76c-f42e-41ca-8e79-3b656701cdea-operator-scripts\") on node \"crc\" DevicePath \"\"" Dec 10 11:17:08 crc kubenswrapper[4780]: I1210 11:17:08.467608 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a58ed76c-f42e-41ca-8e79-3b656701cdea-kube-api-access-pgxnw" (OuterVolumeSpecName: "kube-api-access-pgxnw") pod "a58ed76c-f42e-41ca-8e79-3b656701cdea" (UID: "a58ed76c-f42e-41ca-8e79-3b656701cdea"). InnerVolumeSpecName "kube-api-access-pgxnw". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:17:08 crc kubenswrapper[4780]: I1210 11:17:08.540457 4780 generic.go:334] "Generic (PLEG): container finished" podID="f8ddd440-8c9e-4d62-8efa-b5c8dd6bc990" containerID="c9d38332c37fbfbdad98cfaa7496e6ae5334f6bcf682e91dc45b45fa4343e0d6" exitCode=0 Dec 10 11:17:08 crc kubenswrapper[4780]: I1210 11:17:08.540681 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-kjc6l" event={"ID":"f8ddd440-8c9e-4d62-8efa-b5c8dd6bc990","Type":"ContainerDied","Data":"c9d38332c37fbfbdad98cfaa7496e6ae5334f6bcf682e91dc45b45fa4343e0d6"} Dec 10 11:17:08 crc kubenswrapper[4780]: I1210 11:17:08.550854 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"462cb03c-f171-4b9e-8bf2-b63ec19e6a06","Type":"ContainerStarted","Data":"8be6df67bad5e3411c1aa681adc407659a2fd48b0dc8eb50672639198f1564b3"} Dec 10 11:17:08 crc kubenswrapper[4780]: I1210 11:17:08.558652 4780 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pgxnw\" (UniqueName: \"kubernetes.io/projected/a58ed76c-f42e-41ca-8e79-3b656701cdea-kube-api-access-pgxnw\") on node \"crc\" DevicePath \"\"" Dec 10 11:17:08 crc kubenswrapper[4780]: I1210 11:17:08.578577 4780 generic.go:334] "Generic (PLEG): container finished" podID="b1f2fac2-0797-448c-b4d8-98ebd4eff159" containerID="8a05503156564e80d3285b9e0e559aa401ec58508cb35e2e5195b78c239dab14" exitCode=0 Dec 10 11:17:08 crc kubenswrapper[4780]: I1210 11:17:08.578727 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-122d-account-create-update-tj4vk" event={"ID":"b1f2fac2-0797-448c-b4d8-98ebd4eff159","Type":"ContainerDied","Data":"8a05503156564e80d3285b9e0e559aa401ec58508cb35e2e5195b78c239dab14"} Dec 10 11:17:08 crc kubenswrapper[4780]: I1210 11:17:08.601563 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-9ebb-account-create-update-nb7g9" event={"ID":"00fa7636-13e7-49b7-8ce0-dac88eab63d7","Type":"ContainerStarted","Data":"969f13fc29150a1fb6e7ff4c3c5474894ef36f1dba90993637cecbad493ae9d4"} Dec 10 11:17:08 crc kubenswrapper[4780]: I1210 11:17:08.629950 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-5fbd6d5fcb-7dcpq" event={"ID":"0e464f35-41ce-4f1e-b728-c12bfb04abb4","Type":"ContainerStarted","Data":"b32fc4cb34c3a4345a3e4199104d869211b96761c848c3040b35a9e97efdb893"} Dec 10 11:17:08 crc kubenswrapper[4780]: I1210 11:17:08.635876 4780 generic.go:334] "Generic (PLEG): container finished" podID="d9a943a1-6945-4117-a123-5c96d85b4e77" containerID="194a2f6ed27dc0727e61c4c42fdfaefc27f2efebac0daed63bba695fb2d09a33" exitCode=0 Dec 10 11:17:08 crc kubenswrapper[4780]: I1210 11:17:08.636141 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/nova-cell0-2c72-account-create-update-22zbp" event={"ID":"d9a943a1-6945-4117-a123-5c96d85b4e77","Type":"ContainerDied","Data":"194a2f6ed27dc0727e61c4c42fdfaefc27f2efebac0daed63bba695fb2d09a33"} Dec 10 11:17:08 crc kubenswrapper[4780]: I1210 11:17:08.655522 4780 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-db-create-wdmbj" Dec 10 11:17:08 crc kubenswrapper[4780]: I1210 11:17:08.659012 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-wdmbj" event={"ID":"a58ed76c-f42e-41ca-8e79-3b656701cdea","Type":"ContainerDied","Data":"bac2c8aeda3379777cc017d734c3a05f2953e676c697bfda6a15ea2587123e23"} Dec 10 11:17:08 crc kubenswrapper[4780]: I1210 11:17:08.659086 4780 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="bac2c8aeda3379777cc017d734c3a05f2953e676c697bfda6a15ea2587123e23" Dec 10 11:17:08 crc kubenswrapper[4780]: I1210 11:17:08.694826 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-5bffc7b484-bhjz4" event={"ID":"43cf9913-8179-4d01-a9d8-40ae5078b366","Type":"ContainerStarted","Data":"e5eb05494e188bb5279f1e316f88dacf4f0f3e9cbb4066a7992234b468515795"} Dec 10 11:17:08 crc kubenswrapper[4780]: I1210 11:17:08.726497 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"274acc05-0f10-48e5-8fb8-44bc1ddca126","Type":"ContainerStarted","Data":"7f1e6de9a3e5dbe25aedd36f3ddd92588252768a5cf4258946c40df4e285e4b9"} Dec 10 11:17:08 crc kubenswrapper[4780]: I1210 11:17:08.928416 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Dec 10 11:17:10 crc kubenswrapper[4780]: I1210 11:17:10.162449 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"462cb03c-f171-4b9e-8bf2-b63ec19e6a06","Type":"ContainerStarted","Data":"6ee625cf7513efd4f88067c7645ead80ccb89341b0b7ba6639feb075d3f53231"} Dec 10 11:17:10 crc kubenswrapper[4780]: I1210 11:17:10.176991 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"5a0987c6-976b-4d4e-9456-4516cdaf53a0","Type":"ContainerStarted","Data":"036c584604fb71abd936e30790d1e51555309c1f770f065f03e106bf62658438"} Dec 10 11:17:10 crc kubenswrapper[4780]: I1210 11:17:10.185438 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-engine-86d9dd567b-q6nsm" event={"ID":"85332002-7a9a-4738-a4cd-5b66c34658b2","Type":"ContainerStarted","Data":"f8827e4d4277a19984ec5449d781f75d9a755b17ffd4e821c5bb57ca0afdcb3f"} Dec 10 11:17:10 crc kubenswrapper[4780]: I1210 11:17:10.186049 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/heat-engine-86d9dd567b-q6nsm" Dec 10 11:17:10 crc kubenswrapper[4780]: I1210 11:17:10.203577 4780 generic.go:334] "Generic (PLEG): container finished" podID="87bfb484-2111-41a3-99d5-52e8db80f098" containerID="acf7303f3fefe241e01fdccd9fcc36846ef4a752ef9cd94acaa10a5bc3e563c5" exitCode=0 Dec 10 11:17:10 crc kubenswrapper[4780]: I1210 11:17:10.203703 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-tw2jn" event={"ID":"87bfb484-2111-41a3-99d5-52e8db80f098","Type":"ContainerDied","Data":"acf7303f3fefe241e01fdccd9fcc36846ef4a752ef9cd94acaa10a5bc3e563c5"} Dec 10 11:17:10 crc kubenswrapper[4780]: I1210 11:17:10.209422 4780 generic.go:334] "Generic (PLEG): container finished" podID="8400fa88-5e91-417f-9495-12e8efcf25d0" 
containerID="60ec72c619f2504a673e15784e8e1841b7f31ff8e3b7d73662f5bfe72f542ab4" exitCode=0 Dec 10 11:17:10 crc kubenswrapper[4780]: I1210 11:17:10.209484 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7756b9d78c-2nwlx" event={"ID":"8400fa88-5e91-417f-9495-12e8efcf25d0","Type":"ContainerDied","Data":"60ec72c619f2504a673e15784e8e1841b7f31ff8e3b7d73662f5bfe72f542ab4"} Dec 10 11:17:10 crc kubenswrapper[4780]: I1210 11:17:10.234888 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"274acc05-0f10-48e5-8fb8-44bc1ddca126","Type":"ContainerStarted","Data":"116517725a29be1e8f31a027ffaec50fdf46842817a9098a6d30d01fe78059dc"} Dec 10 11:17:10 crc kubenswrapper[4780]: I1210 11:17:10.255279 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/heat-engine-86d9dd567b-q6nsm" podStartSLOduration=9.255246457 podStartE2EDuration="9.255246457s" podCreationTimestamp="2025-12-10 11:17:01 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 11:17:10.228849481 +0000 UTC m=+1935.082242924" watchObservedRunningTime="2025-12-10 11:17:10.255246457 +0000 UTC m=+1935.108639900" Dec 10 11:17:10 crc kubenswrapper[4780]: I1210 11:17:10.263338 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-engine-f7d475897-pp967" event={"ID":"068aca35-6c22-4164-bf63-481836e82331","Type":"ContainerStarted","Data":"4ea629a8edba4ea35e58de98127ed891339d71b7d3e39119bc8fb00fe19c1d73"} Dec 10 11:17:10 crc kubenswrapper[4780]: I1210 11:17:10.263544 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/heat-engine-f7d475897-pp967" Dec 10 11:17:10 crc kubenswrapper[4780]: I1210 11:17:10.283606 4780 generic.go:334] "Generic (PLEG): container finished" podID="00fa7636-13e7-49b7-8ce0-dac88eab63d7" containerID="969f13fc29150a1fb6e7ff4c3c5474894ef36f1dba90993637cecbad493ae9d4" exitCode=0 Dec 10 11:17:10 crc kubenswrapper[4780]: I1210 11:17:10.283733 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-9ebb-account-create-update-nb7g9" event={"ID":"00fa7636-13e7-49b7-8ce0-dac88eab63d7","Type":"ContainerDied","Data":"969f13fc29150a1fb6e7ff4c3c5474894ef36f1dba90993637cecbad493ae9d4"} Dec 10 11:17:10 crc kubenswrapper[4780]: I1210 11:17:10.362941 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/heat-engine-f7d475897-pp967" podStartSLOduration=17.362879935 podStartE2EDuration="17.362879935s" podCreationTimestamp="2025-12-10 11:16:53 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 11:17:10.313583042 +0000 UTC m=+1935.166976485" watchObservedRunningTime="2025-12-10 11:17:10.362879935 +0000 UTC m=+1935.216273378" Dec 10 11:17:10 crc kubenswrapper[4780]: I1210 11:17:10.966824 4780 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/cinder-api-0" podUID="61b91736-8f72-474a-ab9a-8071b34b8458" containerName="cinder-api" probeResult="failure" output="Get \"http://10.217.0.206:8776/healthcheck\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Dec 10 11:17:11 crc kubenswrapper[4780]: I1210 11:17:11.454999 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" 
event={"ID":"5a0987c6-976b-4d4e-9456-4516cdaf53a0","Type":"ContainerStarted","Data":"aa8add3c65954114401a345b3d4de68b3f8115e62f52ae77bed00c01aed76a7a"} Dec 10 11:17:11 crc kubenswrapper[4780]: I1210 11:17:11.700576 4780 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-db-create-kjc6l" Dec 10 11:17:11 crc kubenswrapper[4780]: I1210 11:17:11.843969 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fzfbs\" (UniqueName: \"kubernetes.io/projected/f8ddd440-8c9e-4d62-8efa-b5c8dd6bc990-kube-api-access-fzfbs\") pod \"f8ddd440-8c9e-4d62-8efa-b5c8dd6bc990\" (UID: \"f8ddd440-8c9e-4d62-8efa-b5c8dd6bc990\") " Dec 10 11:17:11 crc kubenswrapper[4780]: I1210 11:17:11.844286 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f8ddd440-8c9e-4d62-8efa-b5c8dd6bc990-operator-scripts\") pod \"f8ddd440-8c9e-4d62-8efa-b5c8dd6bc990\" (UID: \"f8ddd440-8c9e-4d62-8efa-b5c8dd6bc990\") " Dec 10 11:17:11 crc kubenswrapper[4780]: I1210 11:17:11.846873 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f8ddd440-8c9e-4d62-8efa-b5c8dd6bc990-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "f8ddd440-8c9e-4d62-8efa-b5c8dd6bc990" (UID: "f8ddd440-8c9e-4d62-8efa-b5c8dd6bc990"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 11:17:11 crc kubenswrapper[4780]: I1210 11:17:11.850159 4780 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f8ddd440-8c9e-4d62-8efa-b5c8dd6bc990-operator-scripts\") on node \"crc\" DevicePath \"\"" Dec 10 11:17:11 crc kubenswrapper[4780]: I1210 11:17:11.866863 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f8ddd440-8c9e-4d62-8efa-b5c8dd6bc990-kube-api-access-fzfbs" (OuterVolumeSpecName: "kube-api-access-fzfbs") pod "f8ddd440-8c9e-4d62-8efa-b5c8dd6bc990" (UID: "f8ddd440-8c9e-4d62-8efa-b5c8dd6bc990"). InnerVolumeSpecName "kube-api-access-fzfbs". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:17:11 crc kubenswrapper[4780]: I1210 11:17:11.954705 4780 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fzfbs\" (UniqueName: \"kubernetes.io/projected/f8ddd440-8c9e-4d62-8efa-b5c8dd6bc990-kube-api-access-fzfbs\") on node \"crc\" DevicePath \"\"" Dec 10 11:17:12 crc kubenswrapper[4780]: I1210 11:17:12.476341 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-kjc6l" event={"ID":"f8ddd440-8c9e-4d62-8efa-b5c8dd6bc990","Type":"ContainerDied","Data":"b089d4c2a526e5dcac3119b06baa712fcd2e3464c8a49a564e0c610c3ea234a4"} Dec 10 11:17:12 crc kubenswrapper[4780]: I1210 11:17:12.476405 4780 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b089d4c2a526e5dcac3119b06baa712fcd2e3464c8a49a564e0c610c3ea234a4" Dec 10 11:17:12 crc kubenswrapper[4780]: I1210 11:17:12.476505 4780 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-db-create-kjc6l" Dec 10 11:17:13 crc kubenswrapper[4780]: I1210 11:17:13.391140 4780 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-2c72-account-create-update-22zbp" Dec 10 11:17:13 crc kubenswrapper[4780]: I1210 11:17:13.437766 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d9a943a1-6945-4117-a123-5c96d85b4e77-operator-scripts\") pod \"d9a943a1-6945-4117-a123-5c96d85b4e77\" (UID: \"d9a943a1-6945-4117-a123-5c96d85b4e77\") " Dec 10 11:17:13 crc kubenswrapper[4780]: I1210 11:17:13.440477 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d9a943a1-6945-4117-a123-5c96d85b4e77-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "d9a943a1-6945-4117-a123-5c96d85b4e77" (UID: "d9a943a1-6945-4117-a123-5c96d85b4e77"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 11:17:13 crc kubenswrapper[4780]: I1210 11:17:13.472482 4780 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-122d-account-create-update-tj4vk" Dec 10 11:17:13 crc kubenswrapper[4780]: I1210 11:17:13.541090 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2l87b\" (UniqueName: \"kubernetes.io/projected/b1f2fac2-0797-448c-b4d8-98ebd4eff159-kube-api-access-2l87b\") pod \"b1f2fac2-0797-448c-b4d8-98ebd4eff159\" (UID: \"b1f2fac2-0797-448c-b4d8-98ebd4eff159\") " Dec 10 11:17:13 crc kubenswrapper[4780]: I1210 11:17:13.541752 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b1f2fac2-0797-448c-b4d8-98ebd4eff159-operator-scripts\") pod \"b1f2fac2-0797-448c-b4d8-98ebd4eff159\" (UID: \"b1f2fac2-0797-448c-b4d8-98ebd4eff159\") " Dec 10 11:17:13 crc kubenswrapper[4780]: I1210 11:17:13.541817 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-c4sd4\" (UniqueName: \"kubernetes.io/projected/d9a943a1-6945-4117-a123-5c96d85b4e77-kube-api-access-c4sd4\") pod \"d9a943a1-6945-4117-a123-5c96d85b4e77\" (UID: \"d9a943a1-6945-4117-a123-5c96d85b4e77\") " Dec 10 11:17:13 crc kubenswrapper[4780]: I1210 11:17:13.544559 4780 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d9a943a1-6945-4117-a123-5c96d85b4e77-operator-scripts\") on node \"crc\" DevicePath \"\"" Dec 10 11:17:13 crc kubenswrapper[4780]: I1210 11:17:13.554648 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b1f2fac2-0797-448c-b4d8-98ebd4eff159-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "b1f2fac2-0797-448c-b4d8-98ebd4eff159" (UID: "b1f2fac2-0797-448c-b4d8-98ebd4eff159"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 11:17:13 crc kubenswrapper[4780]: I1210 11:17:13.577655 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d9a943a1-6945-4117-a123-5c96d85b4e77-kube-api-access-c4sd4" (OuterVolumeSpecName: "kube-api-access-c4sd4") pod "d9a943a1-6945-4117-a123-5c96d85b4e77" (UID: "d9a943a1-6945-4117-a123-5c96d85b4e77"). InnerVolumeSpecName "kube-api-access-c4sd4". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:17:13 crc kubenswrapper[4780]: I1210 11:17:13.596634 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b1f2fac2-0797-448c-b4d8-98ebd4eff159-kube-api-access-2l87b" (OuterVolumeSpecName: "kube-api-access-2l87b") pod "b1f2fac2-0797-448c-b4d8-98ebd4eff159" (UID: "b1f2fac2-0797-448c-b4d8-98ebd4eff159"). InnerVolumeSpecName "kube-api-access-2l87b". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:17:13 crc kubenswrapper[4780]: I1210 11:17:13.647971 4780 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2l87b\" (UniqueName: \"kubernetes.io/projected/b1f2fac2-0797-448c-b4d8-98ebd4eff159-kube-api-access-2l87b\") on node \"crc\" DevicePath \"\"" Dec 10 11:17:13 crc kubenswrapper[4780]: I1210 11:17:13.648029 4780 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b1f2fac2-0797-448c-b4d8-98ebd4eff159-operator-scripts\") on node \"crc\" DevicePath \"\"" Dec 10 11:17:13 crc kubenswrapper[4780]: I1210 11:17:13.648041 4780 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-c4sd4\" (UniqueName: \"kubernetes.io/projected/d9a943a1-6945-4117-a123-5c96d85b4e77-kube-api-access-c4sd4\") on node \"crc\" DevicePath \"\"" Dec 10 11:17:13 crc kubenswrapper[4780]: I1210 11:17:13.677234 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-9ebb-account-create-update-nb7g9" event={"ID":"00fa7636-13e7-49b7-8ce0-dac88eab63d7","Type":"ContainerDied","Data":"7df620b06709be841d39024bcf829926b84623eaca7f82ee368e929e001a2eec"} Dec 10 11:17:13 crc kubenswrapper[4780]: I1210 11:17:13.677316 4780 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="7df620b06709be841d39024bcf829926b84623eaca7f82ee368e929e001a2eec" Dec 10 11:17:13 crc kubenswrapper[4780]: I1210 11:17:13.697913 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-2c72-account-create-update-22zbp" event={"ID":"d9a943a1-6945-4117-a123-5c96d85b4e77","Type":"ContainerDied","Data":"98184f23707ca7f7f520c47b3348bb15b666dfb6a00b16b67b8d28c49bee677f"} Dec 10 11:17:13 crc kubenswrapper[4780]: I1210 11:17:13.698004 4780 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="98184f23707ca7f7f520c47b3348bb15b666dfb6a00b16b67b8d28c49bee677f" Dec 10 11:17:13 crc kubenswrapper[4780]: I1210 11:17:13.698079 4780 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-2c72-account-create-update-22zbp" Dec 10 11:17:13 crc kubenswrapper[4780]: I1210 11:17:13.703664 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-122d-account-create-update-tj4vk" event={"ID":"b1f2fac2-0797-448c-b4d8-98ebd4eff159","Type":"ContainerDied","Data":"b67bed4b01f79b394fd2f8152b9fd7b3d46bbb73c4b247b0a879c59dedadf41b"} Dec 10 11:17:13 crc kubenswrapper[4780]: I1210 11:17:13.703711 4780 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-122d-account-create-update-tj4vk" Dec 10 11:17:13 crc kubenswrapper[4780]: I1210 11:17:13.703740 4780 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b67bed4b01f79b394fd2f8152b9fd7b3d46bbb73c4b247b0a879c59dedadf41b" Dec 10 11:17:13 crc kubenswrapper[4780]: I1210 11:17:13.734704 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-tw2jn" event={"ID":"87bfb484-2111-41a3-99d5-52e8db80f098","Type":"ContainerDied","Data":"87ca209e1be0e82bc58e8cce24c0435fce40773f03deb46acf8f8065fdb2d97d"} Dec 10 11:17:13 crc kubenswrapper[4780]: I1210 11:17:13.734792 4780 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="87ca209e1be0e82bc58e8cce24c0435fce40773f03deb46acf8f8065fdb2d97d" Dec 10 11:17:13 crc kubenswrapper[4780]: I1210 11:17:13.850570 4780 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-9ebb-account-create-update-nb7g9" Dec 10 11:17:13 crc kubenswrapper[4780]: I1210 11:17:13.852466 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x22fh\" (UniqueName: \"kubernetes.io/projected/00fa7636-13e7-49b7-8ce0-dac88eab63d7-kube-api-access-x22fh\") pod \"00fa7636-13e7-49b7-8ce0-dac88eab63d7\" (UID: \"00fa7636-13e7-49b7-8ce0-dac88eab63d7\") " Dec 10 11:17:13 crc kubenswrapper[4780]: I1210 11:17:13.855444 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/00fa7636-13e7-49b7-8ce0-dac88eab63d7-operator-scripts\") pod \"00fa7636-13e7-49b7-8ce0-dac88eab63d7\" (UID: \"00fa7636-13e7-49b7-8ce0-dac88eab63d7\") " Dec 10 11:17:13 crc kubenswrapper[4780]: I1210 11:17:13.858699 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/00fa7636-13e7-49b7-8ce0-dac88eab63d7-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "00fa7636-13e7-49b7-8ce0-dac88eab63d7" (UID: "00fa7636-13e7-49b7-8ce0-dac88eab63d7"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 11:17:13 crc kubenswrapper[4780]: I1210 11:17:13.867601 4780 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/00fa7636-13e7-49b7-8ce0-dac88eab63d7-operator-scripts\") on node \"crc\" DevicePath \"\"" Dec 10 11:17:13 crc kubenswrapper[4780]: I1210 11:17:13.920792 4780 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-db-create-tw2jn" Dec 10 11:17:14 crc kubenswrapper[4780]: I1210 11:17:14.000616 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/87bfb484-2111-41a3-99d5-52e8db80f098-operator-scripts\") pod \"87bfb484-2111-41a3-99d5-52e8db80f098\" (UID: \"87bfb484-2111-41a3-99d5-52e8db80f098\") " Dec 10 11:17:14 crc kubenswrapper[4780]: I1210 11:17:14.000788 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w8dxp\" (UniqueName: \"kubernetes.io/projected/87bfb484-2111-41a3-99d5-52e8db80f098-kube-api-access-w8dxp\") pod \"87bfb484-2111-41a3-99d5-52e8db80f098\" (UID: \"87bfb484-2111-41a3-99d5-52e8db80f098\") " Dec 10 11:17:14 crc kubenswrapper[4780]: I1210 11:17:14.001779 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/87bfb484-2111-41a3-99d5-52e8db80f098-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "87bfb484-2111-41a3-99d5-52e8db80f098" (UID: "87bfb484-2111-41a3-99d5-52e8db80f098"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 11:17:14 crc kubenswrapper[4780]: I1210 11:17:14.038898 4780 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/87bfb484-2111-41a3-99d5-52e8db80f098-operator-scripts\") on node \"crc\" DevicePath \"\"" Dec 10 11:17:14 crc kubenswrapper[4780]: I1210 11:17:14.050664 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/00fa7636-13e7-49b7-8ce0-dac88eab63d7-kube-api-access-x22fh" (OuterVolumeSpecName: "kube-api-access-x22fh") pod "00fa7636-13e7-49b7-8ce0-dac88eab63d7" (UID: "00fa7636-13e7-49b7-8ce0-dac88eab63d7"). InnerVolumeSpecName "kube-api-access-x22fh". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:17:14 crc kubenswrapper[4780]: I1210 11:17:14.072524 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/87bfb484-2111-41a3-99d5-52e8db80f098-kube-api-access-w8dxp" (OuterVolumeSpecName: "kube-api-access-w8dxp") pod "87bfb484-2111-41a3-99d5-52e8db80f098" (UID: "87bfb484-2111-41a3-99d5-52e8db80f098"). InnerVolumeSpecName "kube-api-access-w8dxp". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:17:14 crc kubenswrapper[4780]: I1210 11:17:14.197876 4780 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w8dxp\" (UniqueName: \"kubernetes.io/projected/87bfb484-2111-41a3-99d5-52e8db80f098-kube-api-access-w8dxp\") on node \"crc\" DevicePath \"\"" Dec 10 11:17:14 crc kubenswrapper[4780]: I1210 11:17:14.198042 4780 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x22fh\" (UniqueName: \"kubernetes.io/projected/00fa7636-13e7-49b7-8ce0-dac88eab63d7-kube-api-access-x22fh\") on node \"crc\" DevicePath \"\"" Dec 10 11:17:14 crc kubenswrapper[4780]: I1210 11:17:14.800574 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-7bc68bff5f-xvzgg" event={"ID":"77c41a0f-d539-496a-85f5-f6aec31747a9","Type":"ContainerStarted","Data":"b96b27bbc2aaeec41555ddb02793a2d4d03ca6571a8980464824c3158cc557f4"} Dec 10 11:17:14 crc kubenswrapper[4780]: I1210 11:17:14.801261 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/heat-api-7bc68bff5f-xvzgg" Dec 10 11:17:14 crc kubenswrapper[4780]: I1210 11:17:14.800616 4780 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/heat-api-7bc68bff5f-xvzgg" podUID="77c41a0f-d539-496a-85f5-f6aec31747a9" containerName="heat-api" containerID="cri-o://b96b27bbc2aaeec41555ddb02793a2d4d03ca6571a8980464824c3158cc557f4" gracePeriod=60 Dec 10 11:17:14 crc kubenswrapper[4780]: I1210 11:17:14.807328 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-7c6599c744-svfvf" event={"ID":"c09a9d73-c35e-40da-b545-ebf1b8724896","Type":"ContainerStarted","Data":"83d4e36136191b36d54d721fd4d8bac6982142cbe25322132fc6cdcc8b040dc2"} Dec 10 11:17:14 crc kubenswrapper[4780]: I1210 11:17:14.809574 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/heat-api-7c6599c744-svfvf" Dec 10 11:17:14 crc kubenswrapper[4780]: I1210 11:17:14.823345 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-7788568699-swzvp" event={"ID":"2df2ca18-27cb-48e0-842c-f346f4703a37","Type":"ContainerStarted","Data":"420a020bd4954456b8ed1de68d4c9e5b89a061e63bbbd46f94b2ef8ade1e0353"} Dec 10 11:17:14 crc kubenswrapper[4780]: I1210 11:17:14.824343 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/heat-cfnapi-7788568699-swzvp" Dec 10 11:17:14 crc kubenswrapper[4780]: I1210 11:17:14.839938 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/heat-api-7bc68bff5f-xvzgg" podStartSLOduration=15.178925018 podStartE2EDuration="21.839873658s" podCreationTimestamp="2025-12-10 11:16:53 +0000 UTC" firstStartedPulling="2025-12-10 11:17:06.178668541 +0000 UTC m=+1931.032061984" lastFinishedPulling="2025-12-10 11:17:12.839617181 +0000 UTC m=+1937.693010624" observedRunningTime="2025-12-10 11:17:14.833263849 +0000 UTC m=+1939.686657292" watchObservedRunningTime="2025-12-10 11:17:14.839873658 +0000 UTC m=+1939.693267101" Dec 10 11:17:14 crc kubenswrapper[4780]: I1210 11:17:14.849448 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7756b9d78c-2nwlx" event={"ID":"8400fa88-5e91-417f-9495-12e8efcf25d0","Type":"ContainerStarted","Data":"59af7a12e82c50ed9f7be18ab82d68521fd7f872f011b56513b8ad717d60a80d"} Dec 10 11:17:14 crc kubenswrapper[4780]: I1210 11:17:14.849777 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-7756b9d78c-2nwlx" 
Dec 10 11:17:14 crc kubenswrapper[4780]: I1210 11:17:14.870098 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"274acc05-0f10-48e5-8fb8-44bc1ddca126","Type":"ContainerStarted","Data":"442ea43394c3c3d366415bc7d78b9efdac2b0c33cd15f3f01e4495af0e21abc9"} Dec 10 11:17:14 crc kubenswrapper[4780]: I1210 11:17:14.876942 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/heat-api-7c6599c744-svfvf" podStartSLOduration=8.139246433 podStartE2EDuration="13.876881767s" podCreationTimestamp="2025-12-10 11:17:01 +0000 UTC" firstStartedPulling="2025-12-10 11:17:07.168146972 +0000 UTC m=+1932.021540415" lastFinishedPulling="2025-12-10 11:17:12.905782306 +0000 UTC m=+1937.759175749" observedRunningTime="2025-12-10 11:17:14.859135082 +0000 UTC m=+1939.712528535" watchObservedRunningTime="2025-12-10 11:17:14.876881767 +0000 UTC m=+1939.730275220" Dec 10 11:17:14 crc kubenswrapper[4780]: I1210 11:17:14.884806 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-5fbd6d5fcb-7dcpq" event={"ID":"0e464f35-41ce-4f1e-b728-c12bfb04abb4","Type":"ContainerStarted","Data":"f68bbc8316b87a03668eb71efa33b91739e74aaeddee8d6ef21013a97f3389c0"} Dec 10 11:17:14 crc kubenswrapper[4780]: I1210 11:17:14.887240 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/heat-cfnapi-5fbd6d5fcb-7dcpq" Dec 10 11:17:14 crc kubenswrapper[4780]: I1210 11:17:14.894170 4780 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/heat-cfnapi-54f5bc9f87-vp45v" podUID="19201430-5ecc-4a0b-ad28-5cdfff8d037a" containerName="heat-cfnapi" containerID="cri-o://17f44b9919535fa55e47ec91ea4abc4f74f00d2e89680cb06d340a52b696ac25" gracePeriod=60 Dec 10 11:17:14 crc kubenswrapper[4780]: I1210 11:17:14.894651 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-54f5bc9f87-vp45v" event={"ID":"19201430-5ecc-4a0b-ad28-5cdfff8d037a","Type":"ContainerStarted","Data":"17f44b9919535fa55e47ec91ea4abc4f74f00d2e89680cb06d340a52b696ac25"} Dec 10 11:17:14 crc kubenswrapper[4780]: I1210 11:17:14.894705 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/heat-cfnapi-54f5bc9f87-vp45v" Dec 10 11:17:14 crc kubenswrapper[4780]: I1210 11:17:14.906072 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/heat-cfnapi-7788568699-swzvp" podStartSLOduration=8.062448936 podStartE2EDuration="13.906036664s" podCreationTimestamp="2025-12-10 11:17:01 +0000 UTC" firstStartedPulling="2025-12-10 11:17:07.097872452 +0000 UTC m=+1931.951265895" lastFinishedPulling="2025-12-10 11:17:12.94146018 +0000 UTC m=+1937.794853623" observedRunningTime="2025-12-10 11:17:14.900179693 +0000 UTC m=+1939.753573136" watchObservedRunningTime="2025-12-10 11:17:14.906036664 +0000 UTC m=+1939.759430107" Dec 10 11:17:14 crc kubenswrapper[4780]: I1210 11:17:14.918827 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"462cb03c-f171-4b9e-8bf2-b63ec19e6a06","Type":"ContainerStarted","Data":"796da8de351bc893629f2d518e50615ba779ea06ca569d468390fe6eaeef5431"} Dec 10 11:17:14 crc kubenswrapper[4780]: I1210 11:17:14.919005 4780 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-db-create-tw2jn" Dec 10 11:17:14 crc kubenswrapper[4780]: I1210 11:17:14.918903 4780 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-9ebb-account-create-update-nb7g9" Dec 10 11:17:14 crc kubenswrapper[4780]: I1210 11:17:14.974914 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-scheduler-0" podStartSLOduration=11.974882467 podStartE2EDuration="11.974882467s" podCreationTimestamp="2025-12-10 11:17:03 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 11:17:14.932087241 +0000 UTC m=+1939.785480684" watchObservedRunningTime="2025-12-10 11:17:14.974882467 +0000 UTC m=+1939.828275910" Dec 10 11:17:14 crc kubenswrapper[4780]: I1210 11:17:14.996505 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-7756b9d78c-2nwlx" podStartSLOduration=21.996479171 podStartE2EDuration="21.996479171s" podCreationTimestamp="2025-12-10 11:16:53 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 11:17:14.965318872 +0000 UTC m=+1939.818712315" watchObservedRunningTime="2025-12-10 11:17:14.996479171 +0000 UTC m=+1939.849872614" Dec 10 11:17:15 crc kubenswrapper[4780]: I1210 11:17:15.023781 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/heat-cfnapi-5fbd6d5fcb-7dcpq" podStartSLOduration=5.386791547 podStartE2EDuration="10.0237566s" podCreationTimestamp="2025-12-10 11:17:05 +0000 UTC" firstStartedPulling="2025-12-10 11:17:08.145444502 +0000 UTC m=+1932.998837945" lastFinishedPulling="2025-12-10 11:17:12.782409555 +0000 UTC m=+1937.635802998" observedRunningTime="2025-12-10 11:17:15.007960865 +0000 UTC m=+1939.861354318" watchObservedRunningTime="2025-12-10 11:17:15.0237566 +0000 UTC m=+1939.877150033" Dec 10 11:17:15 crc kubenswrapper[4780]: I1210 11:17:15.057727 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/heat-cfnapi-54f5bc9f87-vp45v" podStartSLOduration=15.363276533 podStartE2EDuration="22.057690699s" podCreationTimestamp="2025-12-10 11:16:53 +0000 UTC" firstStartedPulling="2025-12-10 11:17:06.246701935 +0000 UTC m=+1931.100095378" lastFinishedPulling="2025-12-10 11:17:12.941116101 +0000 UTC m=+1937.794509544" observedRunningTime="2025-12-10 11:17:15.035257714 +0000 UTC m=+1939.888651157" watchObservedRunningTime="2025-12-10 11:17:15.057690699 +0000 UTC m=+1939.911084142" Dec 10 11:17:15 crc kubenswrapper[4780]: I1210 11:17:15.936012 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"5a0987c6-976b-4d4e-9456-4516cdaf53a0","Type":"ContainerStarted","Data":"2ac48ea35ef6cbe3aa3f7693e09fd3628490bc227538423d678b990e7e3088ea"} Dec 10 11:17:15 crc kubenswrapper[4780]: I1210 11:17:15.936907 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/cinder-api-0" Dec 10 11:17:15 crc kubenswrapper[4780]: I1210 11:17:15.939289 4780 generic.go:334] "Generic (PLEG): container finished" podID="c09a9d73-c35e-40da-b545-ebf1b8724896" containerID="83d4e36136191b36d54d721fd4d8bac6982142cbe25322132fc6cdcc8b040dc2" exitCode=1 Dec 10 11:17:15 crc kubenswrapper[4780]: I1210 11:17:15.939403 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-7c6599c744-svfvf" event={"ID":"c09a9d73-c35e-40da-b545-ebf1b8724896","Type":"ContainerDied","Data":"83d4e36136191b36d54d721fd4d8bac6982142cbe25322132fc6cdcc8b040dc2"} Dec 10 11:17:15 crc kubenswrapper[4780]: I1210 
11:17:15.940702 4780 scope.go:117] "RemoveContainer" containerID="83d4e36136191b36d54d721fd4d8bac6982142cbe25322132fc6cdcc8b040dc2" Dec 10 11:17:15 crc kubenswrapper[4780]: I1210 11:17:15.942600 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-5bffc7b484-bhjz4" event={"ID":"43cf9913-8179-4d01-a9d8-40ae5078b366","Type":"ContainerStarted","Data":"e749668db886991f2f6595b746c0a9a5efd7607812425274967f778030dc2a44"} Dec 10 11:17:15 crc kubenswrapper[4780]: I1210 11:17:15.943571 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/heat-api-5bffc7b484-bhjz4" Dec 10 11:17:15 crc kubenswrapper[4780]: I1210 11:17:15.945478 4780 generic.go:334] "Generic (PLEG): container finished" podID="2df2ca18-27cb-48e0-842c-f346f4703a37" containerID="420a020bd4954456b8ed1de68d4c9e5b89a061e63bbbd46f94b2ef8ade1e0353" exitCode=1 Dec 10 11:17:15 crc kubenswrapper[4780]: I1210 11:17:15.945634 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-7788568699-swzvp" event={"ID":"2df2ca18-27cb-48e0-842c-f346f4703a37","Type":"ContainerDied","Data":"420a020bd4954456b8ed1de68d4c9e5b89a061e63bbbd46f94b2ef8ade1e0353"} Dec 10 11:17:15 crc kubenswrapper[4780]: I1210 11:17:15.946337 4780 scope.go:117] "RemoveContainer" containerID="420a020bd4954456b8ed1de68d4c9e5b89a061e63bbbd46f94b2ef8ade1e0353" Dec 10 11:17:15 crc kubenswrapper[4780]: I1210 11:17:15.961673 4780 scope.go:117] "RemoveContainer" containerID="90f38aa24e74e78a263a82b742bebf91991ff25f2212114904e33abbbd82de16" Dec 10 11:17:15 crc kubenswrapper[4780]: E1210 11:17:15.962108 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xhdr5_openshift-machine-config-operator(6bf1dca1-b191-4796-b326-baac53e84045)\"" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" podUID="6bf1dca1-b191-4796-b326-baac53e84045" Dec 10 11:17:15 crc kubenswrapper[4780]: I1210 11:17:15.971292 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-api-0" podStartSLOduration=8.971264506 podStartE2EDuration="8.971264506s" podCreationTimestamp="2025-12-10 11:17:07 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 11:17:15.966595696 +0000 UTC m=+1940.819989139" watchObservedRunningTime="2025-12-10 11:17:15.971264506 +0000 UTC m=+1940.824657949" Dec 10 11:17:16 crc kubenswrapper[4780]: I1210 11:17:16.202041 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/heat-api-5bffc7b484-bhjz4" podStartSLOduration=6.451764992 podStartE2EDuration="11.202022198s" podCreationTimestamp="2025-12-10 11:17:05 +0000 UTC" firstStartedPulling="2025-12-10 11:17:08.154579436 +0000 UTC m=+1933.007972879" lastFinishedPulling="2025-12-10 11:17:12.904836642 +0000 UTC m=+1937.758230085" observedRunningTime="2025-12-10 11:17:16.034691841 +0000 UTC m=+1940.888085274" watchObservedRunningTime="2025-12-10 11:17:16.202022198 +0000 UTC m=+1941.055415641" Dec 10 11:17:17 crc kubenswrapper[4780]: I1210 11:17:17.077830 4780 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack/heat-cfnapi-7788568699-swzvp" Dec 10 11:17:17 crc kubenswrapper[4780]: I1210 11:17:17.114324 4780 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" 
pod="openstack/heat-api-7c6599c744-svfvf" Dec 10 11:17:17 crc kubenswrapper[4780]: I1210 11:17:17.245992 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-7c6599c744-svfvf" event={"ID":"c09a9d73-c35e-40da-b545-ebf1b8724896","Type":"ContainerStarted","Data":"61dcd300f2425a7bda15572ca98b5c00a398cba525447432a25091e819a341fb"} Dec 10 11:17:18 crc kubenswrapper[4780]: E1210 11:17:18.019683 4780 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod2df2ca18_27cb_48e0_842c_f346f4703a37.slice/crio-52fa087134da031e63f10253f05707262c2632a1257945320cfed4ac104f74f3.scope\": RecentStats: unable to find data in memory cache]" Dec 10 11:17:18 crc kubenswrapper[4780]: I1210 11:17:18.263767 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstackclient" event={"ID":"e74ddef3-dfb9-4409-9920-1cad0dc2492c","Type":"ContainerStarted","Data":"40fc3363fa4695368ddaaa0d2f3b266b7fb7dbe9c354b00ed41387ffa86ec1db"} Dec 10 11:17:18 crc kubenswrapper[4780]: I1210 11:17:18.269033 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"462cb03c-f171-4b9e-8bf2-b63ec19e6a06","Type":"ContainerStarted","Data":"e1dccb49125ca08f170604f7879f137628038860557c9038dae1b6d83136b3f0"} Dec 10 11:17:18 crc kubenswrapper[4780]: I1210 11:17:18.274321 4780 generic.go:334] "Generic (PLEG): container finished" podID="c09a9d73-c35e-40da-b545-ebf1b8724896" containerID="61dcd300f2425a7bda15572ca98b5c00a398cba525447432a25091e819a341fb" exitCode=1 Dec 10 11:17:18 crc kubenswrapper[4780]: I1210 11:17:18.274464 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-7c6599c744-svfvf" event={"ID":"c09a9d73-c35e-40da-b545-ebf1b8724896","Type":"ContainerDied","Data":"61dcd300f2425a7bda15572ca98b5c00a398cba525447432a25091e819a341fb"} Dec 10 11:17:18 crc kubenswrapper[4780]: I1210 11:17:18.274594 4780 scope.go:117] "RemoveContainer" containerID="83d4e36136191b36d54d721fd4d8bac6982142cbe25322132fc6cdcc8b040dc2" Dec 10 11:17:18 crc kubenswrapper[4780]: I1210 11:17:18.275844 4780 scope.go:117] "RemoveContainer" containerID="61dcd300f2425a7bda15572ca98b5c00a398cba525447432a25091e819a341fb" Dec 10 11:17:18 crc kubenswrapper[4780]: E1210 11:17:18.276463 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-api\" with CrashLoopBackOff: \"back-off 10s restarting failed container=heat-api pod=heat-api-7c6599c744-svfvf_openstack(c09a9d73-c35e-40da-b545-ebf1b8724896)\"" pod="openstack/heat-api-7c6599c744-svfvf" podUID="c09a9d73-c35e-40da-b545-ebf1b8724896" Dec 10 11:17:18 crc kubenswrapper[4780]: I1210 11:17:18.280631 4780 generic.go:334] "Generic (PLEG): container finished" podID="2df2ca18-27cb-48e0-842c-f346f4703a37" containerID="52fa087134da031e63f10253f05707262c2632a1257945320cfed4ac104f74f3" exitCode=1 Dec 10 11:17:18 crc kubenswrapper[4780]: I1210 11:17:18.280733 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-7788568699-swzvp" event={"ID":"2df2ca18-27cb-48e0-842c-f346f4703a37","Type":"ContainerDied","Data":"52fa087134da031e63f10253f05707262c2632a1257945320cfed4ac104f74f3"} Dec 10 11:17:18 crc kubenswrapper[4780]: I1210 11:17:18.282609 4780 scope.go:117] "RemoveContainer" containerID="52fa087134da031e63f10253f05707262c2632a1257945320cfed4ac104f74f3" Dec 10 11:17:18 crc kubenswrapper[4780]: E1210 11:17:18.283172 4780 pod_workers.go:1301] "Error 
syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-cfnapi\" with CrashLoopBackOff: \"back-off 10s restarting failed container=heat-cfnapi pod=heat-cfnapi-7788568699-swzvp_openstack(2df2ca18-27cb-48e0-842c-f346f4703a37)\"" pod="openstack/heat-cfnapi-7788568699-swzvp" podUID="2df2ca18-27cb-48e0-842c-f346f4703a37" Dec 10 11:17:18 crc kubenswrapper[4780]: I1210 11:17:18.301213 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstackclient" podStartSLOduration=2.653002949 podStartE2EDuration="45.30119087s" podCreationTimestamp="2025-12-10 11:16:33 +0000 UTC" firstStartedPulling="2025-12-10 11:16:34.510964719 +0000 UTC m=+1899.364358162" lastFinishedPulling="2025-12-10 11:17:17.15915264 +0000 UTC m=+1942.012546083" observedRunningTime="2025-12-10 11:17:18.30001433 +0000 UTC m=+1943.153407783" watchObservedRunningTime="2025-12-10 11:17:18.30119087 +0000 UTC m=+1943.154584313" Dec 10 11:17:18 crc kubenswrapper[4780]: I1210 11:17:18.406313 4780 scope.go:117] "RemoveContainer" containerID="420a020bd4954456b8ed1de68d4c9e5b89a061e63bbbd46f94b2ef8ade1e0353" Dec 10 11:17:18 crc kubenswrapper[4780]: I1210 11:17:18.624539 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Dec 10 11:17:18 crc kubenswrapper[4780]: I1210 11:17:18.739548 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-7756b9d78c-2nwlx" Dec 10 11:17:18 crc kubenswrapper[4780]: I1210 11:17:18.903589 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5c9776ccc5-wkfgs"] Dec 10 11:17:18 crc kubenswrapper[4780]: I1210 11:17:18.905374 4780 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-5c9776ccc5-wkfgs" podUID="7e45c931-6a45-4eb5-b511-3be5b47c2f4f" containerName="dnsmasq-dns" containerID="cri-o://970290ae16f5d622a134ec7215bb08fd80d8d9c966aa67a88af90738884d8672" gracePeriod=10 Dec 10 11:17:19 crc kubenswrapper[4780]: I1210 11:17:19.327807 4780 generic.go:334] "Generic (PLEG): container finished" podID="7e45c931-6a45-4eb5-b511-3be5b47c2f4f" containerID="970290ae16f5d622a134ec7215bb08fd80d8d9c966aa67a88af90738884d8672" exitCode=0 Dec 10 11:17:19 crc kubenswrapper[4780]: I1210 11:17:19.328035 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5c9776ccc5-wkfgs" event={"ID":"7e45c931-6a45-4eb5-b511-3be5b47c2f4f","Type":"ContainerDied","Data":"970290ae16f5d622a134ec7215bb08fd80d8d9c966aa67a88af90738884d8672"} Dec 10 11:17:19 crc kubenswrapper[4780]: I1210 11:17:19.336660 4780 scope.go:117] "RemoveContainer" containerID="61dcd300f2425a7bda15572ca98b5c00a398cba525447432a25091e819a341fb" Dec 10 11:17:19 crc kubenswrapper[4780]: E1210 11:17:19.337103 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-api\" with CrashLoopBackOff: \"back-off 10s restarting failed container=heat-api pod=heat-api-7c6599c744-svfvf_openstack(c09a9d73-c35e-40da-b545-ebf1b8724896)\"" pod="openstack/heat-api-7c6599c744-svfvf" podUID="c09a9d73-c35e-40da-b545-ebf1b8724896" Dec 10 11:17:19 crc kubenswrapper[4780]: I1210 11:17:19.358056 4780 scope.go:117] "RemoveContainer" containerID="52fa087134da031e63f10253f05707262c2632a1257945320cfed4ac104f74f3" Dec 10 11:17:19 crc kubenswrapper[4780]: E1210 11:17:19.358587 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-cfnapi\" with CrashLoopBackOff: \"back-off 10s restarting failed 
container=heat-cfnapi pod=heat-cfnapi-7788568699-swzvp_openstack(2df2ca18-27cb-48e0-842c-f346f4703a37)\"" pod="openstack/heat-cfnapi-7788568699-swzvp" podUID="2df2ca18-27cb-48e0-842c-f346f4703a37" Dec 10 11:17:19 crc kubenswrapper[4780]: I1210 11:17:19.500096 4780 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/cinder-scheduler-0" Dec 10 11:17:19 crc kubenswrapper[4780]: I1210 11:17:19.502043 4780 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/cinder-scheduler-0" podUID="274acc05-0f10-48e5-8fb8-44bc1ddca126" containerName="cinder-scheduler" probeResult="failure" output="Get \"http://10.217.0.223:8080/\": dial tcp 10.217.0.223:8080: connect: connection refused" Dec 10 11:17:20 crc kubenswrapper[4780]: I1210 11:17:20.609810 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5c9776ccc5-wkfgs" event={"ID":"7e45c931-6a45-4eb5-b511-3be5b47c2f4f","Type":"ContainerDied","Data":"6f95dcac5251b2475340432d6ea8ce559a86a527c357e9efc597e80c7ec3a760"} Dec 10 11:17:20 crc kubenswrapper[4780]: I1210 11:17:20.610287 4780 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="6f95dcac5251b2475340432d6ea8ce559a86a527c357e9efc597e80c7ec3a760" Dec 10 11:17:20 crc kubenswrapper[4780]: I1210 11:17:20.650374 4780 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5c9776ccc5-wkfgs" Dec 10 11:17:20 crc kubenswrapper[4780]: I1210 11:17:20.758759 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-conductor-db-sync-slw88"] Dec 10 11:17:20 crc kubenswrapper[4780]: E1210 11:17:20.759888 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="00fa7636-13e7-49b7-8ce0-dac88eab63d7" containerName="mariadb-account-create-update" Dec 10 11:17:20 crc kubenswrapper[4780]: I1210 11:17:20.759947 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="00fa7636-13e7-49b7-8ce0-dac88eab63d7" containerName="mariadb-account-create-update" Dec 10 11:17:20 crc kubenswrapper[4780]: E1210 11:17:20.759969 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b1f2fac2-0797-448c-b4d8-98ebd4eff159" containerName="mariadb-account-create-update" Dec 10 11:17:20 crc kubenswrapper[4780]: I1210 11:17:20.759977 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="b1f2fac2-0797-448c-b4d8-98ebd4eff159" containerName="mariadb-account-create-update" Dec 10 11:17:20 crc kubenswrapper[4780]: E1210 11:17:20.760002 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="87bfb484-2111-41a3-99d5-52e8db80f098" containerName="mariadb-database-create" Dec 10 11:17:20 crc kubenswrapper[4780]: I1210 11:17:20.760009 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="87bfb484-2111-41a3-99d5-52e8db80f098" containerName="mariadb-database-create" Dec 10 11:17:20 crc kubenswrapper[4780]: E1210 11:17:20.760040 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7e45c931-6a45-4eb5-b511-3be5b47c2f4f" containerName="dnsmasq-dns" Dec 10 11:17:20 crc kubenswrapper[4780]: I1210 11:17:20.760047 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="7e45c931-6a45-4eb5-b511-3be5b47c2f4f" containerName="dnsmasq-dns" Dec 10 11:17:20 crc kubenswrapper[4780]: E1210 11:17:20.760060 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7e45c931-6a45-4eb5-b511-3be5b47c2f4f" containerName="init" Dec 10 11:17:20 crc kubenswrapper[4780]: I1210 11:17:20.760066 4780 state_mem.go:107] "Deleted 
CPUSet assignment" podUID="7e45c931-6a45-4eb5-b511-3be5b47c2f4f" containerName="init" Dec 10 11:17:20 crc kubenswrapper[4780]: E1210 11:17:20.760081 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d9a943a1-6945-4117-a123-5c96d85b4e77" containerName="mariadb-account-create-update" Dec 10 11:17:20 crc kubenswrapper[4780]: I1210 11:17:20.760088 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="d9a943a1-6945-4117-a123-5c96d85b4e77" containerName="mariadb-account-create-update" Dec 10 11:17:20 crc kubenswrapper[4780]: E1210 11:17:20.760111 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a58ed76c-f42e-41ca-8e79-3b656701cdea" containerName="mariadb-database-create" Dec 10 11:17:20 crc kubenswrapper[4780]: I1210 11:17:20.760118 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="a58ed76c-f42e-41ca-8e79-3b656701cdea" containerName="mariadb-database-create" Dec 10 11:17:20 crc kubenswrapper[4780]: E1210 11:17:20.760130 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f8ddd440-8c9e-4d62-8efa-b5c8dd6bc990" containerName="mariadb-database-create" Dec 10 11:17:20 crc kubenswrapper[4780]: I1210 11:17:20.760136 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="f8ddd440-8c9e-4d62-8efa-b5c8dd6bc990" containerName="mariadb-database-create" Dec 10 11:17:20 crc kubenswrapper[4780]: I1210 11:17:20.760439 4780 memory_manager.go:354] "RemoveStaleState removing state" podUID="a58ed76c-f42e-41ca-8e79-3b656701cdea" containerName="mariadb-database-create" Dec 10 11:17:20 crc kubenswrapper[4780]: I1210 11:17:20.760482 4780 memory_manager.go:354] "RemoveStaleState removing state" podUID="d9a943a1-6945-4117-a123-5c96d85b4e77" containerName="mariadb-account-create-update" Dec 10 11:17:20 crc kubenswrapper[4780]: I1210 11:17:20.760507 4780 memory_manager.go:354] "RemoveStaleState removing state" podUID="7e45c931-6a45-4eb5-b511-3be5b47c2f4f" containerName="dnsmasq-dns" Dec 10 11:17:20 crc kubenswrapper[4780]: I1210 11:17:20.760536 4780 memory_manager.go:354] "RemoveStaleState removing state" podUID="f8ddd440-8c9e-4d62-8efa-b5c8dd6bc990" containerName="mariadb-database-create" Dec 10 11:17:20 crc kubenswrapper[4780]: I1210 11:17:20.760550 4780 memory_manager.go:354] "RemoveStaleState removing state" podUID="87bfb484-2111-41a3-99d5-52e8db80f098" containerName="mariadb-database-create" Dec 10 11:17:20 crc kubenswrapper[4780]: I1210 11:17:20.760562 4780 memory_manager.go:354] "RemoveStaleState removing state" podUID="00fa7636-13e7-49b7-8ce0-dac88eab63d7" containerName="mariadb-account-create-update" Dec 10 11:17:20 crc kubenswrapper[4780]: I1210 11:17:20.760578 4780 memory_manager.go:354] "RemoveStaleState removing state" podUID="b1f2fac2-0797-448c-b4d8-98ebd4eff159" containerName="mariadb-account-create-update" Dec 10 11:17:20 crc kubenswrapper[4780]: I1210 11:17:20.762048 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-slw88" Dec 10 11:17:20 crc kubenswrapper[4780]: I1210 11:17:20.790535 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-nova-dockercfg-2tcc2" Dec 10 11:17:20 crc kubenswrapper[4780]: I1210 11:17:20.790787 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-scripts" Dec 10 11:17:20 crc kubenswrapper[4780]: I1210 11:17:20.790910 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-config-data" Dec 10 11:17:20 crc kubenswrapper[4780]: I1210 11:17:20.792554 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/7e45c931-6a45-4eb5-b511-3be5b47c2f4f-ovsdbserver-nb\") pod \"7e45c931-6a45-4eb5-b511-3be5b47c2f4f\" (UID: \"7e45c931-6a45-4eb5-b511-3be5b47c2f4f\") " Dec 10 11:17:20 crc kubenswrapper[4780]: I1210 11:17:20.792698 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/7e45c931-6a45-4eb5-b511-3be5b47c2f4f-dns-svc\") pod \"7e45c931-6a45-4eb5-b511-3be5b47c2f4f\" (UID: \"7e45c931-6a45-4eb5-b511-3be5b47c2f4f\") " Dec 10 11:17:20 crc kubenswrapper[4780]: I1210 11:17:20.792763 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/7e45c931-6a45-4eb5-b511-3be5b47c2f4f-ovsdbserver-sb\") pod \"7e45c931-6a45-4eb5-b511-3be5b47c2f4f\" (UID: \"7e45c931-6a45-4eb5-b511-3be5b47c2f4f\") " Dec 10 11:17:20 crc kubenswrapper[4780]: I1210 11:17:20.792847 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mnpkv\" (UniqueName: \"kubernetes.io/projected/7e45c931-6a45-4eb5-b511-3be5b47c2f4f-kube-api-access-mnpkv\") pod \"7e45c931-6a45-4eb5-b511-3be5b47c2f4f\" (UID: \"7e45c931-6a45-4eb5-b511-3be5b47c2f4f\") " Dec 10 11:17:20 crc kubenswrapper[4780]: I1210 11:17:20.792980 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7e45c931-6a45-4eb5-b511-3be5b47c2f4f-config\") pod \"7e45c931-6a45-4eb5-b511-3be5b47c2f4f\" (UID: \"7e45c931-6a45-4eb5-b511-3be5b47c2f4f\") " Dec 10 11:17:20 crc kubenswrapper[4780]: I1210 11:17:20.793057 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/7e45c931-6a45-4eb5-b511-3be5b47c2f4f-dns-swift-storage-0\") pod \"7e45c931-6a45-4eb5-b511-3be5b47c2f4f\" (UID: \"7e45c931-6a45-4eb5-b511-3be5b47c2f4f\") " Dec 10 11:17:20 crc kubenswrapper[4780]: I1210 11:17:20.833242 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7e45c931-6a45-4eb5-b511-3be5b47c2f4f-kube-api-access-mnpkv" (OuterVolumeSpecName: "kube-api-access-mnpkv") pod "7e45c931-6a45-4eb5-b511-3be5b47c2f4f" (UID: "7e45c931-6a45-4eb5-b511-3be5b47c2f4f"). InnerVolumeSpecName "kube-api-access-mnpkv". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:17:20 crc kubenswrapper[4780]: I1210 11:17:20.904205 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a94e2b82-6087-4791-8f15-e1ca9e25028b-combined-ca-bundle\") pod \"nova-cell0-conductor-db-sync-slw88\" (UID: \"a94e2b82-6087-4791-8f15-e1ca9e25028b\") " pod="openstack/nova-cell0-conductor-db-sync-slw88" Dec 10 11:17:20 crc kubenswrapper[4780]: I1210 11:17:20.904319 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-crhlm\" (UniqueName: \"kubernetes.io/projected/a94e2b82-6087-4791-8f15-e1ca9e25028b-kube-api-access-crhlm\") pod \"nova-cell0-conductor-db-sync-slw88\" (UID: \"a94e2b82-6087-4791-8f15-e1ca9e25028b\") " pod="openstack/nova-cell0-conductor-db-sync-slw88" Dec 10 11:17:20 crc kubenswrapper[4780]: I1210 11:17:20.904519 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a94e2b82-6087-4791-8f15-e1ca9e25028b-config-data\") pod \"nova-cell0-conductor-db-sync-slw88\" (UID: \"a94e2b82-6087-4791-8f15-e1ca9e25028b\") " pod="openstack/nova-cell0-conductor-db-sync-slw88" Dec 10 11:17:20 crc kubenswrapper[4780]: I1210 11:17:20.904842 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a94e2b82-6087-4791-8f15-e1ca9e25028b-scripts\") pod \"nova-cell0-conductor-db-sync-slw88\" (UID: \"a94e2b82-6087-4791-8f15-e1ca9e25028b\") " pod="openstack/nova-cell0-conductor-db-sync-slw88" Dec 10 11:17:20 crc kubenswrapper[4780]: I1210 11:17:20.920054 4780 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mnpkv\" (UniqueName: \"kubernetes.io/projected/7e45c931-6a45-4eb5-b511-3be5b47c2f4f-kube-api-access-mnpkv\") on node \"crc\" DevicePath \"\"" Dec 10 11:17:20 crc kubenswrapper[4780]: I1210 11:17:20.924996 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-slw88"] Dec 10 11:17:21 crc kubenswrapper[4780]: I1210 11:17:21.027674 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-crhlm\" (UniqueName: \"kubernetes.io/projected/a94e2b82-6087-4791-8f15-e1ca9e25028b-kube-api-access-crhlm\") pod \"nova-cell0-conductor-db-sync-slw88\" (UID: \"a94e2b82-6087-4791-8f15-e1ca9e25028b\") " pod="openstack/nova-cell0-conductor-db-sync-slw88" Dec 10 11:17:21 crc kubenswrapper[4780]: I1210 11:17:21.027864 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a94e2b82-6087-4791-8f15-e1ca9e25028b-config-data\") pod \"nova-cell0-conductor-db-sync-slw88\" (UID: \"a94e2b82-6087-4791-8f15-e1ca9e25028b\") " pod="openstack/nova-cell0-conductor-db-sync-slw88" Dec 10 11:17:21 crc kubenswrapper[4780]: I1210 11:17:21.028141 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a94e2b82-6087-4791-8f15-e1ca9e25028b-scripts\") pod \"nova-cell0-conductor-db-sync-slw88\" (UID: \"a94e2b82-6087-4791-8f15-e1ca9e25028b\") " pod="openstack/nova-cell0-conductor-db-sync-slw88" Dec 10 11:17:21 crc kubenswrapper[4780]: I1210 11:17:21.028255 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/a94e2b82-6087-4791-8f15-e1ca9e25028b-combined-ca-bundle\") pod \"nova-cell0-conductor-db-sync-slw88\" (UID: \"a94e2b82-6087-4791-8f15-e1ca9e25028b\") " pod="openstack/nova-cell0-conductor-db-sync-slw88" Dec 10 11:17:21 crc kubenswrapper[4780]: I1210 11:17:21.081492 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a94e2b82-6087-4791-8f15-e1ca9e25028b-combined-ca-bundle\") pod \"nova-cell0-conductor-db-sync-slw88\" (UID: \"a94e2b82-6087-4791-8f15-e1ca9e25028b\") " pod="openstack/nova-cell0-conductor-db-sync-slw88" Dec 10 11:17:21 crc kubenswrapper[4780]: I1210 11:17:21.114038 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a94e2b82-6087-4791-8f15-e1ca9e25028b-config-data\") pod \"nova-cell0-conductor-db-sync-slw88\" (UID: \"a94e2b82-6087-4791-8f15-e1ca9e25028b\") " pod="openstack/nova-cell0-conductor-db-sync-slw88" Dec 10 11:17:21 crc kubenswrapper[4780]: I1210 11:17:21.114293 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a94e2b82-6087-4791-8f15-e1ca9e25028b-scripts\") pod \"nova-cell0-conductor-db-sync-slw88\" (UID: \"a94e2b82-6087-4791-8f15-e1ca9e25028b\") " pod="openstack/nova-cell0-conductor-db-sync-slw88" Dec 10 11:17:21 crc kubenswrapper[4780]: I1210 11:17:21.114479 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7e45c931-6a45-4eb5-b511-3be5b47c2f4f-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "7e45c931-6a45-4eb5-b511-3be5b47c2f4f" (UID: "7e45c931-6a45-4eb5-b511-3be5b47c2f4f"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 11:17:21 crc kubenswrapper[4780]: I1210 11:17:21.143308 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-crhlm\" (UniqueName: \"kubernetes.io/projected/a94e2b82-6087-4791-8f15-e1ca9e25028b-kube-api-access-crhlm\") pod \"nova-cell0-conductor-db-sync-slw88\" (UID: \"a94e2b82-6087-4791-8f15-e1ca9e25028b\") " pod="openstack/nova-cell0-conductor-db-sync-slw88" Dec 10 11:17:21 crc kubenswrapper[4780]: I1210 11:17:21.145288 4780 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/7e45c931-6a45-4eb5-b511-3be5b47c2f4f-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Dec 10 11:17:21 crc kubenswrapper[4780]: I1210 11:17:21.173603 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7e45c931-6a45-4eb5-b511-3be5b47c2f4f-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "7e45c931-6a45-4eb5-b511-3be5b47c2f4f" (UID: "7e45c931-6a45-4eb5-b511-3be5b47c2f4f"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 11:17:21 crc kubenswrapper[4780]: I1210 11:17:21.220965 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7e45c931-6a45-4eb5-b511-3be5b47c2f4f-config" (OuterVolumeSpecName: "config") pod "7e45c931-6a45-4eb5-b511-3be5b47c2f4f" (UID: "7e45c931-6a45-4eb5-b511-3be5b47c2f4f"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 11:17:21 crc kubenswrapper[4780]: I1210 11:17:21.265365 4780 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7e45c931-6a45-4eb5-b511-3be5b47c2f4f-config\") on node \"crc\" DevicePath \"\"" Dec 10 11:17:21 crc kubenswrapper[4780]: I1210 11:17:21.265441 4780 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/7e45c931-6a45-4eb5-b511-3be5b47c2f4f-dns-svc\") on node \"crc\" DevicePath \"\"" Dec 10 11:17:21 crc kubenswrapper[4780]: I1210 11:17:21.308099 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7e45c931-6a45-4eb5-b511-3be5b47c2f4f-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "7e45c931-6a45-4eb5-b511-3be5b47c2f4f" (UID: "7e45c931-6a45-4eb5-b511-3be5b47c2f4f"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 11:17:21 crc kubenswrapper[4780]: I1210 11:17:21.330764 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7e45c931-6a45-4eb5-b511-3be5b47c2f4f-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "7e45c931-6a45-4eb5-b511-3be5b47c2f4f" (UID: "7e45c931-6a45-4eb5-b511-3be5b47c2f4f"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 11:17:21 crc kubenswrapper[4780]: I1210 11:17:21.366725 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-slw88" Dec 10 11:17:21 crc kubenswrapper[4780]: I1210 11:17:21.371177 4780 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/7e45c931-6a45-4eb5-b511-3be5b47c2f4f-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Dec 10 11:17:21 crc kubenswrapper[4780]: I1210 11:17:21.371236 4780 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/7e45c931-6a45-4eb5-b511-3be5b47c2f4f-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Dec 10 11:17:21 crc kubenswrapper[4780]: I1210 11:17:21.552357 4780 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack/heat-api-7c6599c744-svfvf" Dec 10 11:17:21 crc kubenswrapper[4780]: I1210 11:17:21.553711 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/heat-api-7c6599c744-svfvf" Dec 10 11:17:21 crc kubenswrapper[4780]: I1210 11:17:21.553999 4780 scope.go:117] "RemoveContainer" containerID="61dcd300f2425a7bda15572ca98b5c00a398cba525447432a25091e819a341fb" Dec 10 11:17:21 crc kubenswrapper[4780]: E1210 11:17:21.554525 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-api\" with CrashLoopBackOff: \"back-off 10s restarting failed container=heat-api pod=heat-api-7c6599c744-svfvf_openstack(c09a9d73-c35e-40da-b545-ebf1b8724896)\"" pod="openstack/heat-api-7c6599c744-svfvf" podUID="c09a9d73-c35e-40da-b545-ebf1b8724896" Dec 10 11:17:21 crc kubenswrapper[4780]: I1210 11:17:21.587081 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/heat-cfnapi-7788568699-swzvp" Dec 10 11:17:21 crc kubenswrapper[4780]: I1210 11:17:21.587170 4780 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack/heat-cfnapi-7788568699-swzvp" Dec 10 11:17:21 crc kubenswrapper[4780]: I1210 11:17:21.588636 4780 
scope.go:117] "RemoveContainer" containerID="52fa087134da031e63f10253f05707262c2632a1257945320cfed4ac104f74f3" Dec 10 11:17:21 crc kubenswrapper[4780]: E1210 11:17:21.589329 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-cfnapi\" with CrashLoopBackOff: \"back-off 10s restarting failed container=heat-cfnapi pod=heat-cfnapi-7788568699-swzvp_openstack(2df2ca18-27cb-48e0-842c-f346f4703a37)\"" pod="openstack/heat-cfnapi-7788568699-swzvp" podUID="2df2ca18-27cb-48e0-842c-f346f4703a37" Dec 10 11:17:21 crc kubenswrapper[4780]: I1210 11:17:21.637903 4780 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5c9776ccc5-wkfgs" Dec 10 11:17:21 crc kubenswrapper[4780]: I1210 11:17:21.638867 4780 scope.go:117] "RemoveContainer" containerID="61dcd300f2425a7bda15572ca98b5c00a398cba525447432a25091e819a341fb" Dec 10 11:17:21 crc kubenswrapper[4780]: E1210 11:17:21.682556 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-api\" with CrashLoopBackOff: \"back-off 10s restarting failed container=heat-api pod=heat-api-7c6599c744-svfvf_openstack(c09a9d73-c35e-40da-b545-ebf1b8724896)\"" pod="openstack/heat-api-7c6599c744-svfvf" podUID="c09a9d73-c35e-40da-b545-ebf1b8724896" Dec 10 11:17:21 crc kubenswrapper[4780]: I1210 11:17:21.871631 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5c9776ccc5-wkfgs"] Dec 10 11:17:21 crc kubenswrapper[4780]: I1210 11:17:21.891942 4780 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-5c9776ccc5-wkfgs"] Dec 10 11:17:22 crc kubenswrapper[4780]: I1210 11:17:22.085410 4780 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7e45c931-6a45-4eb5-b511-3be5b47c2f4f" path="/var/lib/kubelet/pods/7e45c931-6a45-4eb5-b511-3be5b47c2f4f/volumes" Dec 10 11:17:22 crc kubenswrapper[4780]: I1210 11:17:22.096893 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/heat-engine-86d9dd567b-q6nsm" Dec 10 11:17:22 crc kubenswrapper[4780]: I1210 11:17:22.247624 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-engine-f7d475897-pp967"] Dec 10 11:17:22 crc kubenswrapper[4780]: I1210 11:17:22.248323 4780 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/heat-engine-f7d475897-pp967" podUID="068aca35-6c22-4164-bf63-481836e82331" containerName="heat-engine" containerID="cri-o://4ea629a8edba4ea35e58de98127ed891339d71b7d3e39119bc8fb00fe19c1d73" gracePeriod=60 Dec 10 11:17:22 crc kubenswrapper[4780]: E1210 11:17:22.267670 4780 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="4ea629a8edba4ea35e58de98127ed891339d71b7d3e39119bc8fb00fe19c1d73" cmd=["/usr/bin/pgrep","-r","DRST","heat-engine"] Dec 10 11:17:22 crc kubenswrapper[4780]: E1210 11:17:22.271365 4780 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="4ea629a8edba4ea35e58de98127ed891339d71b7d3e39119bc8fb00fe19c1d73" cmd=["/usr/bin/pgrep","-r","DRST","heat-engine"] Dec 10 11:17:22 crc kubenswrapper[4780]: E1210 11:17:22.281268 4780 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: 
cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="4ea629a8edba4ea35e58de98127ed891339d71b7d3e39119bc8fb00fe19c1d73" cmd=["/usr/bin/pgrep","-r","DRST","heat-engine"] Dec 10 11:17:22 crc kubenswrapper[4780]: E1210 11:17:22.281395 4780 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/heat-engine-f7d475897-pp967" podUID="068aca35-6c22-4164-bf63-481836e82331" containerName="heat-engine" Dec 10 11:17:22 crc kubenswrapper[4780]: I1210 11:17:22.335445 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-slw88"] Dec 10 11:17:22 crc kubenswrapper[4780]: W1210 11:17:22.376130 4780 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda94e2b82_6087_4791_8f15_e1ca9e25028b.slice/crio-9685a6b9643a6f71a137f67a2bea083504c69a33470941fc116fa686e3a4e02f WatchSource:0}: Error finding container 9685a6b9643a6f71a137f67a2bea083504c69a33470941fc116fa686e3a4e02f: Status 404 returned error can't find the container with id 9685a6b9643a6f71a137f67a2bea083504c69a33470941fc116fa686e3a4e02f Dec 10 11:17:22 crc kubenswrapper[4780]: I1210 11:17:22.691885 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"462cb03c-f171-4b9e-8bf2-b63ec19e6a06","Type":"ContainerStarted","Data":"cd2fd40c94f3f1e8ad743013a401a63c30ef5573adcc6429b319c7d3bc8197ad"} Dec 10 11:17:22 crc kubenswrapper[4780]: I1210 11:17:22.692372 4780 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="462cb03c-f171-4b9e-8bf2-b63ec19e6a06" containerName="sg-core" containerID="cri-o://e1dccb49125ca08f170604f7879f137628038860557c9038dae1b6d83136b3f0" gracePeriod=30 Dec 10 11:17:22 crc kubenswrapper[4780]: I1210 11:17:22.692457 4780 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="462cb03c-f171-4b9e-8bf2-b63ec19e6a06" containerName="proxy-httpd" containerID="cri-o://cd2fd40c94f3f1e8ad743013a401a63c30ef5573adcc6429b319c7d3bc8197ad" gracePeriod=30 Dec 10 11:17:22 crc kubenswrapper[4780]: I1210 11:17:22.692404 4780 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="462cb03c-f171-4b9e-8bf2-b63ec19e6a06" containerName="ceilometer-notification-agent" containerID="cri-o://796da8de351bc893629f2d518e50615ba779ea06ca569d468390fe6eaeef5431" gracePeriod=30 Dec 10 11:17:22 crc kubenswrapper[4780]: I1210 11:17:22.692311 4780 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="462cb03c-f171-4b9e-8bf2-b63ec19e6a06" containerName="ceilometer-central-agent" containerID="cri-o://6ee625cf7513efd4f88067c7645ead80ccb89341b0b7ba6639feb075d3f53231" gracePeriod=30 Dec 10 11:17:22 crc kubenswrapper[4780]: I1210 11:17:22.693153 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Dec 10 11:17:22 crc kubenswrapper[4780]: I1210 11:17:22.709100 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-slw88" event={"ID":"a94e2b82-6087-4791-8f15-e1ca9e25028b","Type":"ContainerStarted","Data":"9685a6b9643a6f71a137f67a2bea083504c69a33470941fc116fa686e3a4e02f"} Dec 10 11:17:22 crc kubenswrapper[4780]: I1210 11:17:22.757667 4780 
pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=7.121764356 podStartE2EDuration="19.757633898s" podCreationTimestamp="2025-12-10 11:17:03 +0000 UTC" firstStartedPulling="2025-12-10 11:17:08.07318003 +0000 UTC m=+1932.926573473" lastFinishedPulling="2025-12-10 11:17:20.709049572 +0000 UTC m=+1945.562443015" observedRunningTime="2025-12-10 11:17:22.728346578 +0000 UTC m=+1947.581740021" watchObservedRunningTime="2025-12-10 11:17:22.757633898 +0000 UTC m=+1947.611027351" Dec 10 11:17:24 crc kubenswrapper[4780]: E1210 11:17:24.469690 4780 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="4ea629a8edba4ea35e58de98127ed891339d71b7d3e39119bc8fb00fe19c1d73" cmd=["/usr/bin/pgrep","-r","DRST","heat-engine"] Dec 10 11:17:24 crc kubenswrapper[4780]: E1210 11:17:24.478005 4780 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="4ea629a8edba4ea35e58de98127ed891339d71b7d3e39119bc8fb00fe19c1d73" cmd=["/usr/bin/pgrep","-r","DRST","heat-engine"] Dec 10 11:17:24 crc kubenswrapper[4780]: E1210 11:17:24.558238 4780 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="4ea629a8edba4ea35e58de98127ed891339d71b7d3e39119bc8fb00fe19c1d73" cmd=["/usr/bin/pgrep","-r","DRST","heat-engine"] Dec 10 11:17:24 crc kubenswrapper[4780]: E1210 11:17:24.558372 4780 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/heat-engine-f7d475897-pp967" podUID="068aca35-6c22-4164-bf63-481836e82331" containerName="heat-engine" Dec 10 11:17:24 crc kubenswrapper[4780]: I1210 11:17:24.559385 4780 generic.go:334] "Generic (PLEG): container finished" podID="462cb03c-f171-4b9e-8bf2-b63ec19e6a06" containerID="e1dccb49125ca08f170604f7879f137628038860557c9038dae1b6d83136b3f0" exitCode=2 Dec 10 11:17:24 crc kubenswrapper[4780]: I1210 11:17:24.559637 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"462cb03c-f171-4b9e-8bf2-b63ec19e6a06","Type":"ContainerDied","Data":"e1dccb49125ca08f170604f7879f137628038860557c9038dae1b6d83136b3f0"} Dec 10 11:17:24 crc kubenswrapper[4780]: I1210 11:17:24.852315 4780 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/cinder-api-0" podUID="5a0987c6-976b-4d4e-9456-4516cdaf53a0" containerName="cinder-api" probeResult="failure" output="Get \"https://10.217.0.226:8776/healthcheck\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Dec 10 11:17:25 crc kubenswrapper[4780]: I1210 11:17:25.585341 4780 generic.go:334] "Generic (PLEG): container finished" podID="462cb03c-f171-4b9e-8bf2-b63ec19e6a06" containerID="cd2fd40c94f3f1e8ad743013a401a63c30ef5573adcc6429b319c7d3bc8197ad" exitCode=0 Dec 10 11:17:25 crc kubenswrapper[4780]: I1210 11:17:25.585884 4780 generic.go:334] "Generic (PLEG): container finished" podID="462cb03c-f171-4b9e-8bf2-b63ec19e6a06" containerID="796da8de351bc893629f2d518e50615ba779ea06ca569d468390fe6eaeef5431" exitCode=0 Dec 10 11:17:25 crc 
kubenswrapper[4780]: I1210 11:17:25.585974 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"462cb03c-f171-4b9e-8bf2-b63ec19e6a06","Type":"ContainerDied","Data":"cd2fd40c94f3f1e8ad743013a401a63c30ef5573adcc6429b319c7d3bc8197ad"} Dec 10 11:17:25 crc kubenswrapper[4780]: I1210 11:17:25.586024 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"462cb03c-f171-4b9e-8bf2-b63ec19e6a06","Type":"ContainerDied","Data":"796da8de351bc893629f2d518e50615ba779ea06ca569d468390fe6eaeef5431"} Dec 10 11:17:26 crc kubenswrapper[4780]: I1210 11:17:26.152023 4780 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/cinder-scheduler-0" Dec 10 11:17:26 crc kubenswrapper[4780]: I1210 11:17:26.711340 4780 generic.go:334] "Generic (PLEG): container finished" podID="462cb03c-f171-4b9e-8bf2-b63ec19e6a06" containerID="6ee625cf7513efd4f88067c7645ead80ccb89341b0b7ba6639feb075d3f53231" exitCode=0 Dec 10 11:17:26 crc kubenswrapper[4780]: I1210 11:17:26.713733 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"462cb03c-f171-4b9e-8bf2-b63ec19e6a06","Type":"ContainerDied","Data":"6ee625cf7513efd4f88067c7645ead80ccb89341b0b7ba6639feb075d3f53231"} Dec 10 11:17:26 crc kubenswrapper[4780]: I1210 11:17:26.822061 4780 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Dec 10 11:17:26 crc kubenswrapper[4780]: I1210 11:17:26.941820 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/462cb03c-f171-4b9e-8bf2-b63ec19e6a06-scripts\") pod \"462cb03c-f171-4b9e-8bf2-b63ec19e6a06\" (UID: \"462cb03c-f171-4b9e-8bf2-b63ec19e6a06\") " Dec 10 11:17:26 crc kubenswrapper[4780]: I1210 11:17:26.942037 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/462cb03c-f171-4b9e-8bf2-b63ec19e6a06-combined-ca-bundle\") pod \"462cb03c-f171-4b9e-8bf2-b63ec19e6a06\" (UID: \"462cb03c-f171-4b9e-8bf2-b63ec19e6a06\") " Dec 10 11:17:26 crc kubenswrapper[4780]: I1210 11:17:26.942260 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/462cb03c-f171-4b9e-8bf2-b63ec19e6a06-log-httpd\") pod \"462cb03c-f171-4b9e-8bf2-b63ec19e6a06\" (UID: \"462cb03c-f171-4b9e-8bf2-b63ec19e6a06\") " Dec 10 11:17:26 crc kubenswrapper[4780]: I1210 11:17:26.942445 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/462cb03c-f171-4b9e-8bf2-b63ec19e6a06-config-data\") pod \"462cb03c-f171-4b9e-8bf2-b63ec19e6a06\" (UID: \"462cb03c-f171-4b9e-8bf2-b63ec19e6a06\") " Dec 10 11:17:26 crc kubenswrapper[4780]: I1210 11:17:26.942503 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/462cb03c-f171-4b9e-8bf2-b63ec19e6a06-sg-core-conf-yaml\") pod \"462cb03c-f171-4b9e-8bf2-b63ec19e6a06\" (UID: \"462cb03c-f171-4b9e-8bf2-b63ec19e6a06\") " Dec 10 11:17:26 crc kubenswrapper[4780]: I1210 11:17:26.942568 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/462cb03c-f171-4b9e-8bf2-b63ec19e6a06-run-httpd\") pod \"462cb03c-f171-4b9e-8bf2-b63ec19e6a06\" (UID: \"462cb03c-f171-4b9e-8bf2-b63ec19e6a06\") 
" Dec 10 11:17:26 crc kubenswrapper[4780]: I1210 11:17:26.942682 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wvkcc\" (UniqueName: \"kubernetes.io/projected/462cb03c-f171-4b9e-8bf2-b63ec19e6a06-kube-api-access-wvkcc\") pod \"462cb03c-f171-4b9e-8bf2-b63ec19e6a06\" (UID: \"462cb03c-f171-4b9e-8bf2-b63ec19e6a06\") " Dec 10 11:17:26 crc kubenswrapper[4780]: I1210 11:17:26.944457 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/462cb03c-f171-4b9e-8bf2-b63ec19e6a06-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "462cb03c-f171-4b9e-8bf2-b63ec19e6a06" (UID: "462cb03c-f171-4b9e-8bf2-b63ec19e6a06"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 11:17:26 crc kubenswrapper[4780]: I1210 11:17:26.945847 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/462cb03c-f171-4b9e-8bf2-b63ec19e6a06-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "462cb03c-f171-4b9e-8bf2-b63ec19e6a06" (UID: "462cb03c-f171-4b9e-8bf2-b63ec19e6a06"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 11:17:26 crc kubenswrapper[4780]: I1210 11:17:26.968796 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/462cb03c-f171-4b9e-8bf2-b63ec19e6a06-scripts" (OuterVolumeSpecName: "scripts") pod "462cb03c-f171-4b9e-8bf2-b63ec19e6a06" (UID: "462cb03c-f171-4b9e-8bf2-b63ec19e6a06"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:17:26 crc kubenswrapper[4780]: I1210 11:17:26.979198 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/462cb03c-f171-4b9e-8bf2-b63ec19e6a06-kube-api-access-wvkcc" (OuterVolumeSpecName: "kube-api-access-wvkcc") pod "462cb03c-f171-4b9e-8bf2-b63ec19e6a06" (UID: "462cb03c-f171-4b9e-8bf2-b63ec19e6a06"). InnerVolumeSpecName "kube-api-access-wvkcc". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:17:27 crc kubenswrapper[4780]: I1210 11:17:27.036404 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/462cb03c-f171-4b9e-8bf2-b63ec19e6a06-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "462cb03c-f171-4b9e-8bf2-b63ec19e6a06" (UID: "462cb03c-f171-4b9e-8bf2-b63ec19e6a06"). InnerVolumeSpecName "sg-core-conf-yaml". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:17:27 crc kubenswrapper[4780]: I1210 11:17:27.048440 4780 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/462cb03c-f171-4b9e-8bf2-b63ec19e6a06-run-httpd\") on node \"crc\" DevicePath \"\"" Dec 10 11:17:27 crc kubenswrapper[4780]: I1210 11:17:27.048495 4780 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wvkcc\" (UniqueName: \"kubernetes.io/projected/462cb03c-f171-4b9e-8bf2-b63ec19e6a06-kube-api-access-wvkcc\") on node \"crc\" DevicePath \"\"" Dec 10 11:17:27 crc kubenswrapper[4780]: I1210 11:17:27.048509 4780 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/462cb03c-f171-4b9e-8bf2-b63ec19e6a06-scripts\") on node \"crc\" DevicePath \"\"" Dec 10 11:17:27 crc kubenswrapper[4780]: I1210 11:17:27.048518 4780 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/462cb03c-f171-4b9e-8bf2-b63ec19e6a06-log-httpd\") on node \"crc\" DevicePath \"\"" Dec 10 11:17:27 crc kubenswrapper[4780]: I1210 11:17:27.048529 4780 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/462cb03c-f171-4b9e-8bf2-b63ec19e6a06-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Dec 10 11:17:27 crc kubenswrapper[4780]: I1210 11:17:27.972493 4780 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/cinder-api-0" podUID="5a0987c6-976b-4d4e-9456-4516cdaf53a0" containerName="cinder-api" probeResult="failure" output="Get \"https://10.217.0.226:8776/healthcheck\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Dec 10 11:17:27 crc kubenswrapper[4780]: I1210 11:17:27.999340 4780 scope.go:117] "RemoveContainer" containerID="90f38aa24e74e78a263a82b742bebf91991ff25f2212114904e33abbbd82de16" Dec 10 11:17:28 crc kubenswrapper[4780]: E1210 11:17:28.002476 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xhdr5_openshift-machine-config-operator(6bf1dca1-b191-4796-b326-baac53e84045)\"" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" podUID="6bf1dca1-b191-4796-b326-baac53e84045" Dec 10 11:17:28 crc kubenswrapper[4780]: I1210 11:17:28.310945 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/heat-cfnapi-5fbd6d5fcb-7dcpq" Dec 10 11:17:28 crc kubenswrapper[4780]: I1210 11:17:28.311017 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/heat-api-5bffc7b484-bhjz4" Dec 10 11:17:28 crc kubenswrapper[4780]: I1210 11:17:28.353340 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"462cb03c-f171-4b9e-8bf2-b63ec19e6a06","Type":"ContainerDied","Data":"8be6df67bad5e3411c1aa681adc407659a2fd48b0dc8eb50672639198f1564b3"} Dec 10 11:17:28 crc kubenswrapper[4780]: I1210 11:17:28.353431 4780 scope.go:117] "RemoveContainer" containerID="cd2fd40c94f3f1e8ad743013a401a63c30ef5573adcc6429b319c7d3bc8197ad" Dec 10 11:17:28 crc kubenswrapper[4780]: I1210 11:17:28.353691 4780 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Dec 10 11:17:28 crc kubenswrapper[4780]: I1210 11:17:28.370152 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/462cb03c-f171-4b9e-8bf2-b63ec19e6a06-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "462cb03c-f171-4b9e-8bf2-b63ec19e6a06" (UID: "462cb03c-f171-4b9e-8bf2-b63ec19e6a06"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:17:28 crc kubenswrapper[4780]: I1210 11:17:28.458292 4780 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/462cb03c-f171-4b9e-8bf2-b63ec19e6a06-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 10 11:17:28 crc kubenswrapper[4780]: I1210 11:17:28.501518 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/462cb03c-f171-4b9e-8bf2-b63ec19e6a06-config-data" (OuterVolumeSpecName: "config-data") pod "462cb03c-f171-4b9e-8bf2-b63ec19e6a06" (UID: "462cb03c-f171-4b9e-8bf2-b63ec19e6a06"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:17:28 crc kubenswrapper[4780]: I1210 11:17:28.589213 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-api-7c6599c744-svfvf"] Dec 10 11:17:28 crc kubenswrapper[4780]: I1210 11:17:28.616319 4780 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/462cb03c-f171-4b9e-8bf2-b63ec19e6a06-config-data\") on node \"crc\" DevicePath \"\"" Dec 10 11:17:28 crc kubenswrapper[4780]: I1210 11:17:28.645223 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-cfnapi-7788568699-swzvp"] Dec 10 11:17:28 crc kubenswrapper[4780]: I1210 11:17:28.780663 4780 scope.go:117] "RemoveContainer" containerID="e1dccb49125ca08f170604f7879f137628038860557c9038dae1b6d83136b3f0" Dec 10 11:17:28 crc kubenswrapper[4780]: I1210 11:17:28.874016 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Dec 10 11:17:28 crc kubenswrapper[4780]: I1210 11:17:28.891812 4780 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Dec 10 11:17:28 crc kubenswrapper[4780]: I1210 11:17:28.909157 4780 scope.go:117] "RemoveContainer" containerID="796da8de351bc893629f2d518e50615ba779ea06ca569d468390fe6eaeef5431" Dec 10 11:17:28 crc kubenswrapper[4780]: I1210 11:17:28.919841 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Dec 10 11:17:28 crc kubenswrapper[4780]: E1210 11:17:28.924777 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="462cb03c-f171-4b9e-8bf2-b63ec19e6a06" containerName="proxy-httpd" Dec 10 11:17:28 crc kubenswrapper[4780]: I1210 11:17:28.932317 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="462cb03c-f171-4b9e-8bf2-b63ec19e6a06" containerName="proxy-httpd" Dec 10 11:17:28 crc kubenswrapper[4780]: E1210 11:17:28.932796 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="462cb03c-f171-4b9e-8bf2-b63ec19e6a06" containerName="sg-core" Dec 10 11:17:28 crc kubenswrapper[4780]: I1210 11:17:28.933099 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="462cb03c-f171-4b9e-8bf2-b63ec19e6a06" containerName="sg-core" Dec 10 11:17:28 crc kubenswrapper[4780]: E1210 11:17:28.933275 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="462cb03c-f171-4b9e-8bf2-b63ec19e6a06" 
containerName="ceilometer-notification-agent" Dec 10 11:17:28 crc kubenswrapper[4780]: I1210 11:17:28.933368 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="462cb03c-f171-4b9e-8bf2-b63ec19e6a06" containerName="ceilometer-notification-agent" Dec 10 11:17:28 crc kubenswrapper[4780]: E1210 11:17:28.933483 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="462cb03c-f171-4b9e-8bf2-b63ec19e6a06" containerName="ceilometer-central-agent" Dec 10 11:17:28 crc kubenswrapper[4780]: I1210 11:17:28.933590 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="462cb03c-f171-4b9e-8bf2-b63ec19e6a06" containerName="ceilometer-central-agent" Dec 10 11:17:28 crc kubenswrapper[4780]: I1210 11:17:28.934470 4780 memory_manager.go:354] "RemoveStaleState removing state" podUID="462cb03c-f171-4b9e-8bf2-b63ec19e6a06" containerName="ceilometer-notification-agent" Dec 10 11:17:28 crc kubenswrapper[4780]: I1210 11:17:28.934601 4780 memory_manager.go:354] "RemoveStaleState removing state" podUID="462cb03c-f171-4b9e-8bf2-b63ec19e6a06" containerName="proxy-httpd" Dec 10 11:17:28 crc kubenswrapper[4780]: I1210 11:17:28.934720 4780 memory_manager.go:354] "RemoveStaleState removing state" podUID="462cb03c-f171-4b9e-8bf2-b63ec19e6a06" containerName="ceilometer-central-agent" Dec 10 11:17:28 crc kubenswrapper[4780]: I1210 11:17:28.934856 4780 memory_manager.go:354] "RemoveStaleState removing state" podUID="462cb03c-f171-4b9e-8bf2-b63ec19e6a06" containerName="sg-core" Dec 10 11:17:28 crc kubenswrapper[4780]: I1210 11:17:28.951346 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Dec 10 11:17:28 crc kubenswrapper[4780]: I1210 11:17:28.943905 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Dec 10 11:17:28 crc kubenswrapper[4780]: I1210 11:17:28.959785 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Dec 10 11:17:28 crc kubenswrapper[4780]: I1210 11:17:28.960149 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Dec 10 11:17:29 crc kubenswrapper[4780]: I1210 11:17:29.044990 4780 scope.go:117] "RemoveContainer" containerID="6ee625cf7513efd4f88067c7645ead80ccb89341b0b7ba6639feb075d3f53231" Dec 10 11:17:29 crc kubenswrapper[4780]: I1210 11:17:29.169124 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1c47b7ce-4a43-460f-8386-d1541a901ea2-config-data\") pod \"ceilometer-0\" (UID: \"1c47b7ce-4a43-460f-8386-d1541a901ea2\") " pod="openstack/ceilometer-0" Dec 10 11:17:29 crc kubenswrapper[4780]: I1210 11:17:29.169212 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/1c47b7ce-4a43-460f-8386-d1541a901ea2-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"1c47b7ce-4a43-460f-8386-d1541a901ea2\") " pod="openstack/ceilometer-0" Dec 10 11:17:29 crc kubenswrapper[4780]: I1210 11:17:29.169237 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-skksj\" (UniqueName: \"kubernetes.io/projected/1c47b7ce-4a43-460f-8386-d1541a901ea2-kube-api-access-skksj\") pod \"ceilometer-0\" (UID: \"1c47b7ce-4a43-460f-8386-d1541a901ea2\") " pod="openstack/ceilometer-0" Dec 10 11:17:29 crc kubenswrapper[4780]: I1210 11:17:29.169267 4780 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1c47b7ce-4a43-460f-8386-d1541a901ea2-log-httpd\") pod \"ceilometer-0\" (UID: \"1c47b7ce-4a43-460f-8386-d1541a901ea2\") " pod="openstack/ceilometer-0" Dec 10 11:17:29 crc kubenswrapper[4780]: I1210 11:17:29.169372 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1c47b7ce-4a43-460f-8386-d1541a901ea2-run-httpd\") pod \"ceilometer-0\" (UID: \"1c47b7ce-4a43-460f-8386-d1541a901ea2\") " pod="openstack/ceilometer-0" Dec 10 11:17:29 crc kubenswrapper[4780]: I1210 11:17:29.169414 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1c47b7ce-4a43-460f-8386-d1541a901ea2-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"1c47b7ce-4a43-460f-8386-d1541a901ea2\") " pod="openstack/ceilometer-0" Dec 10 11:17:29 crc kubenswrapper[4780]: I1210 11:17:29.169466 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1c47b7ce-4a43-460f-8386-d1541a901ea2-scripts\") pod \"ceilometer-0\" (UID: \"1c47b7ce-4a43-460f-8386-d1541a901ea2\") " pod="openstack/ceilometer-0" Dec 10 11:17:29 crc kubenswrapper[4780]: I1210 11:17:29.275839 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1c47b7ce-4a43-460f-8386-d1541a901ea2-config-data\") pod \"ceilometer-0\" (UID: \"1c47b7ce-4a43-460f-8386-d1541a901ea2\") " pod="openstack/ceilometer-0" Dec 10 11:17:29 crc kubenswrapper[4780]: I1210 11:17:29.275957 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/1c47b7ce-4a43-460f-8386-d1541a901ea2-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"1c47b7ce-4a43-460f-8386-d1541a901ea2\") " pod="openstack/ceilometer-0" Dec 10 11:17:29 crc kubenswrapper[4780]: I1210 11:17:29.275991 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-skksj\" (UniqueName: \"kubernetes.io/projected/1c47b7ce-4a43-460f-8386-d1541a901ea2-kube-api-access-skksj\") pod \"ceilometer-0\" (UID: \"1c47b7ce-4a43-460f-8386-d1541a901ea2\") " pod="openstack/ceilometer-0" Dec 10 11:17:29 crc kubenswrapper[4780]: I1210 11:17:29.277306 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1c47b7ce-4a43-460f-8386-d1541a901ea2-log-httpd\") pod \"ceilometer-0\" (UID: \"1c47b7ce-4a43-460f-8386-d1541a901ea2\") " pod="openstack/ceilometer-0" Dec 10 11:17:29 crc kubenswrapper[4780]: I1210 11:17:29.277794 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1c47b7ce-4a43-460f-8386-d1541a901ea2-run-httpd\") pod \"ceilometer-0\" (UID: \"1c47b7ce-4a43-460f-8386-d1541a901ea2\") " pod="openstack/ceilometer-0" Dec 10 11:17:29 crc kubenswrapper[4780]: I1210 11:17:29.277914 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1c47b7ce-4a43-460f-8386-d1541a901ea2-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"1c47b7ce-4a43-460f-8386-d1541a901ea2\") " pod="openstack/ceilometer-0" Dec 10 11:17:29 crc kubenswrapper[4780]: 
I1210 11:17:29.278094 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1c47b7ce-4a43-460f-8386-d1541a901ea2-scripts\") pod \"ceilometer-0\" (UID: \"1c47b7ce-4a43-460f-8386-d1541a901ea2\") " pod="openstack/ceilometer-0" Dec 10 11:17:29 crc kubenswrapper[4780]: I1210 11:17:29.282347 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1c47b7ce-4a43-460f-8386-d1541a901ea2-log-httpd\") pod \"ceilometer-0\" (UID: \"1c47b7ce-4a43-460f-8386-d1541a901ea2\") " pod="openstack/ceilometer-0" Dec 10 11:17:29 crc kubenswrapper[4780]: I1210 11:17:29.283411 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1c47b7ce-4a43-460f-8386-d1541a901ea2-run-httpd\") pod \"ceilometer-0\" (UID: \"1c47b7ce-4a43-460f-8386-d1541a901ea2\") " pod="openstack/ceilometer-0" Dec 10 11:17:29 crc kubenswrapper[4780]: I1210 11:17:29.296349 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1c47b7ce-4a43-460f-8386-d1541a901ea2-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"1c47b7ce-4a43-460f-8386-d1541a901ea2\") " pod="openstack/ceilometer-0" Dec 10 11:17:29 crc kubenswrapper[4780]: I1210 11:17:29.298219 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1c47b7ce-4a43-460f-8386-d1541a901ea2-config-data\") pod \"ceilometer-0\" (UID: \"1c47b7ce-4a43-460f-8386-d1541a901ea2\") " pod="openstack/ceilometer-0" Dec 10 11:17:29 crc kubenswrapper[4780]: I1210 11:17:29.298689 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1c47b7ce-4a43-460f-8386-d1541a901ea2-scripts\") pod \"ceilometer-0\" (UID: \"1c47b7ce-4a43-460f-8386-d1541a901ea2\") " pod="openstack/ceilometer-0" Dec 10 11:17:29 crc kubenswrapper[4780]: I1210 11:17:29.320987 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/1c47b7ce-4a43-460f-8386-d1541a901ea2-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"1c47b7ce-4a43-460f-8386-d1541a901ea2\") " pod="openstack/ceilometer-0" Dec 10 11:17:29 crc kubenswrapper[4780]: I1210 11:17:29.337812 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-skksj\" (UniqueName: \"kubernetes.io/projected/1c47b7ce-4a43-460f-8386-d1541a901ea2-kube-api-access-skksj\") pod \"ceilometer-0\" (UID: \"1c47b7ce-4a43-460f-8386-d1541a901ea2\") " pod="openstack/ceilometer-0" Dec 10 11:17:29 crc kubenswrapper[4780]: I1210 11:17:29.479225 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/heat-cfnapi-54f5bc9f87-vp45v" Dec 10 11:17:29 crc kubenswrapper[4780]: I1210 11:17:29.628532 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Dec 10 11:17:29 crc kubenswrapper[4780]: I1210 11:17:29.857263 4780 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/cinder-api-0" podUID="5a0987c6-976b-4d4e-9456-4516cdaf53a0" containerName="cinder-api" probeResult="failure" output="Get \"https://10.217.0.226:8776/healthcheck\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Dec 10 11:17:29 crc kubenswrapper[4780]: I1210 11:17:29.927813 4780 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-cfnapi-7788568699-swzvp" Dec 10 11:17:29 crc kubenswrapper[4780]: I1210 11:17:29.955320 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2df2ca18-27cb-48e0-842c-f346f4703a37-combined-ca-bundle\") pod \"2df2ca18-27cb-48e0-842c-f346f4703a37\" (UID: \"2df2ca18-27cb-48e0-842c-f346f4703a37\") " Dec 10 11:17:29 crc kubenswrapper[4780]: I1210 11:17:29.955482 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2df2ca18-27cb-48e0-842c-f346f4703a37-config-data\") pod \"2df2ca18-27cb-48e0-842c-f346f4703a37\" (UID: \"2df2ca18-27cb-48e0-842c-f346f4703a37\") " Dec 10 11:17:29 crc kubenswrapper[4780]: I1210 11:17:29.955597 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5gmgq\" (UniqueName: \"kubernetes.io/projected/2df2ca18-27cb-48e0-842c-f346f4703a37-kube-api-access-5gmgq\") pod \"2df2ca18-27cb-48e0-842c-f346f4703a37\" (UID: \"2df2ca18-27cb-48e0-842c-f346f4703a37\") " Dec 10 11:17:29 crc kubenswrapper[4780]: I1210 11:17:29.955816 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/2df2ca18-27cb-48e0-842c-f346f4703a37-config-data-custom\") pod \"2df2ca18-27cb-48e0-842c-f346f4703a37\" (UID: \"2df2ca18-27cb-48e0-842c-f346f4703a37\") " Dec 10 11:17:29 crc kubenswrapper[4780]: I1210 11:17:29.979103 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2df2ca18-27cb-48e0-842c-f346f4703a37-kube-api-access-5gmgq" (OuterVolumeSpecName: "kube-api-access-5gmgq") pod "2df2ca18-27cb-48e0-842c-f346f4703a37" (UID: "2df2ca18-27cb-48e0-842c-f346f4703a37"). InnerVolumeSpecName "kube-api-access-5gmgq". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:17:30 crc kubenswrapper[4780]: I1210 11:17:30.021799 4780 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-api-7c6599c744-svfvf" Dec 10 11:17:30 crc kubenswrapper[4780]: I1210 11:17:30.027656 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2df2ca18-27cb-48e0-842c-f346f4703a37-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "2df2ca18-27cb-48e0-842c-f346f4703a37" (UID: "2df2ca18-27cb-48e0-842c-f346f4703a37"). InnerVolumeSpecName "config-data-custom". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:17:30 crc kubenswrapper[4780]: I1210 11:17:30.036894 4780 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="462cb03c-f171-4b9e-8bf2-b63ec19e6a06" path="/var/lib/kubelet/pods/462cb03c-f171-4b9e-8bf2-b63ec19e6a06/volumes" Dec 10 11:17:30 crc kubenswrapper[4780]: I1210 11:17:30.075665 4780 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/2df2ca18-27cb-48e0-842c-f346f4703a37-config-data-custom\") on node \"crc\" DevicePath \"\"" Dec 10 11:17:30 crc kubenswrapper[4780]: I1210 11:17:30.075746 4780 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5gmgq\" (UniqueName: \"kubernetes.io/projected/2df2ca18-27cb-48e0-842c-f346f4703a37-kube-api-access-5gmgq\") on node \"crc\" DevicePath \"\"" Dec 10 11:17:30 crc kubenswrapper[4780]: I1210 11:17:30.093477 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2df2ca18-27cb-48e0-842c-f346f4703a37-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "2df2ca18-27cb-48e0-842c-f346f4703a37" (UID: "2df2ca18-27cb-48e0-842c-f346f4703a37"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:17:30 crc kubenswrapper[4780]: I1210 11:17:30.177369 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/c09a9d73-c35e-40da-b545-ebf1b8724896-config-data-custom\") pod \"c09a9d73-c35e-40da-b545-ebf1b8724896\" (UID: \"c09a9d73-c35e-40da-b545-ebf1b8724896\") " Dec 10 11:17:30 crc kubenswrapper[4780]: I1210 11:17:30.178156 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-95k4d\" (UniqueName: \"kubernetes.io/projected/c09a9d73-c35e-40da-b545-ebf1b8724896-kube-api-access-95k4d\") pod \"c09a9d73-c35e-40da-b545-ebf1b8724896\" (UID: \"c09a9d73-c35e-40da-b545-ebf1b8724896\") " Dec 10 11:17:30 crc kubenswrapper[4780]: I1210 11:17:30.178455 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c09a9d73-c35e-40da-b545-ebf1b8724896-config-data\") pod \"c09a9d73-c35e-40da-b545-ebf1b8724896\" (UID: \"c09a9d73-c35e-40da-b545-ebf1b8724896\") " Dec 10 11:17:30 crc kubenswrapper[4780]: I1210 11:17:30.178656 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c09a9d73-c35e-40da-b545-ebf1b8724896-combined-ca-bundle\") pod \"c09a9d73-c35e-40da-b545-ebf1b8724896\" (UID: \"c09a9d73-c35e-40da-b545-ebf1b8724896\") " Dec 10 11:17:30 crc kubenswrapper[4780]: I1210 11:17:30.180318 4780 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2df2ca18-27cb-48e0-842c-f346f4703a37-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 10 11:17:30 crc kubenswrapper[4780]: I1210 11:17:30.199569 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c09a9d73-c35e-40da-b545-ebf1b8724896-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "c09a9d73-c35e-40da-b545-ebf1b8724896" (UID: "c09a9d73-c35e-40da-b545-ebf1b8724896"). InnerVolumeSpecName "config-data-custom". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:17:30 crc kubenswrapper[4780]: I1210 11:17:30.200295 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c09a9d73-c35e-40da-b545-ebf1b8724896-kube-api-access-95k4d" (OuterVolumeSpecName: "kube-api-access-95k4d") pod "c09a9d73-c35e-40da-b545-ebf1b8724896" (UID: "c09a9d73-c35e-40da-b545-ebf1b8724896"). InnerVolumeSpecName "kube-api-access-95k4d". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:17:30 crc kubenswrapper[4780]: I1210 11:17:30.217972 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2df2ca18-27cb-48e0-842c-f346f4703a37-config-data" (OuterVolumeSpecName: "config-data") pod "2df2ca18-27cb-48e0-842c-f346f4703a37" (UID: "2df2ca18-27cb-48e0-842c-f346f4703a37"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:17:30 crc kubenswrapper[4780]: I1210 11:17:30.283308 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c09a9d73-c35e-40da-b545-ebf1b8724896-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "c09a9d73-c35e-40da-b545-ebf1b8724896" (UID: "c09a9d73-c35e-40da-b545-ebf1b8724896"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:17:30 crc kubenswrapper[4780]: I1210 11:17:30.283452 4780 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/c09a9d73-c35e-40da-b545-ebf1b8724896-config-data-custom\") on node \"crc\" DevicePath \"\"" Dec 10 11:17:30 crc kubenswrapper[4780]: I1210 11:17:30.283520 4780 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-95k4d\" (UniqueName: \"kubernetes.io/projected/c09a9d73-c35e-40da-b545-ebf1b8724896-kube-api-access-95k4d\") on node \"crc\" DevicePath \"\"" Dec 10 11:17:30 crc kubenswrapper[4780]: I1210 11:17:30.283534 4780 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2df2ca18-27cb-48e0-842c-f346f4703a37-config-data\") on node \"crc\" DevicePath \"\"" Dec 10 11:17:30 crc kubenswrapper[4780]: I1210 11:17:30.392338 4780 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c09a9d73-c35e-40da-b545-ebf1b8724896-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 10 11:17:30 crc kubenswrapper[4780]: I1210 11:17:30.451152 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c09a9d73-c35e-40da-b545-ebf1b8724896-config-data" (OuterVolumeSpecName: "config-data") pod "c09a9d73-c35e-40da-b545-ebf1b8724896" (UID: "c09a9d73-c35e-40da-b545-ebf1b8724896"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:17:30 crc kubenswrapper[4780]: I1210 11:17:30.500430 4780 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c09a9d73-c35e-40da-b545-ebf1b8724896-config-data\") on node \"crc\" DevicePath \"\"" Dec 10 11:17:30 crc kubenswrapper[4780]: I1210 11:17:30.955886 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-7c6599c744-svfvf" event={"ID":"c09a9d73-c35e-40da-b545-ebf1b8724896","Type":"ContainerDied","Data":"8f1ccd9b52bbbba5c547f5427d4232e6c0732e02c21cedc81f16df55bf6e12cf"} Dec 10 11:17:30 crc kubenswrapper[4780]: I1210 11:17:30.958442 4780 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-api-7c6599c744-svfvf" Dec 10 11:17:30 crc kubenswrapper[4780]: I1210 11:17:30.983836 4780 scope.go:117] "RemoveContainer" containerID="61dcd300f2425a7bda15572ca98b5c00a398cba525447432a25091e819a341fb" Dec 10 11:17:31 crc kubenswrapper[4780]: I1210 11:17:30.999795 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-7788568699-swzvp" event={"ID":"2df2ca18-27cb-48e0-842c-f346f4703a37","Type":"ContainerDied","Data":"a07d749e986e9b647877729ec5ad2da3e27dbbbe142da9c4a1045ddbc02ad75d"} Dec 10 11:17:31 crc kubenswrapper[4780]: I1210 11:17:31.005767 4780 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-cfnapi-7788568699-swzvp" Dec 10 11:17:31 crc kubenswrapper[4780]: I1210 11:17:31.130080 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Dec 10 11:17:31 crc kubenswrapper[4780]: I1210 11:17:31.189282 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-api-7c6599c744-svfvf"] Dec 10 11:17:31 crc kubenswrapper[4780]: I1210 11:17:31.203204 4780 scope.go:117] "RemoveContainer" containerID="52fa087134da031e63f10253f05707262c2632a1257945320cfed4ac104f74f3" Dec 10 11:17:31 crc kubenswrapper[4780]: I1210 11:17:31.234028 4780 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/heat-api-7c6599c744-svfvf"] Dec 10 11:17:31 crc kubenswrapper[4780]: I1210 11:17:31.308097 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-cfnapi-7788568699-swzvp"] Dec 10 11:17:31 crc kubenswrapper[4780]: I1210 11:17:31.313781 4780 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/heat-cfnapi-7788568699-swzvp"] Dec 10 11:17:31 crc kubenswrapper[4780]: I1210 11:17:31.668692 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/heat-api-7bc68bff5f-xvzgg" Dec 10 11:17:31 crc kubenswrapper[4780]: I1210 11:17:31.998996 4780 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2df2ca18-27cb-48e0-842c-f346f4703a37" path="/var/lib/kubelet/pods/2df2ca18-27cb-48e0-842c-f346f4703a37/volumes" Dec 10 11:17:32 crc kubenswrapper[4780]: I1210 11:17:32.000816 4780 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c09a9d73-c35e-40da-b545-ebf1b8724896" path="/var/lib/kubelet/pods/c09a9d73-c35e-40da-b545-ebf1b8724896/volumes" Dec 10 11:17:32 crc kubenswrapper[4780]: I1210 11:17:32.056778 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"1c47b7ce-4a43-460f-8386-d1541a901ea2","Type":"ContainerStarted","Data":"266f48cb76a59d57845bd1ada8add0c320c8a13a0ab4990ced4840221ce5efe0"} Dec 10 11:17:32 crc kubenswrapper[4780]: I1210 11:17:32.086829 4780 generic.go:334] "Generic (PLEG): container 
finished" podID="068aca35-6c22-4164-bf63-481836e82331" containerID="4ea629a8edba4ea35e58de98127ed891339d71b7d3e39119bc8fb00fe19c1d73" exitCode=0 Dec 10 11:17:32 crc kubenswrapper[4780]: I1210 11:17:32.086939 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-engine-f7d475897-pp967" event={"ID":"068aca35-6c22-4164-bf63-481836e82331","Type":"ContainerDied","Data":"4ea629a8edba4ea35e58de98127ed891339d71b7d3e39119bc8fb00fe19c1d73"} Dec 10 11:17:32 crc kubenswrapper[4780]: I1210 11:17:32.343471 4780 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-engine-f7d475897-pp967" Dec 10 11:17:32 crc kubenswrapper[4780]: I1210 11:17:32.512375 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/068aca35-6c22-4164-bf63-481836e82331-combined-ca-bundle\") pod \"068aca35-6c22-4164-bf63-481836e82331\" (UID: \"068aca35-6c22-4164-bf63-481836e82331\") " Dec 10 11:17:32 crc kubenswrapper[4780]: I1210 11:17:32.512500 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/068aca35-6c22-4164-bf63-481836e82331-config-data-custom\") pod \"068aca35-6c22-4164-bf63-481836e82331\" (UID: \"068aca35-6c22-4164-bf63-481836e82331\") " Dec 10 11:17:32 crc kubenswrapper[4780]: I1210 11:17:32.512955 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/068aca35-6c22-4164-bf63-481836e82331-config-data\") pod \"068aca35-6c22-4164-bf63-481836e82331\" (UID: \"068aca35-6c22-4164-bf63-481836e82331\") " Dec 10 11:17:32 crc kubenswrapper[4780]: I1210 11:17:32.513147 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9h5xt\" (UniqueName: \"kubernetes.io/projected/068aca35-6c22-4164-bf63-481836e82331-kube-api-access-9h5xt\") pod \"068aca35-6c22-4164-bf63-481836e82331\" (UID: \"068aca35-6c22-4164-bf63-481836e82331\") " Dec 10 11:17:32 crc kubenswrapper[4780]: I1210 11:17:32.541291 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/068aca35-6c22-4164-bf63-481836e82331-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "068aca35-6c22-4164-bf63-481836e82331" (UID: "068aca35-6c22-4164-bf63-481836e82331"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:17:32 crc kubenswrapper[4780]: I1210 11:17:32.549962 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/068aca35-6c22-4164-bf63-481836e82331-kube-api-access-9h5xt" (OuterVolumeSpecName: "kube-api-access-9h5xt") pod "068aca35-6c22-4164-bf63-481836e82331" (UID: "068aca35-6c22-4164-bf63-481836e82331"). InnerVolumeSpecName "kube-api-access-9h5xt". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:17:32 crc kubenswrapper[4780]: I1210 11:17:32.617179 4780 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9h5xt\" (UniqueName: \"kubernetes.io/projected/068aca35-6c22-4164-bf63-481836e82331-kube-api-access-9h5xt\") on node \"crc\" DevicePath \"\"" Dec 10 11:17:32 crc kubenswrapper[4780]: I1210 11:17:32.617508 4780 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/068aca35-6c22-4164-bf63-481836e82331-config-data-custom\") on node \"crc\" DevicePath \"\"" Dec 10 11:17:32 crc kubenswrapper[4780]: I1210 11:17:32.628132 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/068aca35-6c22-4164-bf63-481836e82331-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "068aca35-6c22-4164-bf63-481836e82331" (UID: "068aca35-6c22-4164-bf63-481836e82331"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:17:32 crc kubenswrapper[4780]: I1210 11:17:32.721090 4780 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/068aca35-6c22-4164-bf63-481836e82331-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 10 11:17:32 crc kubenswrapper[4780]: I1210 11:17:32.751139 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Dec 10 11:17:32 crc kubenswrapper[4780]: I1210 11:17:32.751558 4780 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="ae84be8f-342b-4f21-9aea-4ebf423af61c" containerName="glance-log" containerID="cri-o://b029d12eb9344ff501ff60ef04a516ec7c1d43f95113d256f618ca731a444aeb" gracePeriod=30 Dec 10 11:17:32 crc kubenswrapper[4780]: I1210 11:17:32.751912 4780 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="ae84be8f-342b-4f21-9aea-4ebf423af61c" containerName="glance-httpd" containerID="cri-o://d105d3edbf26886ffbbc006bf2dc1b26d894a59541adaae7e0c1609bc379c05b" gracePeriod=30 Dec 10 11:17:32 crc kubenswrapper[4780]: I1210 11:17:32.891000 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/068aca35-6c22-4164-bf63-481836e82331-config-data" (OuterVolumeSpecName: "config-data") pod "068aca35-6c22-4164-bf63-481836e82331" (UID: "068aca35-6c22-4164-bf63-481836e82331"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:17:32 crc kubenswrapper[4780]: I1210 11:17:32.931076 4780 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/068aca35-6c22-4164-bf63-481836e82331-config-data\") on node \"crc\" DevicePath \"\"" Dec 10 11:17:32 crc kubenswrapper[4780]: I1210 11:17:32.990164 4780 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/cinder-api-0" podUID="5a0987c6-976b-4d4e-9456-4516cdaf53a0" containerName="cinder-api" probeResult="failure" output="Get \"https://10.217.0.226:8776/healthcheck\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Dec 10 11:17:33 crc kubenswrapper[4780]: I1210 11:17:33.026186 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/cinder-api-0" Dec 10 11:17:33 crc kubenswrapper[4780]: I1210 11:17:33.144126 4780 generic.go:334] "Generic (PLEG): container finished" podID="ae84be8f-342b-4f21-9aea-4ebf423af61c" containerID="b029d12eb9344ff501ff60ef04a516ec7c1d43f95113d256f618ca731a444aeb" exitCode=143 Dec 10 11:17:33 crc kubenswrapper[4780]: I1210 11:17:33.144288 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"ae84be8f-342b-4f21-9aea-4ebf423af61c","Type":"ContainerDied","Data":"b029d12eb9344ff501ff60ef04a516ec7c1d43f95113d256f618ca731a444aeb"} Dec 10 11:17:33 crc kubenswrapper[4780]: I1210 11:17:33.152897 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-engine-f7d475897-pp967" event={"ID":"068aca35-6c22-4164-bf63-481836e82331","Type":"ContainerDied","Data":"7d313c19a96f3874a8722b95a742a756b706add0bf4b7f904bcaa178705b257e"} Dec 10 11:17:33 crc kubenswrapper[4780]: I1210 11:17:33.152983 4780 scope.go:117] "RemoveContainer" containerID="4ea629a8edba4ea35e58de98127ed891339d71b7d3e39119bc8fb00fe19c1d73" Dec 10 11:17:33 crc kubenswrapper[4780]: I1210 11:17:33.153168 4780 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-engine-f7d475897-pp967" Dec 10 11:17:33 crc kubenswrapper[4780]: I1210 11:17:33.247864 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-engine-f7d475897-pp967"] Dec 10 11:17:33 crc kubenswrapper[4780]: I1210 11:17:33.266275 4780 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/heat-engine-f7d475897-pp967"] Dec 10 11:17:33 crc kubenswrapper[4780]: I1210 11:17:33.518590 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Dec 10 11:17:34 crc kubenswrapper[4780]: I1210 11:17:34.479765 4780 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="068aca35-6c22-4164-bf63-481836e82331" path="/var/lib/kubelet/pods/068aca35-6c22-4164-bf63-481836e82331/volumes" Dec 10 11:17:35 crc kubenswrapper[4780]: I1210 11:17:35.570033 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"1c47b7ce-4a43-460f-8386-d1541a901ea2","Type":"ContainerStarted","Data":"cc4676babe1ae564f0cd13928143b80c7ed350e0879138bf5b9c82c3cdfea27f"} Dec 10 11:17:36 crc kubenswrapper[4780]: I1210 11:17:36.756300 4780 generic.go:334] "Generic (PLEG): container finished" podID="ae84be8f-342b-4f21-9aea-4ebf423af61c" containerID="d105d3edbf26886ffbbc006bf2dc1b26d894a59541adaae7e0c1609bc379c05b" exitCode=0 Dec 10 11:17:36 crc kubenswrapper[4780]: I1210 11:17:36.757559 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"ae84be8f-342b-4f21-9aea-4ebf423af61c","Type":"ContainerDied","Data":"d105d3edbf26886ffbbc006bf2dc1b26d894a59541adaae7e0c1609bc379c05b"} Dec 10 11:17:36 crc kubenswrapper[4780]: I1210 11:17:36.787822 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"1c47b7ce-4a43-460f-8386-d1541a901ea2","Type":"ContainerStarted","Data":"22a57e0d43475e2f1965e8a933202d803b3854db130af647c8598504c4ace69f"} Dec 10 11:17:37 crc kubenswrapper[4780]: E1210 11:17:37.243944 4780 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podae84be8f_342b_4f21_9aea_4ebf423af61c.slice/crio-conmon-d105d3edbf26886ffbbc006bf2dc1b26d894a59541adaae7e0c1609bc379c05b.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podae84be8f_342b_4f21_9aea_4ebf423af61c.slice/crio-d105d3edbf26886ffbbc006bf2dc1b26d894a59541adaae7e0c1609bc379c05b.scope\": RecentStats: unable to find data in memory cache]" Dec 10 11:17:38 crc kubenswrapper[4780]: I1210 11:17:38.108664 4780 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Dec 10 11:17:38 crc kubenswrapper[4780]: I1210 11:17:38.247067 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ae84be8f-342b-4f21-9aea-4ebf423af61c-combined-ca-bundle\") pod \"ae84be8f-342b-4f21-9aea-4ebf423af61c\" (UID: \"ae84be8f-342b-4f21-9aea-4ebf423af61c\") " Dec 10 11:17:38 crc kubenswrapper[4780]: I1210 11:17:38.247191 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4kj6x\" (UniqueName: \"kubernetes.io/projected/ae84be8f-342b-4f21-9aea-4ebf423af61c-kube-api-access-4kj6x\") pod \"ae84be8f-342b-4f21-9aea-4ebf423af61c\" (UID: \"ae84be8f-342b-4f21-9aea-4ebf423af61c\") " Dec 10 11:17:38 crc kubenswrapper[4780]: I1210 11:17:38.247260 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/ae84be8f-342b-4f21-9aea-4ebf423af61c-internal-tls-certs\") pod \"ae84be8f-342b-4f21-9aea-4ebf423af61c\" (UID: \"ae84be8f-342b-4f21-9aea-4ebf423af61c\") " Dec 10 11:17:38 crc kubenswrapper[4780]: I1210 11:17:38.247418 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ae84be8f-342b-4f21-9aea-4ebf423af61c-scripts\") pod \"ae84be8f-342b-4f21-9aea-4ebf423af61c\" (UID: \"ae84be8f-342b-4f21-9aea-4ebf423af61c\") " Dec 10 11:17:38 crc kubenswrapper[4780]: I1210 11:17:38.247509 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"ae84be8f-342b-4f21-9aea-4ebf423af61c\" (UID: \"ae84be8f-342b-4f21-9aea-4ebf423af61c\") " Dec 10 11:17:38 crc kubenswrapper[4780]: I1210 11:17:38.248491 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ae84be8f-342b-4f21-9aea-4ebf423af61c-logs\") pod \"ae84be8f-342b-4f21-9aea-4ebf423af61c\" (UID: \"ae84be8f-342b-4f21-9aea-4ebf423af61c\") " Dec 10 11:17:38 crc kubenswrapper[4780]: I1210 11:17:38.249335 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ae84be8f-342b-4f21-9aea-4ebf423af61c-logs" (OuterVolumeSpecName: "logs") pod "ae84be8f-342b-4f21-9aea-4ebf423af61c" (UID: "ae84be8f-342b-4f21-9aea-4ebf423af61c"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 11:17:38 crc kubenswrapper[4780]: I1210 11:17:38.249454 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ae84be8f-342b-4f21-9aea-4ebf423af61c-config-data\") pod \"ae84be8f-342b-4f21-9aea-4ebf423af61c\" (UID: \"ae84be8f-342b-4f21-9aea-4ebf423af61c\") " Dec 10 11:17:38 crc kubenswrapper[4780]: I1210 11:17:38.249776 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/ae84be8f-342b-4f21-9aea-4ebf423af61c-httpd-run\") pod \"ae84be8f-342b-4f21-9aea-4ebf423af61c\" (UID: \"ae84be8f-342b-4f21-9aea-4ebf423af61c\") " Dec 10 11:17:38 crc kubenswrapper[4780]: I1210 11:17:38.250690 4780 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ae84be8f-342b-4f21-9aea-4ebf423af61c-logs\") on node \"crc\" DevicePath \"\"" Dec 10 11:17:38 crc kubenswrapper[4780]: I1210 11:17:38.252875 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ae84be8f-342b-4f21-9aea-4ebf423af61c-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "ae84be8f-342b-4f21-9aea-4ebf423af61c" (UID: "ae84be8f-342b-4f21-9aea-4ebf423af61c"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 11:17:38 crc kubenswrapper[4780]: I1210 11:17:38.270531 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ae84be8f-342b-4f21-9aea-4ebf423af61c-kube-api-access-4kj6x" (OuterVolumeSpecName: "kube-api-access-4kj6x") pod "ae84be8f-342b-4f21-9aea-4ebf423af61c" (UID: "ae84be8f-342b-4f21-9aea-4ebf423af61c"). InnerVolumeSpecName "kube-api-access-4kj6x". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:17:38 crc kubenswrapper[4780]: I1210 11:17:38.282731 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage10-crc" (OuterVolumeSpecName: "glance") pod "ae84be8f-342b-4f21-9aea-4ebf423af61c" (UID: "ae84be8f-342b-4f21-9aea-4ebf423af61c"). InnerVolumeSpecName "local-storage10-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Dec 10 11:17:38 crc kubenswrapper[4780]: I1210 11:17:38.309197 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ae84be8f-342b-4f21-9aea-4ebf423af61c-scripts" (OuterVolumeSpecName: "scripts") pod "ae84be8f-342b-4f21-9aea-4ebf423af61c" (UID: "ae84be8f-342b-4f21-9aea-4ebf423af61c"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:17:38 crc kubenswrapper[4780]: I1210 11:17:38.365075 4780 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/ae84be8f-342b-4f21-9aea-4ebf423af61c-httpd-run\") on node \"crc\" DevicePath \"\"" Dec 10 11:17:38 crc kubenswrapper[4780]: I1210 11:17:38.365126 4780 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4kj6x\" (UniqueName: \"kubernetes.io/projected/ae84be8f-342b-4f21-9aea-4ebf423af61c-kube-api-access-4kj6x\") on node \"crc\" DevicePath \"\"" Dec 10 11:17:38 crc kubenswrapper[4780]: I1210 11:17:38.365154 4780 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ae84be8f-342b-4f21-9aea-4ebf423af61c-scripts\") on node \"crc\" DevicePath \"\"" Dec 10 11:17:38 crc kubenswrapper[4780]: I1210 11:17:38.365182 4780 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") on node \"crc\" " Dec 10 11:17:38 crc kubenswrapper[4780]: I1210 11:17:38.452615 4780 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage10-crc" (UniqueName: "kubernetes.io/local-volume/local-storage10-crc") on node "crc" Dec 10 11:17:38 crc kubenswrapper[4780]: I1210 11:17:38.473696 4780 reconciler_common.go:293] "Volume detached for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") on node \"crc\" DevicePath \"\"" Dec 10 11:17:38 crc kubenswrapper[4780]: I1210 11:17:38.479104 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ae84be8f-342b-4f21-9aea-4ebf423af61c-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "ae84be8f-342b-4f21-9aea-4ebf423af61c" (UID: "ae84be8f-342b-4f21-9aea-4ebf423af61c"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:17:38 crc kubenswrapper[4780]: I1210 11:17:38.586942 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ae84be8f-342b-4f21-9aea-4ebf423af61c-config-data" (OuterVolumeSpecName: "config-data") pod "ae84be8f-342b-4f21-9aea-4ebf423af61c" (UID: "ae84be8f-342b-4f21-9aea-4ebf423af61c"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:17:38 crc kubenswrapper[4780]: I1210 11:17:38.587442 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ae84be8f-342b-4f21-9aea-4ebf423af61c-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "ae84be8f-342b-4f21-9aea-4ebf423af61c" (UID: "ae84be8f-342b-4f21-9aea-4ebf423af61c"). InnerVolumeSpecName "internal-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:17:38 crc kubenswrapper[4780]: I1210 11:17:38.591460 4780 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ae84be8f-342b-4f21-9aea-4ebf423af61c-config-data\") on node \"crc\" DevicePath \"\"" Dec 10 11:17:38 crc kubenswrapper[4780]: I1210 11:17:38.591513 4780 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ae84be8f-342b-4f21-9aea-4ebf423af61c-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 10 11:17:38 crc kubenswrapper[4780]: I1210 11:17:38.591532 4780 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/ae84be8f-342b-4f21-9aea-4ebf423af61c-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Dec 10 11:17:39 crc kubenswrapper[4780]: I1210 11:17:39.038658 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"ae84be8f-342b-4f21-9aea-4ebf423af61c","Type":"ContainerDied","Data":"1b58f30407051b39005b3a345a6724dd693d48fdcf08affd8fe8c411f53f9b0d"} Dec 10 11:17:39 crc kubenswrapper[4780]: I1210 11:17:39.038758 4780 scope.go:117] "RemoveContainer" containerID="d105d3edbf26886ffbbc006bf2dc1b26d894a59541adaae7e0c1609bc379c05b" Dec 10 11:17:39 crc kubenswrapper[4780]: I1210 11:17:39.039344 4780 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Dec 10 11:17:39 crc kubenswrapper[4780]: I1210 11:17:39.141660 4780 scope.go:117] "RemoveContainer" containerID="b029d12eb9344ff501ff60ef04a516ec7c1d43f95113d256f618ca731a444aeb" Dec 10 11:17:39 crc kubenswrapper[4780]: I1210 11:17:39.150120 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Dec 10 11:17:39 crc kubenswrapper[4780]: I1210 11:17:39.173707 4780 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-internal-api-0"] Dec 10 11:17:39 crc kubenswrapper[4780]: I1210 11:17:39.252109 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-internal-api-0"] Dec 10 11:17:39 crc kubenswrapper[4780]: E1210 11:17:39.253263 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2df2ca18-27cb-48e0-842c-f346f4703a37" containerName="heat-cfnapi" Dec 10 11:17:39 crc kubenswrapper[4780]: I1210 11:17:39.253298 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="2df2ca18-27cb-48e0-842c-f346f4703a37" containerName="heat-cfnapi" Dec 10 11:17:39 crc kubenswrapper[4780]: E1210 11:17:39.253314 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c09a9d73-c35e-40da-b545-ebf1b8724896" containerName="heat-api" Dec 10 11:17:39 crc kubenswrapper[4780]: I1210 11:17:39.253324 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="c09a9d73-c35e-40da-b545-ebf1b8724896" containerName="heat-api" Dec 10 11:17:39 crc kubenswrapper[4780]: E1210 11:17:39.253348 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c09a9d73-c35e-40da-b545-ebf1b8724896" containerName="heat-api" Dec 10 11:17:39 crc kubenswrapper[4780]: I1210 11:17:39.253363 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="c09a9d73-c35e-40da-b545-ebf1b8724896" containerName="heat-api" Dec 10 11:17:39 crc kubenswrapper[4780]: E1210 11:17:39.253399 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ae84be8f-342b-4f21-9aea-4ebf423af61c" 
containerName="glance-httpd" Dec 10 11:17:39 crc kubenswrapper[4780]: I1210 11:17:39.253412 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="ae84be8f-342b-4f21-9aea-4ebf423af61c" containerName="glance-httpd" Dec 10 11:17:39 crc kubenswrapper[4780]: E1210 11:17:39.253427 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="068aca35-6c22-4164-bf63-481836e82331" containerName="heat-engine" Dec 10 11:17:39 crc kubenswrapper[4780]: I1210 11:17:39.253436 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="068aca35-6c22-4164-bf63-481836e82331" containerName="heat-engine" Dec 10 11:17:39 crc kubenswrapper[4780]: E1210 11:17:39.253473 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ae84be8f-342b-4f21-9aea-4ebf423af61c" containerName="glance-log" Dec 10 11:17:39 crc kubenswrapper[4780]: I1210 11:17:39.253484 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="ae84be8f-342b-4f21-9aea-4ebf423af61c" containerName="glance-log" Dec 10 11:17:39 crc kubenswrapper[4780]: I1210 11:17:39.253899 4780 memory_manager.go:354] "RemoveStaleState removing state" podUID="2df2ca18-27cb-48e0-842c-f346f4703a37" containerName="heat-cfnapi" Dec 10 11:17:39 crc kubenswrapper[4780]: I1210 11:17:39.253979 4780 memory_manager.go:354] "RemoveStaleState removing state" podUID="2df2ca18-27cb-48e0-842c-f346f4703a37" containerName="heat-cfnapi" Dec 10 11:17:39 crc kubenswrapper[4780]: I1210 11:17:39.253997 4780 memory_manager.go:354] "RemoveStaleState removing state" podUID="ae84be8f-342b-4f21-9aea-4ebf423af61c" containerName="glance-httpd" Dec 10 11:17:39 crc kubenswrapper[4780]: I1210 11:17:39.254014 4780 memory_manager.go:354] "RemoveStaleState removing state" podUID="c09a9d73-c35e-40da-b545-ebf1b8724896" containerName="heat-api" Dec 10 11:17:39 crc kubenswrapper[4780]: I1210 11:17:39.254027 4780 memory_manager.go:354] "RemoveStaleState removing state" podUID="ae84be8f-342b-4f21-9aea-4ebf423af61c" containerName="glance-log" Dec 10 11:17:39 crc kubenswrapper[4780]: I1210 11:17:39.254047 4780 memory_manager.go:354] "RemoveStaleState removing state" podUID="068aca35-6c22-4164-bf63-481836e82331" containerName="heat-engine" Dec 10 11:17:39 crc kubenswrapper[4780]: I1210 11:17:39.254067 4780 memory_manager.go:354] "RemoveStaleState removing state" podUID="c09a9d73-c35e-40da-b545-ebf1b8724896" containerName="heat-api" Dec 10 11:17:39 crc kubenswrapper[4780]: E1210 11:17:39.254519 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2df2ca18-27cb-48e0-842c-f346f4703a37" containerName="heat-cfnapi" Dec 10 11:17:39 crc kubenswrapper[4780]: I1210 11:17:39.254542 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="2df2ca18-27cb-48e0-842c-f346f4703a37" containerName="heat-cfnapi" Dec 10 11:17:39 crc kubenswrapper[4780]: I1210 11:17:39.256581 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Dec 10 11:17:39 crc kubenswrapper[4780]: I1210 11:17:39.261908 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-internal-config-data" Dec 10 11:17:39 crc kubenswrapper[4780]: I1210 11:17:39.279513 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Dec 10 11:17:39 crc kubenswrapper[4780]: I1210 11:17:39.283037 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-internal-svc" Dec 10 11:17:39 crc kubenswrapper[4780]: I1210 11:17:39.328211 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d8469667-f6d8-4f05-98b9-7e48fe11bbb9-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"d8469667-f6d8-4f05-98b9-7e48fe11bbb9\") " pod="openstack/glance-default-internal-api-0" Dec 10 11:17:39 crc kubenswrapper[4780]: I1210 11:17:39.328854 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d8469667-f6d8-4f05-98b9-7e48fe11bbb9-config-data\") pod \"glance-default-internal-api-0\" (UID: \"d8469667-f6d8-4f05-98b9-7e48fe11bbb9\") " pod="openstack/glance-default-internal-api-0" Dec 10 11:17:39 crc kubenswrapper[4780]: I1210 11:17:39.328950 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/d8469667-f6d8-4f05-98b9-7e48fe11bbb9-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"d8469667-f6d8-4f05-98b9-7e48fe11bbb9\") " pod="openstack/glance-default-internal-api-0" Dec 10 11:17:39 crc kubenswrapper[4780]: I1210 11:17:39.329582 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m8spf\" (UniqueName: \"kubernetes.io/projected/d8469667-f6d8-4f05-98b9-7e48fe11bbb9-kube-api-access-m8spf\") pod \"glance-default-internal-api-0\" (UID: \"d8469667-f6d8-4f05-98b9-7e48fe11bbb9\") " pod="openstack/glance-default-internal-api-0" Dec 10 11:17:39 crc kubenswrapper[4780]: I1210 11:17:39.329677 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d8469667-f6d8-4f05-98b9-7e48fe11bbb9-scripts\") pod \"glance-default-internal-api-0\" (UID: \"d8469667-f6d8-4f05-98b9-7e48fe11bbb9\") " pod="openstack/glance-default-internal-api-0" Dec 10 11:17:39 crc kubenswrapper[4780]: I1210 11:17:39.330084 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"glance-default-internal-api-0\" (UID: \"d8469667-f6d8-4f05-98b9-7e48fe11bbb9\") " pod="openstack/glance-default-internal-api-0" Dec 10 11:17:39 crc kubenswrapper[4780]: I1210 11:17:39.330153 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/d8469667-f6d8-4f05-98b9-7e48fe11bbb9-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"d8469667-f6d8-4f05-98b9-7e48fe11bbb9\") " pod="openstack/glance-default-internal-api-0" Dec 10 11:17:39 crc kubenswrapper[4780]: I1210 11:17:39.330407 4780 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d8469667-f6d8-4f05-98b9-7e48fe11bbb9-logs\") pod \"glance-default-internal-api-0\" (UID: \"d8469667-f6d8-4f05-98b9-7e48fe11bbb9\") " pod="openstack/glance-default-internal-api-0" Dec 10 11:17:39 crc kubenswrapper[4780]: I1210 11:17:39.455329 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d8469667-f6d8-4f05-98b9-7e48fe11bbb9-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"d8469667-f6d8-4f05-98b9-7e48fe11bbb9\") " pod="openstack/glance-default-internal-api-0" Dec 10 11:17:39 crc kubenswrapper[4780]: I1210 11:17:39.455413 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d8469667-f6d8-4f05-98b9-7e48fe11bbb9-config-data\") pod \"glance-default-internal-api-0\" (UID: \"d8469667-f6d8-4f05-98b9-7e48fe11bbb9\") " pod="openstack/glance-default-internal-api-0" Dec 10 11:17:39 crc kubenswrapper[4780]: I1210 11:17:39.455445 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/d8469667-f6d8-4f05-98b9-7e48fe11bbb9-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"d8469667-f6d8-4f05-98b9-7e48fe11bbb9\") " pod="openstack/glance-default-internal-api-0" Dec 10 11:17:39 crc kubenswrapper[4780]: I1210 11:17:39.455704 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-m8spf\" (UniqueName: \"kubernetes.io/projected/d8469667-f6d8-4f05-98b9-7e48fe11bbb9-kube-api-access-m8spf\") pod \"glance-default-internal-api-0\" (UID: \"d8469667-f6d8-4f05-98b9-7e48fe11bbb9\") " pod="openstack/glance-default-internal-api-0" Dec 10 11:17:39 crc kubenswrapper[4780]: I1210 11:17:39.455755 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d8469667-f6d8-4f05-98b9-7e48fe11bbb9-scripts\") pod \"glance-default-internal-api-0\" (UID: \"d8469667-f6d8-4f05-98b9-7e48fe11bbb9\") " pod="openstack/glance-default-internal-api-0" Dec 10 11:17:39 crc kubenswrapper[4780]: I1210 11:17:39.456018 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"glance-default-internal-api-0\" (UID: \"d8469667-f6d8-4f05-98b9-7e48fe11bbb9\") " pod="openstack/glance-default-internal-api-0" Dec 10 11:17:39 crc kubenswrapper[4780]: I1210 11:17:39.456067 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/d8469667-f6d8-4f05-98b9-7e48fe11bbb9-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"d8469667-f6d8-4f05-98b9-7e48fe11bbb9\") " pod="openstack/glance-default-internal-api-0" Dec 10 11:17:39 crc kubenswrapper[4780]: I1210 11:17:39.456213 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d8469667-f6d8-4f05-98b9-7e48fe11bbb9-logs\") pod \"glance-default-internal-api-0\" (UID: \"d8469667-f6d8-4f05-98b9-7e48fe11bbb9\") " pod="openstack/glance-default-internal-api-0" Dec 10 11:17:39 crc kubenswrapper[4780]: I1210 11:17:39.456719 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: 
\"kubernetes.io/empty-dir/d8469667-f6d8-4f05-98b9-7e48fe11bbb9-logs\") pod \"glance-default-internal-api-0\" (UID: \"d8469667-f6d8-4f05-98b9-7e48fe11bbb9\") " pod="openstack/glance-default-internal-api-0" Dec 10 11:17:39 crc kubenswrapper[4780]: I1210 11:17:39.462166 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/d8469667-f6d8-4f05-98b9-7e48fe11bbb9-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"d8469667-f6d8-4f05-98b9-7e48fe11bbb9\") " pod="openstack/glance-default-internal-api-0" Dec 10 11:17:39 crc kubenswrapper[4780]: I1210 11:17:39.462658 4780 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"glance-default-internal-api-0\" (UID: \"d8469667-f6d8-4f05-98b9-7e48fe11bbb9\") device mount path \"/mnt/openstack/pv10\"" pod="openstack/glance-default-internal-api-0" Dec 10 11:17:39 crc kubenswrapper[4780]: I1210 11:17:39.477775 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/d8469667-f6d8-4f05-98b9-7e48fe11bbb9-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"d8469667-f6d8-4f05-98b9-7e48fe11bbb9\") " pod="openstack/glance-default-internal-api-0" Dec 10 11:17:39 crc kubenswrapper[4780]: I1210 11:17:39.478375 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d8469667-f6d8-4f05-98b9-7e48fe11bbb9-scripts\") pod \"glance-default-internal-api-0\" (UID: \"d8469667-f6d8-4f05-98b9-7e48fe11bbb9\") " pod="openstack/glance-default-internal-api-0" Dec 10 11:17:39 crc kubenswrapper[4780]: I1210 11:17:39.483823 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d8469667-f6d8-4f05-98b9-7e48fe11bbb9-config-data\") pod \"glance-default-internal-api-0\" (UID: \"d8469667-f6d8-4f05-98b9-7e48fe11bbb9\") " pod="openstack/glance-default-internal-api-0" Dec 10 11:17:39 crc kubenswrapper[4780]: I1210 11:17:39.484204 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-m8spf\" (UniqueName: \"kubernetes.io/projected/d8469667-f6d8-4f05-98b9-7e48fe11bbb9-kube-api-access-m8spf\") pod \"glance-default-internal-api-0\" (UID: \"d8469667-f6d8-4f05-98b9-7e48fe11bbb9\") " pod="openstack/glance-default-internal-api-0" Dec 10 11:17:39 crc kubenswrapper[4780]: I1210 11:17:39.507972 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d8469667-f6d8-4f05-98b9-7e48fe11bbb9-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"d8469667-f6d8-4f05-98b9-7e48fe11bbb9\") " pod="openstack/glance-default-internal-api-0" Dec 10 11:17:39 crc kubenswrapper[4780]: I1210 11:17:39.735784 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"glance-default-internal-api-0\" (UID: \"d8469667-f6d8-4f05-98b9-7e48fe11bbb9\") " pod="openstack/glance-default-internal-api-0" Dec 10 11:17:39 crc kubenswrapper[4780]: I1210 11:17:39.928751 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Dec 10 11:17:39 crc kubenswrapper[4780]: I1210 11:17:39.963480 4780 scope.go:117] "RemoveContainer" containerID="90f38aa24e74e78a263a82b742bebf91991ff25f2212114904e33abbbd82de16" Dec 10 11:17:39 crc kubenswrapper[4780]: E1210 11:17:39.963992 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xhdr5_openshift-machine-config-operator(6bf1dca1-b191-4796-b326-baac53e84045)\"" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" podUID="6bf1dca1-b191-4796-b326-baac53e84045" Dec 10 11:17:39 crc kubenswrapper[4780]: I1210 11:17:39.989379 4780 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ae84be8f-342b-4f21-9aea-4ebf423af61c" path="/var/lib/kubelet/pods/ae84be8f-342b-4f21-9aea-4ebf423af61c/volumes" Dec 10 11:17:40 crc kubenswrapper[4780]: I1210 11:17:40.091643 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"1c47b7ce-4a43-460f-8386-d1541a901ea2","Type":"ContainerStarted","Data":"7c28bd9c0c28556329a1c3581c699a86ccb0706e4da86907781df7f44ef0bbc6"} Dec 10 11:17:41 crc kubenswrapper[4780]: I1210 11:17:41.693903 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Dec 10 11:17:41 crc kubenswrapper[4780]: W1210 11:17:41.749322 4780 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd8469667_f6d8_4f05_98b9_7e48fe11bbb9.slice/crio-d69e277eeed9b692812eb4df3bd768c3d9d14484981b7394751bcbea49c81196 WatchSource:0}: Error finding container d69e277eeed9b692812eb4df3bd768c3d9d14484981b7394751bcbea49c81196: Status 404 returned error can't find the container with id d69e277eeed9b692812eb4df3bd768c3d9d14484981b7394751bcbea49c81196 Dec 10 11:17:42 crc kubenswrapper[4780]: I1210 11:17:42.208460 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"d8469667-f6d8-4f05-98b9-7e48fe11bbb9","Type":"ContainerStarted","Data":"d69e277eeed9b692812eb4df3bd768c3d9d14484981b7394751bcbea49c81196"} Dec 10 11:17:42 crc kubenswrapper[4780]: I1210 11:17:42.215347 4780 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="1c47b7ce-4a43-460f-8386-d1541a901ea2" containerName="ceilometer-central-agent" containerID="cri-o://cc4676babe1ae564f0cd13928143b80c7ed350e0879138bf5b9c82c3cdfea27f" gracePeriod=30 Dec 10 11:17:42 crc kubenswrapper[4780]: I1210 11:17:42.215419 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Dec 10 11:17:42 crc kubenswrapper[4780]: I1210 11:17:42.215513 4780 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="1c47b7ce-4a43-460f-8386-d1541a901ea2" containerName="ceilometer-notification-agent" containerID="cri-o://22a57e0d43475e2f1965e8a933202d803b3854db130af647c8598504c4ace69f" gracePeriod=30 Dec 10 11:17:42 crc kubenswrapper[4780]: I1210 11:17:42.215547 4780 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="1c47b7ce-4a43-460f-8386-d1541a901ea2" containerName="sg-core" containerID="cri-o://7c28bd9c0c28556329a1c3581c699a86ccb0706e4da86907781df7f44ef0bbc6" gracePeriod=30 Dec 10 11:17:42 crc 
kubenswrapper[4780]: I1210 11:17:42.215753 4780 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="1c47b7ce-4a43-460f-8386-d1541a901ea2" containerName="proxy-httpd" containerID="cri-o://36b8095f177811aa3c6035999f4132d1f77151d77183e566d5829775fdabc031" gracePeriod=30 Dec 10 11:17:42 crc kubenswrapper[4780]: I1210 11:17:42.260832 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=4.359747213 podStartE2EDuration="14.260800716s" podCreationTimestamp="2025-12-10 11:17:28 +0000 UTC" firstStartedPulling="2025-12-10 11:17:31.473500146 +0000 UTC m=+1956.326893589" lastFinishedPulling="2025-12-10 11:17:41.374553649 +0000 UTC m=+1966.227947092" observedRunningTime="2025-12-10 11:17:42.256744732 +0000 UTC m=+1967.110138185" watchObservedRunningTime="2025-12-10 11:17:42.260800716 +0000 UTC m=+1967.114194159" Dec 10 11:17:43 crc kubenswrapper[4780]: I1210 11:17:43.268131 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"d8469667-f6d8-4f05-98b9-7e48fe11bbb9","Type":"ContainerStarted","Data":"a8d485facb6ad03c5916970e442b899a751b2e462b45defe0956c365df509a20"} Dec 10 11:17:43 crc kubenswrapper[4780]: I1210 11:17:43.295829 4780 generic.go:334] "Generic (PLEG): container finished" podID="1c47b7ce-4a43-460f-8386-d1541a901ea2" containerID="7c28bd9c0c28556329a1c3581c699a86ccb0706e4da86907781df7f44ef0bbc6" exitCode=2 Dec 10 11:17:43 crc kubenswrapper[4780]: I1210 11:17:43.295880 4780 generic.go:334] "Generic (PLEG): container finished" podID="1c47b7ce-4a43-460f-8386-d1541a901ea2" containerID="22a57e0d43475e2f1965e8a933202d803b3854db130af647c8598504c4ace69f" exitCode=0 Dec 10 11:17:43 crc kubenswrapper[4780]: I1210 11:17:43.295903 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"1c47b7ce-4a43-460f-8386-d1541a901ea2","Type":"ContainerStarted","Data":"36b8095f177811aa3c6035999f4132d1f77151d77183e566d5829775fdabc031"} Dec 10 11:17:43 crc kubenswrapper[4780]: I1210 11:17:43.295959 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"1c47b7ce-4a43-460f-8386-d1541a901ea2","Type":"ContainerDied","Data":"7c28bd9c0c28556329a1c3581c699a86ccb0706e4da86907781df7f44ef0bbc6"} Dec 10 11:17:43 crc kubenswrapper[4780]: I1210 11:17:43.295971 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"1c47b7ce-4a43-460f-8386-d1541a901ea2","Type":"ContainerDied","Data":"22a57e0d43475e2f1965e8a933202d803b3854db130af647c8598504c4ace69f"} Dec 10 11:17:45 crc kubenswrapper[4780]: I1210 11:17:45.351973 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"d8469667-f6d8-4f05-98b9-7e48fe11bbb9","Type":"ContainerStarted","Data":"eb0fd2a71117805b15ab7f6a3c987dce06889bc39273af685fed565632815fd8"} Dec 10 11:17:45 crc kubenswrapper[4780]: I1210 11:17:45.401538 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-internal-api-0" podStartSLOduration=6.401505403 podStartE2EDuration="6.401505403s" podCreationTimestamp="2025-12-10 11:17:39 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 11:17:45.385509543 +0000 UTC m=+1970.238902986" watchObservedRunningTime="2025-12-10 11:17:45.401505403 +0000 UTC m=+1970.254898846" Dec 10 
11:17:48 crc kubenswrapper[4780]: I1210 11:17:48.799887 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Dec 10 11:17:48 crc kubenswrapper[4780]: I1210 11:17:48.801265 4780 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="9fd7774f-b900-4586-a38d-d7fb0e4991f7" containerName="glance-log" containerID="cri-o://1b74e2259d47df651ee8f4292b49232970777bddcdd72773fc974ebf09088744" gracePeriod=30 Dec 10 11:17:48 crc kubenswrapper[4780]: I1210 11:17:48.802280 4780 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="9fd7774f-b900-4586-a38d-d7fb0e4991f7" containerName="glance-httpd" containerID="cri-o://b18c757728afae5e081c02b751aec4d477c1c4d1abe695f931c7a8eced470499" gracePeriod=30 Dec 10 11:17:49 crc kubenswrapper[4780]: I1210 11:17:49.457521 4780 generic.go:334] "Generic (PLEG): container finished" podID="9fd7774f-b900-4586-a38d-d7fb0e4991f7" containerID="1b74e2259d47df651ee8f4292b49232970777bddcdd72773fc974ebf09088744" exitCode=143 Dec 10 11:17:49 crc kubenswrapper[4780]: I1210 11:17:49.458146 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"9fd7774f-b900-4586-a38d-d7fb0e4991f7","Type":"ContainerDied","Data":"1b74e2259d47df651ee8f4292b49232970777bddcdd72773fc974ebf09088744"} Dec 10 11:17:49 crc kubenswrapper[4780]: I1210 11:17:49.929491 4780 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0" Dec 10 11:17:49 crc kubenswrapper[4780]: I1210 11:17:49.930150 4780 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0" Dec 10 11:17:49 crc kubenswrapper[4780]: I1210 11:17:49.998910 4780 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0" Dec 10 11:17:50 crc kubenswrapper[4780]: I1210 11:17:50.015133 4780 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0" Dec 10 11:17:50 crc kubenswrapper[4780]: I1210 11:17:50.491659 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Dec 10 11:17:50 crc kubenswrapper[4780]: I1210 11:17:50.491773 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Dec 10 11:17:52 crc kubenswrapper[4780]: I1210 11:17:52.030044 4780 scope.go:117] "RemoveContainer" containerID="90f38aa24e74e78a263a82b742bebf91991ff25f2212114904e33abbbd82de16" Dec 10 11:17:52 crc kubenswrapper[4780]: E1210 11:17:52.030518 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xhdr5_openshift-machine-config-operator(6bf1dca1-b191-4796-b326-baac53e84045)\"" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" podUID="6bf1dca1-b191-4796-b326-baac53e84045" Dec 10 11:17:52 crc kubenswrapper[4780]: I1210 11:17:52.806027 4780 generic.go:334] "Generic (PLEG): container finished" podID="9fd7774f-b900-4586-a38d-d7fb0e4991f7" containerID="b18c757728afae5e081c02b751aec4d477c1c4d1abe695f931c7a8eced470499" exitCode=0 Dec 10 11:17:52 crc kubenswrapper[4780]: I1210 11:17:52.806115 4780 kubelet.go:2453] 
"SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"9fd7774f-b900-4586-a38d-d7fb0e4991f7","Type":"ContainerDied","Data":"b18c757728afae5e081c02b751aec4d477c1c4d1abe695f931c7a8eced470499"} Dec 10 11:17:53 crc kubenswrapper[4780]: I1210 11:17:53.884981 4780 generic.go:334] "Generic (PLEG): container finished" podID="1c47b7ce-4a43-460f-8386-d1541a901ea2" containerID="cc4676babe1ae564f0cd13928143b80c7ed350e0879138bf5b9c82c3cdfea27f" exitCode=0 Dec 10 11:17:53 crc kubenswrapper[4780]: I1210 11:17:53.885061 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"1c47b7ce-4a43-460f-8386-d1541a901ea2","Type":"ContainerDied","Data":"cc4676babe1ae564f0cd13928143b80c7ed350e0879138bf5b9c82c3cdfea27f"} Dec 10 11:17:54 crc kubenswrapper[4780]: I1210 11:17:54.934422 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0" Dec 10 11:17:54 crc kubenswrapper[4780]: I1210 11:17:54.934544 4780 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Dec 10 11:17:54 crc kubenswrapper[4780]: I1210 11:17:54.947207 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0" Dec 10 11:17:57 crc kubenswrapper[4780]: E1210 11:17:57.519302 4780 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-nova-conductor:current-podified" Dec 10 11:17:57 crc kubenswrapper[4780]: E1210 11:17:57.520745 4780 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:nova-cell0-conductor-db-sync,Image:quay.io/podified-antelope-centos9/openstack-nova-conductor:current-podified,Command:[/bin/bash],Args:[-c 
/usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CELL_NAME,Value:cell0,ValueFrom:nil,},EnvVar{Name:KOLLA_BOOTSTRAP,Value:true,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config-data,ReadOnly:false,MountPath:/var/lib/openstack/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:scripts,ReadOnly:false,MountPath:/var/lib/openstack/bin,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:false,MountPath:/var/lib/kolla/config_files/config.json,SubPath:nova-conductor-dbsync-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-crhlm,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42436,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod nova-cell0-conductor-db-sync-slw88_openstack(a94e2b82-6087-4791-8f15-e1ca9e25028b): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Dec 10 11:17:57 crc kubenswrapper[4780]: E1210 11:17:57.522063 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"nova-cell0-conductor-db-sync\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/nova-cell0-conductor-db-sync-slw88" podUID="a94e2b82-6087-4791-8f15-e1ca9e25028b" Dec 10 11:17:58 crc kubenswrapper[4780]: E1210 11:17:58.006665 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"nova-cell0-conductor-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-nova-conductor:current-podified\\\"\"" pod="openstack/nova-cell0-conductor-db-sync-slw88" podUID="a94e2b82-6087-4791-8f15-e1ca9e25028b" Dec 10 11:17:58 crc kubenswrapper[4780]: I1210 11:17:58.282542 4780 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Dec 10 11:17:58 crc kubenswrapper[4780]: I1210 11:17:58.372711 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7b98m\" (UniqueName: \"kubernetes.io/projected/9fd7774f-b900-4586-a38d-d7fb0e4991f7-kube-api-access-7b98m\") pod \"9fd7774f-b900-4586-a38d-d7fb0e4991f7\" (UID: \"9fd7774f-b900-4586-a38d-d7fb0e4991f7\") " Dec 10 11:17:58 crc kubenswrapper[4780]: I1210 11:17:58.373294 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9fd7774f-b900-4586-a38d-d7fb0e4991f7-combined-ca-bundle\") pod \"9fd7774f-b900-4586-a38d-d7fb0e4991f7\" (UID: \"9fd7774f-b900-4586-a38d-d7fb0e4991f7\") " Dec 10 11:17:58 crc kubenswrapper[4780]: I1210 11:17:58.373341 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/9fd7774f-b900-4586-a38d-d7fb0e4991f7-public-tls-certs\") pod \"9fd7774f-b900-4586-a38d-d7fb0e4991f7\" (UID: \"9fd7774f-b900-4586-a38d-d7fb0e4991f7\") " Dec 10 11:17:58 crc kubenswrapper[4780]: I1210 11:17:58.373451 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"9fd7774f-b900-4586-a38d-d7fb0e4991f7\" (UID: \"9fd7774f-b900-4586-a38d-d7fb0e4991f7\") " Dec 10 11:17:58 crc kubenswrapper[4780]: I1210 11:17:58.373496 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9fd7774f-b900-4586-a38d-d7fb0e4991f7-config-data\") pod \"9fd7774f-b900-4586-a38d-d7fb0e4991f7\" (UID: \"9fd7774f-b900-4586-a38d-d7fb0e4991f7\") " Dec 10 11:17:58 crc kubenswrapper[4780]: I1210 11:17:58.373534 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9fd7774f-b900-4586-a38d-d7fb0e4991f7-logs\") pod \"9fd7774f-b900-4586-a38d-d7fb0e4991f7\" (UID: \"9fd7774f-b900-4586-a38d-d7fb0e4991f7\") " Dec 10 11:17:58 crc kubenswrapper[4780]: I1210 11:17:58.373605 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/9fd7774f-b900-4586-a38d-d7fb0e4991f7-httpd-run\") pod \"9fd7774f-b900-4586-a38d-d7fb0e4991f7\" (UID: \"9fd7774f-b900-4586-a38d-d7fb0e4991f7\") " Dec 10 11:17:58 crc kubenswrapper[4780]: I1210 11:17:58.373688 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9fd7774f-b900-4586-a38d-d7fb0e4991f7-scripts\") pod \"9fd7774f-b900-4586-a38d-d7fb0e4991f7\" (UID: \"9fd7774f-b900-4586-a38d-d7fb0e4991f7\") " Dec 10 11:17:58 crc kubenswrapper[4780]: I1210 11:17:58.375569 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9fd7774f-b900-4586-a38d-d7fb0e4991f7-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "9fd7774f-b900-4586-a38d-d7fb0e4991f7" (UID: "9fd7774f-b900-4586-a38d-d7fb0e4991f7"). InnerVolumeSpecName "httpd-run". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 11:17:58 crc kubenswrapper[4780]: I1210 11:17:58.376135 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9fd7774f-b900-4586-a38d-d7fb0e4991f7-logs" (OuterVolumeSpecName: "logs") pod "9fd7774f-b900-4586-a38d-d7fb0e4991f7" (UID: "9fd7774f-b900-4586-a38d-d7fb0e4991f7"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 11:17:58 crc kubenswrapper[4780]: I1210 11:17:58.388487 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9fd7774f-b900-4586-a38d-d7fb0e4991f7-kube-api-access-7b98m" (OuterVolumeSpecName: "kube-api-access-7b98m") pod "9fd7774f-b900-4586-a38d-d7fb0e4991f7" (UID: "9fd7774f-b900-4586-a38d-d7fb0e4991f7"). InnerVolumeSpecName "kube-api-access-7b98m". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:17:58 crc kubenswrapper[4780]: I1210 11:17:58.399261 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9fd7774f-b900-4586-a38d-d7fb0e4991f7-scripts" (OuterVolumeSpecName: "scripts") pod "9fd7774f-b900-4586-a38d-d7fb0e4991f7" (UID: "9fd7774f-b900-4586-a38d-d7fb0e4991f7"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:17:58 crc kubenswrapper[4780]: I1210 11:17:58.401264 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage08-crc" (OuterVolumeSpecName: "glance") pod "9fd7774f-b900-4586-a38d-d7fb0e4991f7" (UID: "9fd7774f-b900-4586-a38d-d7fb0e4991f7"). InnerVolumeSpecName "local-storage08-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Dec 10 11:17:58 crc kubenswrapper[4780]: I1210 11:17:58.481380 4780 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") on node \"crc\" " Dec 10 11:17:58 crc kubenswrapper[4780]: I1210 11:17:58.492039 4780 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9fd7774f-b900-4586-a38d-d7fb0e4991f7-logs\") on node \"crc\" DevicePath \"\"" Dec 10 11:17:58 crc kubenswrapper[4780]: I1210 11:17:58.492849 4780 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/9fd7774f-b900-4586-a38d-d7fb0e4991f7-httpd-run\") on node \"crc\" DevicePath \"\"" Dec 10 11:17:58 crc kubenswrapper[4780]: I1210 11:17:58.493010 4780 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9fd7774f-b900-4586-a38d-d7fb0e4991f7-scripts\") on node \"crc\" DevicePath \"\"" Dec 10 11:17:58 crc kubenswrapper[4780]: I1210 11:17:58.493123 4780 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7b98m\" (UniqueName: \"kubernetes.io/projected/9fd7774f-b900-4586-a38d-d7fb0e4991f7-kube-api-access-7b98m\") on node \"crc\" DevicePath \"\"" Dec 10 11:17:58 crc kubenswrapper[4780]: I1210 11:17:58.504244 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9fd7774f-b900-4586-a38d-d7fb0e4991f7-config-data" (OuterVolumeSpecName: "config-data") pod "9fd7774f-b900-4586-a38d-d7fb0e4991f7" (UID: "9fd7774f-b900-4586-a38d-d7fb0e4991f7"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:17:58 crc kubenswrapper[4780]: I1210 11:17:58.558438 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9fd7774f-b900-4586-a38d-d7fb0e4991f7-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "9fd7774f-b900-4586-a38d-d7fb0e4991f7" (UID: "9fd7774f-b900-4586-a38d-d7fb0e4991f7"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:17:58 crc kubenswrapper[4780]: I1210 11:17:58.577705 4780 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage08-crc" (UniqueName: "kubernetes.io/local-volume/local-storage08-crc") on node "crc" Dec 10 11:17:58 crc kubenswrapper[4780]: I1210 11:17:58.588120 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9fd7774f-b900-4586-a38d-d7fb0e4991f7-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "9fd7774f-b900-4586-a38d-d7fb0e4991f7" (UID: "9fd7774f-b900-4586-a38d-d7fb0e4991f7"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:17:58 crc kubenswrapper[4780]: I1210 11:17:58.596735 4780 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9fd7774f-b900-4586-a38d-d7fb0e4991f7-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 10 11:17:58 crc kubenswrapper[4780]: I1210 11:17:58.596791 4780 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/9fd7774f-b900-4586-a38d-d7fb0e4991f7-public-tls-certs\") on node \"crc\" DevicePath \"\"" Dec 10 11:17:58 crc kubenswrapper[4780]: I1210 11:17:58.596806 4780 reconciler_common.go:293] "Volume detached for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") on node \"crc\" DevicePath \"\"" Dec 10 11:17:58 crc kubenswrapper[4780]: I1210 11:17:58.596817 4780 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9fd7774f-b900-4586-a38d-d7fb0e4991f7-config-data\") on node \"crc\" DevicePath \"\"" Dec 10 11:17:59 crc kubenswrapper[4780]: I1210 11:17:59.008185 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"9fd7774f-b900-4586-a38d-d7fb0e4991f7","Type":"ContainerDied","Data":"8279fe947159ba9cef1744101f0f5695758c5c9a055587e636bb351ac846c30b"} Dec 10 11:17:59 crc kubenswrapper[4780]: I1210 11:17:59.008267 4780 scope.go:117] "RemoveContainer" containerID="b18c757728afae5e081c02b751aec4d477c1c4d1abe695f931c7a8eced470499" Dec 10 11:17:59 crc kubenswrapper[4780]: I1210 11:17:59.008313 4780 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Dec 10 11:17:59 crc kubenswrapper[4780]: I1210 11:17:59.474007 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Dec 10 11:17:59 crc kubenswrapper[4780]: I1210 11:17:59.503214 4780 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-external-api-0"] Dec 10 11:17:59 crc kubenswrapper[4780]: I1210 11:17:59.518498 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-external-api-0"] Dec 10 11:17:59 crc kubenswrapper[4780]: E1210 11:17:59.520583 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9fd7774f-b900-4586-a38d-d7fb0e4991f7" containerName="glance-httpd" Dec 10 11:17:59 crc kubenswrapper[4780]: I1210 11:17:59.520841 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="9fd7774f-b900-4586-a38d-d7fb0e4991f7" containerName="glance-httpd" Dec 10 11:17:59 crc kubenswrapper[4780]: E1210 11:17:59.521002 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9fd7774f-b900-4586-a38d-d7fb0e4991f7" containerName="glance-log" Dec 10 11:17:59 crc kubenswrapper[4780]: I1210 11:17:59.521124 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="9fd7774f-b900-4586-a38d-d7fb0e4991f7" containerName="glance-log" Dec 10 11:17:59 crc kubenswrapper[4780]: I1210 11:17:59.521584 4780 memory_manager.go:354] "RemoveStaleState removing state" podUID="9fd7774f-b900-4586-a38d-d7fb0e4991f7" containerName="glance-log" Dec 10 11:17:59 crc kubenswrapper[4780]: I1210 11:17:59.521739 4780 memory_manager.go:354] "RemoveStaleState removing state" podUID="9fd7774f-b900-4586-a38d-d7fb0e4991f7" containerName="glance-httpd" Dec 10 11:17:59 crc kubenswrapper[4780]: I1210 11:17:59.530610 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Dec 10 11:17:59 crc kubenswrapper[4780]: I1210 11:17:59.540221 4780 scope.go:117] "RemoveContainer" containerID="1b74e2259d47df651ee8f4292b49232970777bddcdd72773fc974ebf09088744" Dec 10 11:17:59 crc kubenswrapper[4780]: I1210 11:17:59.541361 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-external-config-data" Dec 10 11:17:59 crc kubenswrapper[4780]: I1210 11:17:59.542263 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-public-svc" Dec 10 11:17:59 crc kubenswrapper[4780]: I1210 11:17:59.585691 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Dec 10 11:17:59 crc kubenswrapper[4780]: I1210 11:17:59.647702 4780 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ceilometer-0" podUID="1c47b7ce-4a43-460f-8386-d1541a901ea2" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 503" Dec 10 11:17:59 crc kubenswrapper[4780]: I1210 11:17:59.724299 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/19d9e474-ad81-4f65-aad2-de223d59c35f-config-data\") pod \"glance-default-external-api-0\" (UID: \"19d9e474-ad81-4f65-aad2-de223d59c35f\") " pod="openstack/glance-default-external-api-0" Dec 10 11:17:59 crc kubenswrapper[4780]: I1210 11:17:59.724511 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"glance-default-external-api-0\" (UID: \"19d9e474-ad81-4f65-aad2-de223d59c35f\") " pod="openstack/glance-default-external-api-0" Dec 10 11:17:59 crc kubenswrapper[4780]: I1210 11:17:59.724540 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/19d9e474-ad81-4f65-aad2-de223d59c35f-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"19d9e474-ad81-4f65-aad2-de223d59c35f\") " pod="openstack/glance-default-external-api-0" Dec 10 11:17:59 crc kubenswrapper[4780]: I1210 11:17:59.724564 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/19d9e474-ad81-4f65-aad2-de223d59c35f-logs\") pod \"glance-default-external-api-0\" (UID: \"19d9e474-ad81-4f65-aad2-de223d59c35f\") " pod="openstack/glance-default-external-api-0" Dec 10 11:17:59 crc kubenswrapper[4780]: I1210 11:17:59.724752 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/19d9e474-ad81-4f65-aad2-de223d59c35f-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"19d9e474-ad81-4f65-aad2-de223d59c35f\") " pod="openstack/glance-default-external-api-0" Dec 10 11:17:59 crc kubenswrapper[4780]: I1210 11:17:59.724827 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/19d9e474-ad81-4f65-aad2-de223d59c35f-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"19d9e474-ad81-4f65-aad2-de223d59c35f\") " pod="openstack/glance-default-external-api-0" Dec 10 11:17:59 crc kubenswrapper[4780]: I1210 11:17:59.724957 4780 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/19d9e474-ad81-4f65-aad2-de223d59c35f-scripts\") pod \"glance-default-external-api-0\" (UID: \"19d9e474-ad81-4f65-aad2-de223d59c35f\") " pod="openstack/glance-default-external-api-0" Dec 10 11:17:59 crc kubenswrapper[4780]: I1210 11:17:59.725051 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-l8ps5\" (UniqueName: \"kubernetes.io/projected/19d9e474-ad81-4f65-aad2-de223d59c35f-kube-api-access-l8ps5\") pod \"glance-default-external-api-0\" (UID: \"19d9e474-ad81-4f65-aad2-de223d59c35f\") " pod="openstack/glance-default-external-api-0" Dec 10 11:17:59 crc kubenswrapper[4780]: I1210 11:17:59.828623 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/19d9e474-ad81-4f65-aad2-de223d59c35f-scripts\") pod \"glance-default-external-api-0\" (UID: \"19d9e474-ad81-4f65-aad2-de223d59c35f\") " pod="openstack/glance-default-external-api-0" Dec 10 11:17:59 crc kubenswrapper[4780]: I1210 11:17:59.828729 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-l8ps5\" (UniqueName: \"kubernetes.io/projected/19d9e474-ad81-4f65-aad2-de223d59c35f-kube-api-access-l8ps5\") pod \"glance-default-external-api-0\" (UID: \"19d9e474-ad81-4f65-aad2-de223d59c35f\") " pod="openstack/glance-default-external-api-0" Dec 10 11:17:59 crc kubenswrapper[4780]: I1210 11:17:59.828789 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/19d9e474-ad81-4f65-aad2-de223d59c35f-config-data\") pod \"glance-default-external-api-0\" (UID: \"19d9e474-ad81-4f65-aad2-de223d59c35f\") " pod="openstack/glance-default-external-api-0" Dec 10 11:17:59 crc kubenswrapper[4780]: I1210 11:17:59.828905 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"glance-default-external-api-0\" (UID: \"19d9e474-ad81-4f65-aad2-de223d59c35f\") " pod="openstack/glance-default-external-api-0" Dec 10 11:17:59 crc kubenswrapper[4780]: I1210 11:17:59.828976 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/19d9e474-ad81-4f65-aad2-de223d59c35f-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"19d9e474-ad81-4f65-aad2-de223d59c35f\") " pod="openstack/glance-default-external-api-0" Dec 10 11:17:59 crc kubenswrapper[4780]: I1210 11:17:59.829001 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/19d9e474-ad81-4f65-aad2-de223d59c35f-logs\") pod \"glance-default-external-api-0\" (UID: \"19d9e474-ad81-4f65-aad2-de223d59c35f\") " pod="openstack/glance-default-external-api-0" Dec 10 11:17:59 crc kubenswrapper[4780]: I1210 11:17:59.829157 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/19d9e474-ad81-4f65-aad2-de223d59c35f-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"19d9e474-ad81-4f65-aad2-de223d59c35f\") " pod="openstack/glance-default-external-api-0" Dec 10 11:17:59 crc kubenswrapper[4780]: I1210 11:17:59.829207 4780 reconciler_common.go:218] "operationExecutor.MountVolume 
started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/19d9e474-ad81-4f65-aad2-de223d59c35f-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"19d9e474-ad81-4f65-aad2-de223d59c35f\") " pod="openstack/glance-default-external-api-0" Dec 10 11:17:59 crc kubenswrapper[4780]: I1210 11:17:59.830835 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/19d9e474-ad81-4f65-aad2-de223d59c35f-logs\") pod \"glance-default-external-api-0\" (UID: \"19d9e474-ad81-4f65-aad2-de223d59c35f\") " pod="openstack/glance-default-external-api-0" Dec 10 11:17:59 crc kubenswrapper[4780]: I1210 11:17:59.831405 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/19d9e474-ad81-4f65-aad2-de223d59c35f-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"19d9e474-ad81-4f65-aad2-de223d59c35f\") " pod="openstack/glance-default-external-api-0" Dec 10 11:17:59 crc kubenswrapper[4780]: I1210 11:17:59.836885 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/19d9e474-ad81-4f65-aad2-de223d59c35f-scripts\") pod \"glance-default-external-api-0\" (UID: \"19d9e474-ad81-4f65-aad2-de223d59c35f\") " pod="openstack/glance-default-external-api-0" Dec 10 11:17:59 crc kubenswrapper[4780]: I1210 11:17:59.838444 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/19d9e474-ad81-4f65-aad2-de223d59c35f-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"19d9e474-ad81-4f65-aad2-de223d59c35f\") " pod="openstack/glance-default-external-api-0" Dec 10 11:17:59 crc kubenswrapper[4780]: I1210 11:17:59.838715 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/19d9e474-ad81-4f65-aad2-de223d59c35f-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"19d9e474-ad81-4f65-aad2-de223d59c35f\") " pod="openstack/glance-default-external-api-0" Dec 10 11:17:59 crc kubenswrapper[4780]: I1210 11:17:59.841550 4780 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"glance-default-external-api-0\" (UID: \"19d9e474-ad81-4f65-aad2-de223d59c35f\") device mount path \"/mnt/openstack/pv08\"" pod="openstack/glance-default-external-api-0" Dec 10 11:17:59 crc kubenswrapper[4780]: I1210 11:17:59.851063 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/19d9e474-ad81-4f65-aad2-de223d59c35f-config-data\") pod \"glance-default-external-api-0\" (UID: \"19d9e474-ad81-4f65-aad2-de223d59c35f\") " pod="openstack/glance-default-external-api-0" Dec 10 11:17:59 crc kubenswrapper[4780]: I1210 11:17:59.861338 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-l8ps5\" (UniqueName: \"kubernetes.io/projected/19d9e474-ad81-4f65-aad2-de223d59c35f-kube-api-access-l8ps5\") pod \"glance-default-external-api-0\" (UID: \"19d9e474-ad81-4f65-aad2-de223d59c35f\") " pod="openstack/glance-default-external-api-0" Dec 10 11:17:59 crc kubenswrapper[4780]: I1210 11:17:59.906116 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod 
\"glance-default-external-api-0\" (UID: \"19d9e474-ad81-4f65-aad2-de223d59c35f\") " pod="openstack/glance-default-external-api-0" Dec 10 11:17:59 crc kubenswrapper[4780]: I1210 11:17:59.984134 4780 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9fd7774f-b900-4586-a38d-d7fb0e4991f7" path="/var/lib/kubelet/pods/9fd7774f-b900-4586-a38d-d7fb0e4991f7/volumes" Dec 10 11:18:00 crc kubenswrapper[4780]: I1210 11:18:00.191180 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Dec 10 11:18:01 crc kubenswrapper[4780]: I1210 11:18:01.138058 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Dec 10 11:18:02 crc kubenswrapper[4780]: I1210 11:18:02.090733 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"19d9e474-ad81-4f65-aad2-de223d59c35f","Type":"ContainerStarted","Data":"b7fcde6d372f2029f8802491d1657969f2b92761965b96b36efc82a3903bde6a"} Dec 10 11:18:02 crc kubenswrapper[4780]: I1210 11:18:02.091815 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"19d9e474-ad81-4f65-aad2-de223d59c35f","Type":"ContainerStarted","Data":"d5a0d8097c22ba09f25d1af27f697574e776969e771f672f7f6246a5af59f002"} Dec 10 11:18:03 crc kubenswrapper[4780]: I1210 11:18:03.137075 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"19d9e474-ad81-4f65-aad2-de223d59c35f","Type":"ContainerStarted","Data":"5f0b3b3c9ad19fe99b4962c0a50763e831d71ca094d286f7854cce27cdde3f69"} Dec 10 11:18:03 crc kubenswrapper[4780]: I1210 11:18:03.576911 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-external-api-0" podStartSLOduration=4.576872811 podStartE2EDuration="4.576872811s" podCreationTimestamp="2025-12-10 11:17:59 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 11:18:03.576170833 +0000 UTC m=+1988.429564276" watchObservedRunningTime="2025-12-10 11:18:03.576872811 +0000 UTC m=+1988.430266254" Dec 10 11:18:06 crc kubenswrapper[4780]: I1210 11:18:06.011455 4780 scope.go:117] "RemoveContainer" containerID="90f38aa24e74e78a263a82b742bebf91991ff25f2212114904e33abbbd82de16" Dec 10 11:18:06 crc kubenswrapper[4780]: E1210 11:18:06.012548 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xhdr5_openshift-machine-config-operator(6bf1dca1-b191-4796-b326-baac53e84045)\"" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" podUID="6bf1dca1-b191-4796-b326-baac53e84045" Dec 10 11:18:10 crc kubenswrapper[4780]: I1210 11:18:10.193991 4780 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0" Dec 10 11:18:10 crc kubenswrapper[4780]: I1210 11:18:10.194892 4780 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0" Dec 10 11:18:10 crc kubenswrapper[4780]: I1210 11:18:10.545083 4780 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0" Dec 10 11:18:10 crc kubenswrapper[4780]: I1210 11:18:10.546352 4780 
kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0" Dec 10 11:18:10 crc kubenswrapper[4780]: I1210 11:18:10.549426 4780 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0" Dec 10 11:18:11 crc kubenswrapper[4780]: I1210 11:18:11.406617 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0" Dec 10 11:18:13 crc kubenswrapper[4780]: I1210 11:18:13.326963 4780 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Dec 10 11:18:14 crc kubenswrapper[4780]: I1210 11:18:14.286641 4780 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Dec 10 11:18:14 crc kubenswrapper[4780]: I1210 11:18:14.428539 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/1c47b7ce-4a43-460f-8386-d1541a901ea2-sg-core-conf-yaml\") pod \"1c47b7ce-4a43-460f-8386-d1541a901ea2\" (UID: \"1c47b7ce-4a43-460f-8386-d1541a901ea2\") " Dec 10 11:18:14 crc kubenswrapper[4780]: I1210 11:18:14.428656 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1c47b7ce-4a43-460f-8386-d1541a901ea2-run-httpd\") pod \"1c47b7ce-4a43-460f-8386-d1541a901ea2\" (UID: \"1c47b7ce-4a43-460f-8386-d1541a901ea2\") " Dec 10 11:18:14 crc kubenswrapper[4780]: I1210 11:18:14.428712 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1c47b7ce-4a43-460f-8386-d1541a901ea2-combined-ca-bundle\") pod \"1c47b7ce-4a43-460f-8386-d1541a901ea2\" (UID: \"1c47b7ce-4a43-460f-8386-d1541a901ea2\") " Dec 10 11:18:14 crc kubenswrapper[4780]: I1210 11:18:14.473512 4780 generic.go:334] "Generic (PLEG): container finished" podID="1c47b7ce-4a43-460f-8386-d1541a901ea2" containerID="36b8095f177811aa3c6035999f4132d1f77151d77183e566d5829775fdabc031" exitCode=137 Dec 10 11:18:14 crc kubenswrapper[4780]: I1210 11:18:14.473591 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"1c47b7ce-4a43-460f-8386-d1541a901ea2","Type":"ContainerDied","Data":"36b8095f177811aa3c6035999f4132d1f77151d77183e566d5829775fdabc031"} Dec 10 11:18:14 crc kubenswrapper[4780]: I1210 11:18:14.473633 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"1c47b7ce-4a43-460f-8386-d1541a901ea2","Type":"ContainerDied","Data":"266f48cb76a59d57845bd1ada8add0c320c8a13a0ab4990ced4840221ce5efe0"} Dec 10 11:18:14 crc kubenswrapper[4780]: I1210 11:18:14.473658 4780 scope.go:117] "RemoveContainer" containerID="36b8095f177811aa3c6035999f4132d1f77151d77183e566d5829775fdabc031" Dec 10 11:18:14 crc kubenswrapper[4780]: I1210 11:18:14.473975 4780 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Dec 10 11:18:14 crc kubenswrapper[4780]: I1210 11:18:14.476476 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1c47b7ce-4a43-460f-8386-d1541a901ea2-scripts\") pod \"1c47b7ce-4a43-460f-8386-d1541a901ea2\" (UID: \"1c47b7ce-4a43-460f-8386-d1541a901ea2\") " Dec 10 11:18:14 crc kubenswrapper[4780]: I1210 11:18:14.476581 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1c47b7ce-4a43-460f-8386-d1541a901ea2-config-data\") pod \"1c47b7ce-4a43-460f-8386-d1541a901ea2\" (UID: \"1c47b7ce-4a43-460f-8386-d1541a901ea2\") " Dec 10 11:18:14 crc kubenswrapper[4780]: I1210 11:18:14.476648 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-skksj\" (UniqueName: \"kubernetes.io/projected/1c47b7ce-4a43-460f-8386-d1541a901ea2-kube-api-access-skksj\") pod \"1c47b7ce-4a43-460f-8386-d1541a901ea2\" (UID: \"1c47b7ce-4a43-460f-8386-d1541a901ea2\") " Dec 10 11:18:14 crc kubenswrapper[4780]: I1210 11:18:14.476766 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1c47b7ce-4a43-460f-8386-d1541a901ea2-log-httpd\") pod \"1c47b7ce-4a43-460f-8386-d1541a901ea2\" (UID: \"1c47b7ce-4a43-460f-8386-d1541a901ea2\") " Dec 10 11:18:14 crc kubenswrapper[4780]: I1210 11:18:14.481518 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1c47b7ce-4a43-460f-8386-d1541a901ea2-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "1c47b7ce-4a43-460f-8386-d1541a901ea2" (UID: "1c47b7ce-4a43-460f-8386-d1541a901ea2"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 11:18:14 crc kubenswrapper[4780]: I1210 11:18:14.493805 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1c47b7ce-4a43-460f-8386-d1541a901ea2-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "1c47b7ce-4a43-460f-8386-d1541a901ea2" (UID: "1c47b7ce-4a43-460f-8386-d1541a901ea2"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 11:18:14 crc kubenswrapper[4780]: I1210 11:18:14.496546 4780 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1c47b7ce-4a43-460f-8386-d1541a901ea2-log-httpd\") on node \"crc\" DevicePath \"\"" Dec 10 11:18:14 crc kubenswrapper[4780]: I1210 11:18:14.496579 4780 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1c47b7ce-4a43-460f-8386-d1541a901ea2-run-httpd\") on node \"crc\" DevicePath \"\"" Dec 10 11:18:14 crc kubenswrapper[4780]: I1210 11:18:14.497282 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1c47b7ce-4a43-460f-8386-d1541a901ea2-kube-api-access-skksj" (OuterVolumeSpecName: "kube-api-access-skksj") pod "1c47b7ce-4a43-460f-8386-d1541a901ea2" (UID: "1c47b7ce-4a43-460f-8386-d1541a901ea2"). InnerVolumeSpecName "kube-api-access-skksj". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:18:14 crc kubenswrapper[4780]: I1210 11:18:14.546435 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1c47b7ce-4a43-460f-8386-d1541a901ea2-scripts" (OuterVolumeSpecName: "scripts") pod "1c47b7ce-4a43-460f-8386-d1541a901ea2" (UID: "1c47b7ce-4a43-460f-8386-d1541a901ea2"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:18:14 crc kubenswrapper[4780]: I1210 11:18:14.546523 4780 scope.go:117] "RemoveContainer" containerID="7c28bd9c0c28556329a1c3581c699a86ccb0706e4da86907781df7f44ef0bbc6" Dec 10 11:18:14 crc kubenswrapper[4780]: I1210 11:18:14.608799 4780 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1c47b7ce-4a43-460f-8386-d1541a901ea2-scripts\") on node \"crc\" DevicePath \"\"" Dec 10 11:18:14 crc kubenswrapper[4780]: I1210 11:18:14.608847 4780 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-skksj\" (UniqueName: \"kubernetes.io/projected/1c47b7ce-4a43-460f-8386-d1541a901ea2-kube-api-access-skksj\") on node \"crc\" DevicePath \"\"" Dec 10 11:18:14 crc kubenswrapper[4780]: I1210 11:18:14.652193 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1c47b7ce-4a43-460f-8386-d1541a901ea2-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "1c47b7ce-4a43-460f-8386-d1541a901ea2" (UID: "1c47b7ce-4a43-460f-8386-d1541a901ea2"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:18:14 crc kubenswrapper[4780]: I1210 11:18:14.711762 4780 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/1c47b7ce-4a43-460f-8386-d1541a901ea2-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Dec 10 11:18:14 crc kubenswrapper[4780]: I1210 11:18:14.717585 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1c47b7ce-4a43-460f-8386-d1541a901ea2-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "1c47b7ce-4a43-460f-8386-d1541a901ea2" (UID: "1c47b7ce-4a43-460f-8386-d1541a901ea2"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:18:14 crc kubenswrapper[4780]: I1210 11:18:14.735133 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1c47b7ce-4a43-460f-8386-d1541a901ea2-config-data" (OuterVolumeSpecName: "config-data") pod "1c47b7ce-4a43-460f-8386-d1541a901ea2" (UID: "1c47b7ce-4a43-460f-8386-d1541a901ea2"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:18:14 crc kubenswrapper[4780]: I1210 11:18:14.814647 4780 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1c47b7ce-4a43-460f-8386-d1541a901ea2-config-data\") on node \"crc\" DevicePath \"\"" Dec 10 11:18:14 crc kubenswrapper[4780]: I1210 11:18:14.815231 4780 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1c47b7ce-4a43-460f-8386-d1541a901ea2-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 10 11:18:14 crc kubenswrapper[4780]: I1210 11:18:14.874613 4780 scope.go:117] "RemoveContainer" containerID="22a57e0d43475e2f1965e8a933202d803b3854db130af647c8598504c4ace69f" Dec 10 11:18:14 crc kubenswrapper[4780]: I1210 11:18:14.969368 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Dec 10 11:18:14 crc kubenswrapper[4780]: I1210 11:18:14.995928 4780 scope.go:117] "RemoveContainer" containerID="cc4676babe1ae564f0cd13928143b80c7ed350e0879138bf5b9c82c3cdfea27f" Dec 10 11:18:14 crc kubenswrapper[4780]: I1210 11:18:14.996145 4780 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Dec 10 11:18:15 crc kubenswrapper[4780]: I1210 11:18:15.014292 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Dec 10 11:18:15 crc kubenswrapper[4780]: E1210 11:18:15.025014 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1c47b7ce-4a43-460f-8386-d1541a901ea2" containerName="ceilometer-central-agent" Dec 10 11:18:15 crc kubenswrapper[4780]: I1210 11:18:15.025068 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="1c47b7ce-4a43-460f-8386-d1541a901ea2" containerName="ceilometer-central-agent" Dec 10 11:18:15 crc kubenswrapper[4780]: E1210 11:18:15.025092 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1c47b7ce-4a43-460f-8386-d1541a901ea2" containerName="ceilometer-notification-agent" Dec 10 11:18:15 crc kubenswrapper[4780]: I1210 11:18:15.025098 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="1c47b7ce-4a43-460f-8386-d1541a901ea2" containerName="ceilometer-notification-agent" Dec 10 11:18:15 crc kubenswrapper[4780]: E1210 11:18:15.025107 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1c47b7ce-4a43-460f-8386-d1541a901ea2" containerName="proxy-httpd" Dec 10 11:18:15 crc kubenswrapper[4780]: I1210 11:18:15.025114 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="1c47b7ce-4a43-460f-8386-d1541a901ea2" containerName="proxy-httpd" Dec 10 11:18:15 crc kubenswrapper[4780]: E1210 11:18:15.025167 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1c47b7ce-4a43-460f-8386-d1541a901ea2" containerName="sg-core" Dec 10 11:18:15 crc kubenswrapper[4780]: I1210 11:18:15.025174 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="1c47b7ce-4a43-460f-8386-d1541a901ea2" containerName="sg-core" Dec 10 11:18:15 crc kubenswrapper[4780]: I1210 11:18:15.025467 4780 memory_manager.go:354] "RemoveStaleState removing state" podUID="1c47b7ce-4a43-460f-8386-d1541a901ea2" containerName="sg-core" Dec 10 11:18:15 crc kubenswrapper[4780]: I1210 11:18:15.025488 4780 memory_manager.go:354] "RemoveStaleState removing state" podUID="1c47b7ce-4a43-460f-8386-d1541a901ea2" containerName="ceilometer-central-agent" Dec 10 11:18:15 crc kubenswrapper[4780]: I1210 11:18:15.025496 4780 memory_manager.go:354] "RemoveStaleState removing state" 
podUID="1c47b7ce-4a43-460f-8386-d1541a901ea2" containerName="ceilometer-notification-agent" Dec 10 11:18:15 crc kubenswrapper[4780]: I1210 11:18:15.025514 4780 memory_manager.go:354] "RemoveStaleState removing state" podUID="1c47b7ce-4a43-460f-8386-d1541a901ea2" containerName="proxy-httpd" Dec 10 11:18:15 crc kubenswrapper[4780]: I1210 11:18:15.028157 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Dec 10 11:18:15 crc kubenswrapper[4780]: I1210 11:18:15.033620 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Dec 10 11:18:15 crc kubenswrapper[4780]: I1210 11:18:15.034172 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Dec 10 11:18:15 crc kubenswrapper[4780]: I1210 11:18:15.034508 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Dec 10 11:18:15 crc kubenswrapper[4780]: I1210 11:18:15.125888 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d7731ed4-cbc6-4007-a419-c5456956d232-scripts\") pod \"ceilometer-0\" (UID: \"d7731ed4-cbc6-4007-a419-c5456956d232\") " pod="openstack/ceilometer-0" Dec 10 11:18:15 crc kubenswrapper[4780]: I1210 11:18:15.126022 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d7731ed4-cbc6-4007-a419-c5456956d232-log-httpd\") pod \"ceilometer-0\" (UID: \"d7731ed4-cbc6-4007-a419-c5456956d232\") " pod="openstack/ceilometer-0" Dec 10 11:18:15 crc kubenswrapper[4780]: I1210 11:18:15.127147 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/d7731ed4-cbc6-4007-a419-c5456956d232-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"d7731ed4-cbc6-4007-a419-c5456956d232\") " pod="openstack/ceilometer-0" Dec 10 11:18:15 crc kubenswrapper[4780]: I1210 11:18:15.127412 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d7731ed4-cbc6-4007-a419-c5456956d232-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"d7731ed4-cbc6-4007-a419-c5456956d232\") " pod="openstack/ceilometer-0" Dec 10 11:18:15 crc kubenswrapper[4780]: I1210 11:18:15.127511 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d7731ed4-cbc6-4007-a419-c5456956d232-config-data\") pod \"ceilometer-0\" (UID: \"d7731ed4-cbc6-4007-a419-c5456956d232\") " pod="openstack/ceilometer-0" Dec 10 11:18:15 crc kubenswrapper[4780]: I1210 11:18:15.127732 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d7731ed4-cbc6-4007-a419-c5456956d232-run-httpd\") pod \"ceilometer-0\" (UID: \"d7731ed4-cbc6-4007-a419-c5456956d232\") " pod="openstack/ceilometer-0" Dec 10 11:18:15 crc kubenswrapper[4780]: I1210 11:18:15.127804 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-45vm9\" (UniqueName: \"kubernetes.io/projected/d7731ed4-cbc6-4007-a419-c5456956d232-kube-api-access-45vm9\") pod \"ceilometer-0\" (UID: \"d7731ed4-cbc6-4007-a419-c5456956d232\") " pod="openstack/ceilometer-0" Dec 10 
11:18:15 crc kubenswrapper[4780]: I1210 11:18:15.186270 4780 scope.go:117] "RemoveContainer" containerID="36b8095f177811aa3c6035999f4132d1f77151d77183e566d5829775fdabc031" Dec 10 11:18:15 crc kubenswrapper[4780]: E1210 11:18:15.190662 4780 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"36b8095f177811aa3c6035999f4132d1f77151d77183e566d5829775fdabc031\": container with ID starting with 36b8095f177811aa3c6035999f4132d1f77151d77183e566d5829775fdabc031 not found: ID does not exist" containerID="36b8095f177811aa3c6035999f4132d1f77151d77183e566d5829775fdabc031" Dec 10 11:18:15 crc kubenswrapper[4780]: I1210 11:18:15.190720 4780 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"36b8095f177811aa3c6035999f4132d1f77151d77183e566d5829775fdabc031"} err="failed to get container status \"36b8095f177811aa3c6035999f4132d1f77151d77183e566d5829775fdabc031\": rpc error: code = NotFound desc = could not find container \"36b8095f177811aa3c6035999f4132d1f77151d77183e566d5829775fdabc031\": container with ID starting with 36b8095f177811aa3c6035999f4132d1f77151d77183e566d5829775fdabc031 not found: ID does not exist" Dec 10 11:18:15 crc kubenswrapper[4780]: I1210 11:18:15.190753 4780 scope.go:117] "RemoveContainer" containerID="7c28bd9c0c28556329a1c3581c699a86ccb0706e4da86907781df7f44ef0bbc6" Dec 10 11:18:15 crc kubenswrapper[4780]: E1210 11:18:15.196317 4780 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7c28bd9c0c28556329a1c3581c699a86ccb0706e4da86907781df7f44ef0bbc6\": container with ID starting with 7c28bd9c0c28556329a1c3581c699a86ccb0706e4da86907781df7f44ef0bbc6 not found: ID does not exist" containerID="7c28bd9c0c28556329a1c3581c699a86ccb0706e4da86907781df7f44ef0bbc6" Dec 10 11:18:15 crc kubenswrapper[4780]: I1210 11:18:15.196963 4780 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7c28bd9c0c28556329a1c3581c699a86ccb0706e4da86907781df7f44ef0bbc6"} err="failed to get container status \"7c28bd9c0c28556329a1c3581c699a86ccb0706e4da86907781df7f44ef0bbc6\": rpc error: code = NotFound desc = could not find container \"7c28bd9c0c28556329a1c3581c699a86ccb0706e4da86907781df7f44ef0bbc6\": container with ID starting with 7c28bd9c0c28556329a1c3581c699a86ccb0706e4da86907781df7f44ef0bbc6 not found: ID does not exist" Dec 10 11:18:15 crc kubenswrapper[4780]: I1210 11:18:15.197034 4780 scope.go:117] "RemoveContainer" containerID="22a57e0d43475e2f1965e8a933202d803b3854db130af647c8598504c4ace69f" Dec 10 11:18:15 crc kubenswrapper[4780]: E1210 11:18:15.202333 4780 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"22a57e0d43475e2f1965e8a933202d803b3854db130af647c8598504c4ace69f\": container with ID starting with 22a57e0d43475e2f1965e8a933202d803b3854db130af647c8598504c4ace69f not found: ID does not exist" containerID="22a57e0d43475e2f1965e8a933202d803b3854db130af647c8598504c4ace69f" Dec 10 11:18:15 crc kubenswrapper[4780]: I1210 11:18:15.202399 4780 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"22a57e0d43475e2f1965e8a933202d803b3854db130af647c8598504c4ace69f"} err="failed to get container status \"22a57e0d43475e2f1965e8a933202d803b3854db130af647c8598504c4ace69f\": rpc error: code = NotFound desc = could not find container 
\"22a57e0d43475e2f1965e8a933202d803b3854db130af647c8598504c4ace69f\": container with ID starting with 22a57e0d43475e2f1965e8a933202d803b3854db130af647c8598504c4ace69f not found: ID does not exist" Dec 10 11:18:15 crc kubenswrapper[4780]: I1210 11:18:15.202442 4780 scope.go:117] "RemoveContainer" containerID="cc4676babe1ae564f0cd13928143b80c7ed350e0879138bf5b9c82c3cdfea27f" Dec 10 11:18:15 crc kubenswrapper[4780]: E1210 11:18:15.203167 4780 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"cc4676babe1ae564f0cd13928143b80c7ed350e0879138bf5b9c82c3cdfea27f\": container with ID starting with cc4676babe1ae564f0cd13928143b80c7ed350e0879138bf5b9c82c3cdfea27f not found: ID does not exist" containerID="cc4676babe1ae564f0cd13928143b80c7ed350e0879138bf5b9c82c3cdfea27f" Dec 10 11:18:15 crc kubenswrapper[4780]: I1210 11:18:15.203218 4780 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cc4676babe1ae564f0cd13928143b80c7ed350e0879138bf5b9c82c3cdfea27f"} err="failed to get container status \"cc4676babe1ae564f0cd13928143b80c7ed350e0879138bf5b9c82c3cdfea27f\": rpc error: code = NotFound desc = could not find container \"cc4676babe1ae564f0cd13928143b80c7ed350e0879138bf5b9c82c3cdfea27f\": container with ID starting with cc4676babe1ae564f0cd13928143b80c7ed350e0879138bf5b9c82c3cdfea27f not found: ID does not exist" Dec 10 11:18:15 crc kubenswrapper[4780]: I1210 11:18:15.230721 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d7731ed4-cbc6-4007-a419-c5456956d232-run-httpd\") pod \"ceilometer-0\" (UID: \"d7731ed4-cbc6-4007-a419-c5456956d232\") " pod="openstack/ceilometer-0" Dec 10 11:18:15 crc kubenswrapper[4780]: I1210 11:18:15.230795 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-45vm9\" (UniqueName: \"kubernetes.io/projected/d7731ed4-cbc6-4007-a419-c5456956d232-kube-api-access-45vm9\") pod \"ceilometer-0\" (UID: \"d7731ed4-cbc6-4007-a419-c5456956d232\") " pod="openstack/ceilometer-0" Dec 10 11:18:15 crc kubenswrapper[4780]: I1210 11:18:15.231037 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d7731ed4-cbc6-4007-a419-c5456956d232-scripts\") pod \"ceilometer-0\" (UID: \"d7731ed4-cbc6-4007-a419-c5456956d232\") " pod="openstack/ceilometer-0" Dec 10 11:18:15 crc kubenswrapper[4780]: I1210 11:18:15.231662 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d7731ed4-cbc6-4007-a419-c5456956d232-run-httpd\") pod \"ceilometer-0\" (UID: \"d7731ed4-cbc6-4007-a419-c5456956d232\") " pod="openstack/ceilometer-0" Dec 10 11:18:15 crc kubenswrapper[4780]: I1210 11:18:15.235121 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d7731ed4-cbc6-4007-a419-c5456956d232-log-httpd\") pod \"ceilometer-0\" (UID: \"d7731ed4-cbc6-4007-a419-c5456956d232\") " pod="openstack/ceilometer-0" Dec 10 11:18:15 crc kubenswrapper[4780]: I1210 11:18:15.235261 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/d7731ed4-cbc6-4007-a419-c5456956d232-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"d7731ed4-cbc6-4007-a419-c5456956d232\") " pod="openstack/ceilometer-0" Dec 10 11:18:15 
crc kubenswrapper[4780]: I1210 11:18:15.235506 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d7731ed4-cbc6-4007-a419-c5456956d232-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"d7731ed4-cbc6-4007-a419-c5456956d232\") " pod="openstack/ceilometer-0" Dec 10 11:18:15 crc kubenswrapper[4780]: I1210 11:18:15.235620 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d7731ed4-cbc6-4007-a419-c5456956d232-config-data\") pod \"ceilometer-0\" (UID: \"d7731ed4-cbc6-4007-a419-c5456956d232\") " pod="openstack/ceilometer-0" Dec 10 11:18:15 crc kubenswrapper[4780]: I1210 11:18:15.239506 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d7731ed4-cbc6-4007-a419-c5456956d232-log-httpd\") pod \"ceilometer-0\" (UID: \"d7731ed4-cbc6-4007-a419-c5456956d232\") " pod="openstack/ceilometer-0" Dec 10 11:18:15 crc kubenswrapper[4780]: I1210 11:18:15.245801 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/d7731ed4-cbc6-4007-a419-c5456956d232-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"d7731ed4-cbc6-4007-a419-c5456956d232\") " pod="openstack/ceilometer-0" Dec 10 11:18:15 crc kubenswrapper[4780]: I1210 11:18:15.246258 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d7731ed4-cbc6-4007-a419-c5456956d232-scripts\") pod \"ceilometer-0\" (UID: \"d7731ed4-cbc6-4007-a419-c5456956d232\") " pod="openstack/ceilometer-0" Dec 10 11:18:15 crc kubenswrapper[4780]: I1210 11:18:15.246374 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d7731ed4-cbc6-4007-a419-c5456956d232-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"d7731ed4-cbc6-4007-a419-c5456956d232\") " pod="openstack/ceilometer-0" Dec 10 11:18:15 crc kubenswrapper[4780]: I1210 11:18:15.246777 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d7731ed4-cbc6-4007-a419-c5456956d232-config-data\") pod \"ceilometer-0\" (UID: \"d7731ed4-cbc6-4007-a419-c5456956d232\") " pod="openstack/ceilometer-0" Dec 10 11:18:15 crc kubenswrapper[4780]: I1210 11:18:15.253913 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-45vm9\" (UniqueName: \"kubernetes.io/projected/d7731ed4-cbc6-4007-a419-c5456956d232-kube-api-access-45vm9\") pod \"ceilometer-0\" (UID: \"d7731ed4-cbc6-4007-a419-c5456956d232\") " pod="openstack/ceilometer-0" Dec 10 11:18:15 crc kubenswrapper[4780]: I1210 11:18:15.467033 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Dec 10 11:18:15 crc kubenswrapper[4780]: I1210 11:18:15.518558 4780 generic.go:334] "Generic (PLEG): container finished" podID="77c41a0f-d539-496a-85f5-f6aec31747a9" containerID="b96b27bbc2aaeec41555ddb02793a2d4d03ca6571a8980464824c3158cc557f4" exitCode=137 Dec 10 11:18:15 crc kubenswrapper[4780]: I1210 11:18:15.518649 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-7bc68bff5f-xvzgg" event={"ID":"77c41a0f-d539-496a-85f5-f6aec31747a9","Type":"ContainerDied","Data":"b96b27bbc2aaeec41555ddb02793a2d4d03ca6571a8980464824c3158cc557f4"} Dec 10 11:18:15 crc kubenswrapper[4780]: I1210 11:18:15.528029 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-slw88" event={"ID":"a94e2b82-6087-4791-8f15-e1ca9e25028b","Type":"ContainerStarted","Data":"c36d091e25c263274562bbc15d2b6e015d2fa99fce1a12ab74953cb7b4d4452b"} Dec 10 11:18:15 crc kubenswrapper[4780]: I1210 11:18:15.571056 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-conductor-db-sync-slw88" podStartSLOduration=3.295309652 podStartE2EDuration="55.571020341s" podCreationTimestamp="2025-12-10 11:17:20 +0000 UTC" firstStartedPulling="2025-12-10 11:17:22.386122159 +0000 UTC m=+1947.239515602" lastFinishedPulling="2025-12-10 11:18:14.661832848 +0000 UTC m=+1999.515226291" observedRunningTime="2025-12-10 11:18:15.563451947 +0000 UTC m=+2000.416845390" watchObservedRunningTime="2025-12-10 11:18:15.571020341 +0000 UTC m=+2000.424413784" Dec 10 11:18:15 crc kubenswrapper[4780]: I1210 11:18:15.575725 4780 generic.go:334] "Generic (PLEG): container finished" podID="19201430-5ecc-4a0b-ad28-5cdfff8d037a" containerID="17f44b9919535fa55e47ec91ea4abc4f74f00d2e89680cb06d340a52b696ac25" exitCode=137 Dec 10 11:18:15 crc kubenswrapper[4780]: I1210 11:18:15.575785 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-54f5bc9f87-vp45v" event={"ID":"19201430-5ecc-4a0b-ad28-5cdfff8d037a","Type":"ContainerDied","Data":"17f44b9919535fa55e47ec91ea4abc4f74f00d2e89680cb06d340a52b696ac25"} Dec 10 11:18:15 crc kubenswrapper[4780]: I1210 11:18:15.778685 4780 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-api-7bc68bff5f-xvzgg" Dec 10 11:18:15 crc kubenswrapper[4780]: I1210 11:18:15.804149 4780 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-cfnapi-54f5bc9f87-vp45v" Dec 10 11:18:15 crc kubenswrapper[4780]: I1210 11:18:15.861953 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/77c41a0f-d539-496a-85f5-f6aec31747a9-config-data-custom\") pod \"77c41a0f-d539-496a-85f5-f6aec31747a9\" (UID: \"77c41a0f-d539-496a-85f5-f6aec31747a9\") " Dec 10 11:18:15 crc kubenswrapper[4780]: I1210 11:18:15.862047 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/77c41a0f-d539-496a-85f5-f6aec31747a9-combined-ca-bundle\") pod \"77c41a0f-d539-496a-85f5-f6aec31747a9\" (UID: \"77c41a0f-d539-496a-85f5-f6aec31747a9\") " Dec 10 11:18:15 crc kubenswrapper[4780]: I1210 11:18:15.862215 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/77c41a0f-d539-496a-85f5-f6aec31747a9-config-data\") pod \"77c41a0f-d539-496a-85f5-f6aec31747a9\" (UID: \"77c41a0f-d539-496a-85f5-f6aec31747a9\") " Dec 10 11:18:15 crc kubenswrapper[4780]: I1210 11:18:15.862262 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7nztz\" (UniqueName: \"kubernetes.io/projected/77c41a0f-d539-496a-85f5-f6aec31747a9-kube-api-access-7nztz\") pod \"77c41a0f-d539-496a-85f5-f6aec31747a9\" (UID: \"77c41a0f-d539-496a-85f5-f6aec31747a9\") " Dec 10 11:18:15 crc kubenswrapper[4780]: I1210 11:18:15.875676 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/77c41a0f-d539-496a-85f5-f6aec31747a9-kube-api-access-7nztz" (OuterVolumeSpecName: "kube-api-access-7nztz") pod "77c41a0f-d539-496a-85f5-f6aec31747a9" (UID: "77c41a0f-d539-496a-85f5-f6aec31747a9"). InnerVolumeSpecName "kube-api-access-7nztz". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:18:15 crc kubenswrapper[4780]: I1210 11:18:15.883747 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/77c41a0f-d539-496a-85f5-f6aec31747a9-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "77c41a0f-d539-496a-85f5-f6aec31747a9" (UID: "77c41a0f-d539-496a-85f5-f6aec31747a9"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:18:15 crc kubenswrapper[4780]: I1210 11:18:15.936587 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/77c41a0f-d539-496a-85f5-f6aec31747a9-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "77c41a0f-d539-496a-85f5-f6aec31747a9" (UID: "77c41a0f-d539-496a-85f5-f6aec31747a9"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:18:15 crc kubenswrapper[4780]: I1210 11:18:15.973517 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/19201430-5ecc-4a0b-ad28-5cdfff8d037a-config-data\") pod \"19201430-5ecc-4a0b-ad28-5cdfff8d037a\" (UID: \"19201430-5ecc-4a0b-ad28-5cdfff8d037a\") " Dec 10 11:18:15 crc kubenswrapper[4780]: I1210 11:18:15.973579 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/19201430-5ecc-4a0b-ad28-5cdfff8d037a-combined-ca-bundle\") pod \"19201430-5ecc-4a0b-ad28-5cdfff8d037a\" (UID: \"19201430-5ecc-4a0b-ad28-5cdfff8d037a\") " Dec 10 11:18:15 crc kubenswrapper[4780]: I1210 11:18:15.974013 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/19201430-5ecc-4a0b-ad28-5cdfff8d037a-config-data-custom\") pod \"19201430-5ecc-4a0b-ad28-5cdfff8d037a\" (UID: \"19201430-5ecc-4a0b-ad28-5cdfff8d037a\") " Dec 10 11:18:15 crc kubenswrapper[4780]: I1210 11:18:15.974055 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5dbgc\" (UniqueName: \"kubernetes.io/projected/19201430-5ecc-4a0b-ad28-5cdfff8d037a-kube-api-access-5dbgc\") pod \"19201430-5ecc-4a0b-ad28-5cdfff8d037a\" (UID: \"19201430-5ecc-4a0b-ad28-5cdfff8d037a\") " Dec 10 11:18:15 crc kubenswrapper[4780]: I1210 11:18:15.975555 4780 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/77c41a0f-d539-496a-85f5-f6aec31747a9-config-data-custom\") on node \"crc\" DevicePath \"\"" Dec 10 11:18:15 crc kubenswrapper[4780]: I1210 11:18:15.975587 4780 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/77c41a0f-d539-496a-85f5-f6aec31747a9-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 10 11:18:15 crc kubenswrapper[4780]: I1210 11:18:15.975601 4780 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7nztz\" (UniqueName: \"kubernetes.io/projected/77c41a0f-d539-496a-85f5-f6aec31747a9-kube-api-access-7nztz\") on node \"crc\" DevicePath \"\"" Dec 10 11:18:16 crc kubenswrapper[4780]: I1210 11:18:16.017269 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/19201430-5ecc-4a0b-ad28-5cdfff8d037a-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "19201430-5ecc-4a0b-ad28-5cdfff8d037a" (UID: "19201430-5ecc-4a0b-ad28-5cdfff8d037a"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:18:16 crc kubenswrapper[4780]: I1210 11:18:16.022707 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/19201430-5ecc-4a0b-ad28-5cdfff8d037a-kube-api-access-5dbgc" (OuterVolumeSpecName: "kube-api-access-5dbgc") pod "19201430-5ecc-4a0b-ad28-5cdfff8d037a" (UID: "19201430-5ecc-4a0b-ad28-5cdfff8d037a"). InnerVolumeSpecName "kube-api-access-5dbgc". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:18:16 crc kubenswrapper[4780]: I1210 11:18:16.040390 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/77c41a0f-d539-496a-85f5-f6aec31747a9-config-data" (OuterVolumeSpecName: "config-data") pod "77c41a0f-d539-496a-85f5-f6aec31747a9" (UID: "77c41a0f-d539-496a-85f5-f6aec31747a9"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:18:16 crc kubenswrapper[4780]: I1210 11:18:16.040770 4780 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1c47b7ce-4a43-460f-8386-d1541a901ea2" path="/var/lib/kubelet/pods/1c47b7ce-4a43-460f-8386-d1541a901ea2/volumes" Dec 10 11:18:16 crc kubenswrapper[4780]: I1210 11:18:16.050279 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0" Dec 10 11:18:16 crc kubenswrapper[4780]: I1210 11:18:16.050426 4780 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Dec 10 11:18:16 crc kubenswrapper[4780]: I1210 11:18:16.060045 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/19201430-5ecc-4a0b-ad28-5cdfff8d037a-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "19201430-5ecc-4a0b-ad28-5cdfff8d037a" (UID: "19201430-5ecc-4a0b-ad28-5cdfff8d037a"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:18:16 crc kubenswrapper[4780]: I1210 11:18:16.093349 4780 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/77c41a0f-d539-496a-85f5-f6aec31747a9-config-data\") on node \"crc\" DevicePath \"\"" Dec 10 11:18:16 crc kubenswrapper[4780]: I1210 11:18:16.093481 4780 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/19201430-5ecc-4a0b-ad28-5cdfff8d037a-config-data-custom\") on node \"crc\" DevicePath \"\"" Dec 10 11:18:16 crc kubenswrapper[4780]: I1210 11:18:16.093501 4780 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5dbgc\" (UniqueName: \"kubernetes.io/projected/19201430-5ecc-4a0b-ad28-5cdfff8d037a-kube-api-access-5dbgc\") on node \"crc\" DevicePath \"\"" Dec 10 11:18:16 crc kubenswrapper[4780]: I1210 11:18:16.093517 4780 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/19201430-5ecc-4a0b-ad28-5cdfff8d037a-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 10 11:18:16 crc kubenswrapper[4780]: I1210 11:18:16.097666 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/19201430-5ecc-4a0b-ad28-5cdfff8d037a-config-data" (OuterVolumeSpecName: "config-data") pod "19201430-5ecc-4a0b-ad28-5cdfff8d037a" (UID: "19201430-5ecc-4a0b-ad28-5cdfff8d037a"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:18:16 crc kubenswrapper[4780]: I1210 11:18:16.174212 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Dec 10 11:18:16 crc kubenswrapper[4780]: I1210 11:18:16.199832 4780 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/19201430-5ecc-4a0b-ad28-5cdfff8d037a-config-data\") on node \"crc\" DevicePath \"\"" Dec 10 11:18:16 crc kubenswrapper[4780]: I1210 11:18:16.226852 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0" Dec 10 11:18:16 crc kubenswrapper[4780]: I1210 11:18:16.632111 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"d7731ed4-cbc6-4007-a419-c5456956d232","Type":"ContainerStarted","Data":"265e460fbafa435e604300f2eb169c1e05f9aee348896866346ad712de204dfb"} Dec 10 11:18:16 crc kubenswrapper[4780]: I1210 11:18:16.636768 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-54f5bc9f87-vp45v" event={"ID":"19201430-5ecc-4a0b-ad28-5cdfff8d037a","Type":"ContainerDied","Data":"bef4f52749f8d014f7e3c3c3c0760e193426372dec7eb4e1491517da38add708"} Dec 10 11:18:16 crc kubenswrapper[4780]: I1210 11:18:16.636845 4780 scope.go:117] "RemoveContainer" containerID="17f44b9919535fa55e47ec91ea4abc4f74f00d2e89680cb06d340a52b696ac25" Dec 10 11:18:16 crc kubenswrapper[4780]: I1210 11:18:16.636862 4780 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-cfnapi-54f5bc9f87-vp45v" Dec 10 11:18:16 crc kubenswrapper[4780]: I1210 11:18:16.646757 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-7bc68bff5f-xvzgg" event={"ID":"77c41a0f-d539-496a-85f5-f6aec31747a9","Type":"ContainerDied","Data":"85a8773a4f2e26c958af39d8523e5408587485475860847326a643cc87187ca1"} Dec 10 11:18:16 crc kubenswrapper[4780]: I1210 11:18:16.646950 4780 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-api-7bc68bff5f-xvzgg" Dec 10 11:18:16 crc kubenswrapper[4780]: I1210 11:18:16.688628 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-cfnapi-54f5bc9f87-vp45v"] Dec 10 11:18:16 crc kubenswrapper[4780]: I1210 11:18:16.702171 4780 scope.go:117] "RemoveContainer" containerID="b96b27bbc2aaeec41555ddb02793a2d4d03ca6571a8980464824c3158cc557f4" Dec 10 11:18:16 crc kubenswrapper[4780]: I1210 11:18:16.710481 4780 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/heat-cfnapi-54f5bc9f87-vp45v"] Dec 10 11:18:16 crc kubenswrapper[4780]: I1210 11:18:16.727603 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-api-7bc68bff5f-xvzgg"] Dec 10 11:18:16 crc kubenswrapper[4780]: I1210 11:18:16.754851 4780 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/heat-api-7bc68bff5f-xvzgg"] Dec 10 11:18:17 crc kubenswrapper[4780]: I1210 11:18:17.061521 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Dec 10 11:18:17 crc kubenswrapper[4780]: I1210 11:18:17.692718 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"d7731ed4-cbc6-4007-a419-c5456956d232","Type":"ContainerStarted","Data":"3524f60b5ac763e3dfed56ecb35c16779feb6c66dbf3ffecfbece3bed2802838"} Dec 10 11:18:18 crc kubenswrapper[4780]: I1210 11:18:18.002647 4780 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="19201430-5ecc-4a0b-ad28-5cdfff8d037a" path="/var/lib/kubelet/pods/19201430-5ecc-4a0b-ad28-5cdfff8d037a/volumes" Dec 10 11:18:18 crc kubenswrapper[4780]: I1210 11:18:18.003541 4780 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="77c41a0f-d539-496a-85f5-f6aec31747a9" path="/var/lib/kubelet/pods/77c41a0f-d539-496a-85f5-f6aec31747a9/volumes" Dec 10 11:18:18 crc kubenswrapper[4780]: I1210 11:18:18.731284 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"d7731ed4-cbc6-4007-a419-c5456956d232","Type":"ContainerStarted","Data":"b59e33255b2ab3d00300021af658f6cef33b4ad55f6ed95e369581b181e92bd1"} Dec 10 11:18:19 crc kubenswrapper[4780]: I1210 11:18:19.750213 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"d7731ed4-cbc6-4007-a419-c5456956d232","Type":"ContainerStarted","Data":"cea57b5631ebbabea706e89c719e3c6301663a04dc57fc2c6d3038d10a9422eb"} Dec 10 11:18:19 crc kubenswrapper[4780]: I1210 11:18:19.959564 4780 scope.go:117] "RemoveContainer" containerID="90f38aa24e74e78a263a82b742bebf91991ff25f2212114904e33abbbd82de16" Dec 10 11:18:19 crc kubenswrapper[4780]: E1210 11:18:19.960165 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xhdr5_openshift-machine-config-operator(6bf1dca1-b191-4796-b326-baac53e84045)\"" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" podUID="6bf1dca1-b191-4796-b326-baac53e84045" Dec 10 11:18:20 crc kubenswrapper[4780]: I1210 11:18:20.811327 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"d7731ed4-cbc6-4007-a419-c5456956d232","Type":"ContainerStarted","Data":"7a0280e3ac1a74621ecc734f590c368ba2156d95fee23949e9fecc630dcd6191"} Dec 10 11:18:20 crc kubenswrapper[4780]: I1210 11:18:20.811745 4780 kuberuntime_container.go:808] "Killing container with a grace period" 
pod="openstack/ceilometer-0" podUID="d7731ed4-cbc6-4007-a419-c5456956d232" containerName="ceilometer-central-agent" containerID="cri-o://3524f60b5ac763e3dfed56ecb35c16779feb6c66dbf3ffecfbece3bed2802838" gracePeriod=30 Dec 10 11:18:20 crc kubenswrapper[4780]: I1210 11:18:20.811803 4780 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="d7731ed4-cbc6-4007-a419-c5456956d232" containerName="proxy-httpd" containerID="cri-o://7a0280e3ac1a74621ecc734f590c368ba2156d95fee23949e9fecc630dcd6191" gracePeriod=30 Dec 10 11:18:20 crc kubenswrapper[4780]: I1210 11:18:20.811901 4780 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="d7731ed4-cbc6-4007-a419-c5456956d232" containerName="ceilometer-notification-agent" containerID="cri-o://b59e33255b2ab3d00300021af658f6cef33b4ad55f6ed95e369581b181e92bd1" gracePeriod=30 Dec 10 11:18:20 crc kubenswrapper[4780]: I1210 11:18:20.811803 4780 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="d7731ed4-cbc6-4007-a419-c5456956d232" containerName="sg-core" containerID="cri-o://cea57b5631ebbabea706e89c719e3c6301663a04dc57fc2c6d3038d10a9422eb" gracePeriod=30 Dec 10 11:18:20 crc kubenswrapper[4780]: I1210 11:18:20.812212 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Dec 10 11:18:20 crc kubenswrapper[4780]: I1210 11:18:20.841909 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.843685088 podStartE2EDuration="6.841881345s" podCreationTimestamp="2025-12-10 11:18:14 +0000 UTC" firstStartedPulling="2025-12-10 11:18:16.181852101 +0000 UTC m=+2001.035245534" lastFinishedPulling="2025-12-10 11:18:20.180048348 +0000 UTC m=+2005.033441791" observedRunningTime="2025-12-10 11:18:20.839757211 +0000 UTC m=+2005.693150654" watchObservedRunningTime="2025-12-10 11:18:20.841881345 +0000 UTC m=+2005.695274788" Dec 10 11:18:21 crc kubenswrapper[4780]: E1210 11:18:21.173544 4780 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd7731ed4_cbc6_4007_a419_c5456956d232.slice/crio-conmon-7a0280e3ac1a74621ecc734f590c368ba2156d95fee23949e9fecc630dcd6191.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd7731ed4_cbc6_4007_a419_c5456956d232.slice/crio-7a0280e3ac1a74621ecc734f590c368ba2156d95fee23949e9fecc630dcd6191.scope\": RecentStats: unable to find data in memory cache]" Dec 10 11:18:22 crc kubenswrapper[4780]: I1210 11:18:22.117444 4780 generic.go:334] "Generic (PLEG): container finished" podID="d7731ed4-cbc6-4007-a419-c5456956d232" containerID="7a0280e3ac1a74621ecc734f590c368ba2156d95fee23949e9fecc630dcd6191" exitCode=0 Dec 10 11:18:22 crc kubenswrapper[4780]: I1210 11:18:22.117500 4780 generic.go:334] "Generic (PLEG): container finished" podID="d7731ed4-cbc6-4007-a419-c5456956d232" containerID="cea57b5631ebbabea706e89c719e3c6301663a04dc57fc2c6d3038d10a9422eb" exitCode=2 Dec 10 11:18:22 crc kubenswrapper[4780]: I1210 11:18:22.117518 4780 generic.go:334] "Generic (PLEG): container finished" podID="d7731ed4-cbc6-4007-a419-c5456956d232" containerID="b59e33255b2ab3d00300021af658f6cef33b4ad55f6ed95e369581b181e92bd1" exitCode=0 Dec 10 11:18:22 crc kubenswrapper[4780]: I1210 11:18:22.117541 4780 kubelet.go:2453] 
"SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"d7731ed4-cbc6-4007-a419-c5456956d232","Type":"ContainerDied","Data":"7a0280e3ac1a74621ecc734f590c368ba2156d95fee23949e9fecc630dcd6191"} Dec 10 11:18:22 crc kubenswrapper[4780]: I1210 11:18:22.117619 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"d7731ed4-cbc6-4007-a419-c5456956d232","Type":"ContainerDied","Data":"cea57b5631ebbabea706e89c719e3c6301663a04dc57fc2c6d3038d10a9422eb"} Dec 10 11:18:22 crc kubenswrapper[4780]: I1210 11:18:22.117632 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"d7731ed4-cbc6-4007-a419-c5456956d232","Type":"ContainerDied","Data":"b59e33255b2ab3d00300021af658f6cef33b4ad55f6ed95e369581b181e92bd1"} Dec 10 11:18:31 crc kubenswrapper[4780]: I1210 11:18:31.616318 4780 generic.go:334] "Generic (PLEG): container finished" podID="d7731ed4-cbc6-4007-a419-c5456956d232" containerID="3524f60b5ac763e3dfed56ecb35c16779feb6c66dbf3ffecfbece3bed2802838" exitCode=0 Dec 10 11:18:31 crc kubenswrapper[4780]: I1210 11:18:31.616701 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"d7731ed4-cbc6-4007-a419-c5456956d232","Type":"ContainerDied","Data":"3524f60b5ac763e3dfed56ecb35c16779feb6c66dbf3ffecfbece3bed2802838"} Dec 10 11:18:32 crc kubenswrapper[4780]: I1210 11:18:32.036835 4780 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Dec 10 11:18:32 crc kubenswrapper[4780]: I1210 11:18:32.064009 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d7731ed4-cbc6-4007-a419-c5456956d232-combined-ca-bundle\") pod \"d7731ed4-cbc6-4007-a419-c5456956d232\" (UID: \"d7731ed4-cbc6-4007-a419-c5456956d232\") " Dec 10 11:18:32 crc kubenswrapper[4780]: I1210 11:18:32.064501 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d7731ed4-cbc6-4007-a419-c5456956d232-config-data\") pod \"d7731ed4-cbc6-4007-a419-c5456956d232\" (UID: \"d7731ed4-cbc6-4007-a419-c5456956d232\") " Dec 10 11:18:32 crc kubenswrapper[4780]: I1210 11:18:32.064586 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d7731ed4-cbc6-4007-a419-c5456956d232-scripts\") pod \"d7731ed4-cbc6-4007-a419-c5456956d232\" (UID: \"d7731ed4-cbc6-4007-a419-c5456956d232\") " Dec 10 11:18:32 crc kubenswrapper[4780]: I1210 11:18:32.064618 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d7731ed4-cbc6-4007-a419-c5456956d232-log-httpd\") pod \"d7731ed4-cbc6-4007-a419-c5456956d232\" (UID: \"d7731ed4-cbc6-4007-a419-c5456956d232\") " Dec 10 11:18:32 crc kubenswrapper[4780]: I1210 11:18:32.064638 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d7731ed4-cbc6-4007-a419-c5456956d232-run-httpd\") pod \"d7731ed4-cbc6-4007-a419-c5456956d232\" (UID: \"d7731ed4-cbc6-4007-a419-c5456956d232\") " Dec 10 11:18:32 crc kubenswrapper[4780]: I1210 11:18:32.064718 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/d7731ed4-cbc6-4007-a419-c5456956d232-sg-core-conf-yaml\") pod 
\"d7731ed4-cbc6-4007-a419-c5456956d232\" (UID: \"d7731ed4-cbc6-4007-a419-c5456956d232\") " Dec 10 11:18:32 crc kubenswrapper[4780]: I1210 11:18:32.064749 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-45vm9\" (UniqueName: \"kubernetes.io/projected/d7731ed4-cbc6-4007-a419-c5456956d232-kube-api-access-45vm9\") pod \"d7731ed4-cbc6-4007-a419-c5456956d232\" (UID: \"d7731ed4-cbc6-4007-a419-c5456956d232\") " Dec 10 11:18:32 crc kubenswrapper[4780]: I1210 11:18:32.065744 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d7731ed4-cbc6-4007-a419-c5456956d232-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "d7731ed4-cbc6-4007-a419-c5456956d232" (UID: "d7731ed4-cbc6-4007-a419-c5456956d232"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 11:18:32 crc kubenswrapper[4780]: I1210 11:18:32.065878 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d7731ed4-cbc6-4007-a419-c5456956d232-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "d7731ed4-cbc6-4007-a419-c5456956d232" (UID: "d7731ed4-cbc6-4007-a419-c5456956d232"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 11:18:32 crc kubenswrapper[4780]: I1210 11:18:32.076800 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d7731ed4-cbc6-4007-a419-c5456956d232-scripts" (OuterVolumeSpecName: "scripts") pod "d7731ed4-cbc6-4007-a419-c5456956d232" (UID: "d7731ed4-cbc6-4007-a419-c5456956d232"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:18:32 crc kubenswrapper[4780]: I1210 11:18:32.082003 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d7731ed4-cbc6-4007-a419-c5456956d232-kube-api-access-45vm9" (OuterVolumeSpecName: "kube-api-access-45vm9") pod "d7731ed4-cbc6-4007-a419-c5456956d232" (UID: "d7731ed4-cbc6-4007-a419-c5456956d232"). InnerVolumeSpecName "kube-api-access-45vm9". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:18:32 crc kubenswrapper[4780]: I1210 11:18:32.128772 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d7731ed4-cbc6-4007-a419-c5456956d232-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "d7731ed4-cbc6-4007-a419-c5456956d232" (UID: "d7731ed4-cbc6-4007-a419-c5456956d232"). InnerVolumeSpecName "sg-core-conf-yaml". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:18:32 crc kubenswrapper[4780]: I1210 11:18:32.171477 4780 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d7731ed4-cbc6-4007-a419-c5456956d232-scripts\") on node \"crc\" DevicePath \"\"" Dec 10 11:18:32 crc kubenswrapper[4780]: I1210 11:18:32.171537 4780 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d7731ed4-cbc6-4007-a419-c5456956d232-log-httpd\") on node \"crc\" DevicePath \"\"" Dec 10 11:18:32 crc kubenswrapper[4780]: I1210 11:18:32.171553 4780 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d7731ed4-cbc6-4007-a419-c5456956d232-run-httpd\") on node \"crc\" DevicePath \"\"" Dec 10 11:18:32 crc kubenswrapper[4780]: I1210 11:18:32.171562 4780 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/d7731ed4-cbc6-4007-a419-c5456956d232-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Dec 10 11:18:32 crc kubenswrapper[4780]: I1210 11:18:32.171577 4780 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-45vm9\" (UniqueName: \"kubernetes.io/projected/d7731ed4-cbc6-4007-a419-c5456956d232-kube-api-access-45vm9\") on node \"crc\" DevicePath \"\"" Dec 10 11:18:32 crc kubenswrapper[4780]: I1210 11:18:32.231828 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d7731ed4-cbc6-4007-a419-c5456956d232-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "d7731ed4-cbc6-4007-a419-c5456956d232" (UID: "d7731ed4-cbc6-4007-a419-c5456956d232"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:18:32 crc kubenswrapper[4780]: I1210 11:18:32.269665 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d7731ed4-cbc6-4007-a419-c5456956d232-config-data" (OuterVolumeSpecName: "config-data") pod "d7731ed4-cbc6-4007-a419-c5456956d232" (UID: "d7731ed4-cbc6-4007-a419-c5456956d232"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:18:32 crc kubenswrapper[4780]: I1210 11:18:32.274693 4780 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d7731ed4-cbc6-4007-a419-c5456956d232-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 10 11:18:32 crc kubenswrapper[4780]: I1210 11:18:32.274775 4780 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d7731ed4-cbc6-4007-a419-c5456956d232-config-data\") on node \"crc\" DevicePath \"\"" Dec 10 11:18:32 crc kubenswrapper[4780]: I1210 11:18:32.638265 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"d7731ed4-cbc6-4007-a419-c5456956d232","Type":"ContainerDied","Data":"265e460fbafa435e604300f2eb169c1e05f9aee348896866346ad712de204dfb"} Dec 10 11:18:32 crc kubenswrapper[4780]: I1210 11:18:32.638741 4780 scope.go:117] "RemoveContainer" containerID="7a0280e3ac1a74621ecc734f590c368ba2156d95fee23949e9fecc630dcd6191" Dec 10 11:18:32 crc kubenswrapper[4780]: I1210 11:18:32.639113 4780 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Dec 10 11:18:32 crc kubenswrapper[4780]: I1210 11:18:32.692974 4780 scope.go:117] "RemoveContainer" containerID="cea57b5631ebbabea706e89c719e3c6301663a04dc57fc2c6d3038d10a9422eb" Dec 10 11:18:32 crc kubenswrapper[4780]: I1210 11:18:32.726507 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Dec 10 11:18:32 crc kubenswrapper[4780]: I1210 11:18:32.744952 4780 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Dec 10 11:18:32 crc kubenswrapper[4780]: I1210 11:18:32.759973 4780 scope.go:117] "RemoveContainer" containerID="b59e33255b2ab3d00300021af658f6cef33b4ad55f6ed95e369581b181e92bd1" Dec 10 11:18:32 crc kubenswrapper[4780]: I1210 11:18:32.761096 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Dec 10 11:18:32 crc kubenswrapper[4780]: E1210 11:18:32.761818 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d7731ed4-cbc6-4007-a419-c5456956d232" containerName="ceilometer-central-agent" Dec 10 11:18:32 crc kubenswrapper[4780]: I1210 11:18:32.761836 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="d7731ed4-cbc6-4007-a419-c5456956d232" containerName="ceilometer-central-agent" Dec 10 11:18:32 crc kubenswrapper[4780]: E1210 11:18:32.761866 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d7731ed4-cbc6-4007-a419-c5456956d232" containerName="proxy-httpd" Dec 10 11:18:32 crc kubenswrapper[4780]: I1210 11:18:32.761874 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="d7731ed4-cbc6-4007-a419-c5456956d232" containerName="proxy-httpd" Dec 10 11:18:32 crc kubenswrapper[4780]: E1210 11:18:32.761892 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d7731ed4-cbc6-4007-a419-c5456956d232" containerName="ceilometer-notification-agent" Dec 10 11:18:32 crc kubenswrapper[4780]: I1210 11:18:32.761900 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="d7731ed4-cbc6-4007-a419-c5456956d232" containerName="ceilometer-notification-agent" Dec 10 11:18:32 crc kubenswrapper[4780]: E1210 11:18:32.761913 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="77c41a0f-d539-496a-85f5-f6aec31747a9" containerName="heat-api" Dec 10 11:18:32 crc kubenswrapper[4780]: I1210 11:18:32.761938 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="77c41a0f-d539-496a-85f5-f6aec31747a9" containerName="heat-api" Dec 10 11:18:32 crc kubenswrapper[4780]: E1210 11:18:32.761957 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d7731ed4-cbc6-4007-a419-c5456956d232" containerName="sg-core" Dec 10 11:18:32 crc kubenswrapper[4780]: I1210 11:18:32.761965 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="d7731ed4-cbc6-4007-a419-c5456956d232" containerName="sg-core" Dec 10 11:18:32 crc kubenswrapper[4780]: E1210 11:18:32.761987 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="19201430-5ecc-4a0b-ad28-5cdfff8d037a" containerName="heat-cfnapi" Dec 10 11:18:32 crc kubenswrapper[4780]: I1210 11:18:32.761995 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="19201430-5ecc-4a0b-ad28-5cdfff8d037a" containerName="heat-cfnapi" Dec 10 11:18:32 crc kubenswrapper[4780]: I1210 11:18:32.762300 4780 memory_manager.go:354] "RemoveStaleState removing state" podUID="19201430-5ecc-4a0b-ad28-5cdfff8d037a" containerName="heat-cfnapi" Dec 10 11:18:32 crc kubenswrapper[4780]: I1210 11:18:32.762320 4780 memory_manager.go:354] "RemoveStaleState removing 
state" podUID="d7731ed4-cbc6-4007-a419-c5456956d232" containerName="ceilometer-central-agent" Dec 10 11:18:32 crc kubenswrapper[4780]: I1210 11:18:32.762338 4780 memory_manager.go:354] "RemoveStaleState removing state" podUID="d7731ed4-cbc6-4007-a419-c5456956d232" containerName="proxy-httpd" Dec 10 11:18:32 crc kubenswrapper[4780]: I1210 11:18:32.762347 4780 memory_manager.go:354] "RemoveStaleState removing state" podUID="d7731ed4-cbc6-4007-a419-c5456956d232" containerName="ceilometer-notification-agent" Dec 10 11:18:32 crc kubenswrapper[4780]: I1210 11:18:32.762358 4780 memory_manager.go:354] "RemoveStaleState removing state" podUID="d7731ed4-cbc6-4007-a419-c5456956d232" containerName="sg-core" Dec 10 11:18:32 crc kubenswrapper[4780]: I1210 11:18:32.762367 4780 memory_manager.go:354] "RemoveStaleState removing state" podUID="77c41a0f-d539-496a-85f5-f6aec31747a9" containerName="heat-api" Dec 10 11:18:32 crc kubenswrapper[4780]: I1210 11:18:32.765481 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Dec 10 11:18:32 crc kubenswrapper[4780]: I1210 11:18:32.772358 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Dec 10 11:18:32 crc kubenswrapper[4780]: I1210 11:18:32.802116 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Dec 10 11:18:32 crc kubenswrapper[4780]: I1210 11:18:32.805808 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/929349d7-3f7f-4e86-969d-2fa2b530959d-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"929349d7-3f7f-4e86-969d-2fa2b530959d\") " pod="openstack/ceilometer-0" Dec 10 11:18:32 crc kubenswrapper[4780]: I1210 11:18:32.805877 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/929349d7-3f7f-4e86-969d-2fa2b530959d-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"929349d7-3f7f-4e86-969d-2fa2b530959d\") " pod="openstack/ceilometer-0" Dec 10 11:18:32 crc kubenswrapper[4780]: I1210 11:18:32.805963 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/929349d7-3f7f-4e86-969d-2fa2b530959d-config-data\") pod \"ceilometer-0\" (UID: \"929349d7-3f7f-4e86-969d-2fa2b530959d\") " pod="openstack/ceilometer-0" Dec 10 11:18:32 crc kubenswrapper[4780]: I1210 11:18:32.806086 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/929349d7-3f7f-4e86-969d-2fa2b530959d-run-httpd\") pod \"ceilometer-0\" (UID: \"929349d7-3f7f-4e86-969d-2fa2b530959d\") " pod="openstack/ceilometer-0" Dec 10 11:18:32 crc kubenswrapper[4780]: I1210 11:18:32.806129 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/929349d7-3f7f-4e86-969d-2fa2b530959d-log-httpd\") pod \"ceilometer-0\" (UID: \"929349d7-3f7f-4e86-969d-2fa2b530959d\") " pod="openstack/ceilometer-0" Dec 10 11:18:32 crc kubenswrapper[4780]: I1210 11:18:32.806240 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/929349d7-3f7f-4e86-969d-2fa2b530959d-scripts\") pod \"ceilometer-0\" (UID: 
\"929349d7-3f7f-4e86-969d-2fa2b530959d\") " pod="openstack/ceilometer-0" Dec 10 11:18:32 crc kubenswrapper[4780]: I1210 11:18:32.806550 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pt84q\" (UniqueName: \"kubernetes.io/projected/929349d7-3f7f-4e86-969d-2fa2b530959d-kube-api-access-pt84q\") pod \"ceilometer-0\" (UID: \"929349d7-3f7f-4e86-969d-2fa2b530959d\") " pod="openstack/ceilometer-0" Dec 10 11:18:32 crc kubenswrapper[4780]: I1210 11:18:32.823478 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Dec 10 11:18:32 crc kubenswrapper[4780]: I1210 11:18:32.848582 4780 scope.go:117] "RemoveContainer" containerID="3524f60b5ac763e3dfed56ecb35c16779feb6c66dbf3ffecfbece3bed2802838" Dec 10 11:18:32 crc kubenswrapper[4780]: I1210 11:18:32.909202 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pt84q\" (UniqueName: \"kubernetes.io/projected/929349d7-3f7f-4e86-969d-2fa2b530959d-kube-api-access-pt84q\") pod \"ceilometer-0\" (UID: \"929349d7-3f7f-4e86-969d-2fa2b530959d\") " pod="openstack/ceilometer-0" Dec 10 11:18:32 crc kubenswrapper[4780]: I1210 11:18:32.909324 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/929349d7-3f7f-4e86-969d-2fa2b530959d-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"929349d7-3f7f-4e86-969d-2fa2b530959d\") " pod="openstack/ceilometer-0" Dec 10 11:18:32 crc kubenswrapper[4780]: I1210 11:18:32.909361 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/929349d7-3f7f-4e86-969d-2fa2b530959d-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"929349d7-3f7f-4e86-969d-2fa2b530959d\") " pod="openstack/ceilometer-0" Dec 10 11:18:32 crc kubenswrapper[4780]: I1210 11:18:32.909397 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/929349d7-3f7f-4e86-969d-2fa2b530959d-config-data\") pod \"ceilometer-0\" (UID: \"929349d7-3f7f-4e86-969d-2fa2b530959d\") " pod="openstack/ceilometer-0" Dec 10 11:18:32 crc kubenswrapper[4780]: I1210 11:18:32.909473 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/929349d7-3f7f-4e86-969d-2fa2b530959d-run-httpd\") pod \"ceilometer-0\" (UID: \"929349d7-3f7f-4e86-969d-2fa2b530959d\") " pod="openstack/ceilometer-0" Dec 10 11:18:32 crc kubenswrapper[4780]: I1210 11:18:32.909516 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/929349d7-3f7f-4e86-969d-2fa2b530959d-log-httpd\") pod \"ceilometer-0\" (UID: \"929349d7-3f7f-4e86-969d-2fa2b530959d\") " pod="openstack/ceilometer-0" Dec 10 11:18:32 crc kubenswrapper[4780]: I1210 11:18:32.909590 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/929349d7-3f7f-4e86-969d-2fa2b530959d-scripts\") pod \"ceilometer-0\" (UID: \"929349d7-3f7f-4e86-969d-2fa2b530959d\") " pod="openstack/ceilometer-0" Dec 10 11:18:32 crc kubenswrapper[4780]: I1210 11:18:32.917656 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/929349d7-3f7f-4e86-969d-2fa2b530959d-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: 
\"929349d7-3f7f-4e86-969d-2fa2b530959d\") " pod="openstack/ceilometer-0" Dec 10 11:18:32 crc kubenswrapper[4780]: I1210 11:18:32.918618 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/929349d7-3f7f-4e86-969d-2fa2b530959d-run-httpd\") pod \"ceilometer-0\" (UID: \"929349d7-3f7f-4e86-969d-2fa2b530959d\") " pod="openstack/ceilometer-0" Dec 10 11:18:32 crc kubenswrapper[4780]: I1210 11:18:32.927094 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/929349d7-3f7f-4e86-969d-2fa2b530959d-log-httpd\") pod \"ceilometer-0\" (UID: \"929349d7-3f7f-4e86-969d-2fa2b530959d\") " pod="openstack/ceilometer-0" Dec 10 11:18:32 crc kubenswrapper[4780]: I1210 11:18:32.928192 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/929349d7-3f7f-4e86-969d-2fa2b530959d-scripts\") pod \"ceilometer-0\" (UID: \"929349d7-3f7f-4e86-969d-2fa2b530959d\") " pod="openstack/ceilometer-0" Dec 10 11:18:32 crc kubenswrapper[4780]: I1210 11:18:32.944472 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/929349d7-3f7f-4e86-969d-2fa2b530959d-config-data\") pod \"ceilometer-0\" (UID: \"929349d7-3f7f-4e86-969d-2fa2b530959d\") " pod="openstack/ceilometer-0" Dec 10 11:18:32 crc kubenswrapper[4780]: I1210 11:18:32.979430 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/929349d7-3f7f-4e86-969d-2fa2b530959d-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"929349d7-3f7f-4e86-969d-2fa2b530959d\") " pod="openstack/ceilometer-0" Dec 10 11:18:32 crc kubenswrapper[4780]: I1210 11:18:32.996509 4780 scope.go:117] "RemoveContainer" containerID="90f38aa24e74e78a263a82b742bebf91991ff25f2212114904e33abbbd82de16" Dec 10 11:18:32 crc kubenswrapper[4780]: E1210 11:18:32.997726 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xhdr5_openshift-machine-config-operator(6bf1dca1-b191-4796-b326-baac53e84045)\"" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" podUID="6bf1dca1-b191-4796-b326-baac53e84045" Dec 10 11:18:33 crc kubenswrapper[4780]: I1210 11:18:33.008441 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pt84q\" (UniqueName: \"kubernetes.io/projected/929349d7-3f7f-4e86-969d-2fa2b530959d-kube-api-access-pt84q\") pod \"ceilometer-0\" (UID: \"929349d7-3f7f-4e86-969d-2fa2b530959d\") " pod="openstack/ceilometer-0" Dec 10 11:18:33 crc kubenswrapper[4780]: I1210 11:18:33.163682 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Dec 10 11:18:33 crc kubenswrapper[4780]: I1210 11:18:33.838905 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Dec 10 11:18:33 crc kubenswrapper[4780]: W1210 11:18:33.842878 4780 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod929349d7_3f7f_4e86_969d_2fa2b530959d.slice/crio-cf36ed1d16a778b258149ebbae534404e31b1733c94d5bcb4ebe7c25808f4c95 WatchSource:0}: Error finding container cf36ed1d16a778b258149ebbae534404e31b1733c94d5bcb4ebe7c25808f4c95: Status 404 returned error can't find the container with id cf36ed1d16a778b258149ebbae534404e31b1733c94d5bcb4ebe7c25808f4c95 Dec 10 11:18:34 crc kubenswrapper[4780]: I1210 11:18:34.434436 4780 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d7731ed4-cbc6-4007-a419-c5456956d232" path="/var/lib/kubelet/pods/d7731ed4-cbc6-4007-a419-c5456956d232/volumes" Dec 10 11:18:34 crc kubenswrapper[4780]: I1210 11:18:34.684780 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"929349d7-3f7f-4e86-969d-2fa2b530959d","Type":"ContainerStarted","Data":"cf36ed1d16a778b258149ebbae534404e31b1733c94d5bcb4ebe7c25808f4c95"} Dec 10 11:18:36 crc kubenswrapper[4780]: I1210 11:18:36.726340 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"929349d7-3f7f-4e86-969d-2fa2b530959d","Type":"ContainerStarted","Data":"9ad02ab966f41dbeb22e03a0b840ff3c9d0de72b4b7171a1a063d635c73ca240"} Dec 10 11:18:37 crc kubenswrapper[4780]: I1210 11:18:37.745884 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"929349d7-3f7f-4e86-969d-2fa2b530959d","Type":"ContainerStarted","Data":"09ef7f687c48a4756a91e07f1dade46156eadbee9d1a835343f4a97a2043572c"} Dec 10 11:18:38 crc kubenswrapper[4780]: I1210 11:18:38.767634 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"929349d7-3f7f-4e86-969d-2fa2b530959d","Type":"ContainerStarted","Data":"d7f6a943bbd2125956bad03f2ea026f6986351aa6056a9da12c9c53c93b4afa5"} Dec 10 11:18:38 crc kubenswrapper[4780]: I1210 11:18:38.930259 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Dec 10 11:18:39 crc kubenswrapper[4780]: I1210 11:18:39.791599 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"929349d7-3f7f-4e86-969d-2fa2b530959d","Type":"ContainerStarted","Data":"f395fdea66d140cf1480a80c2e3bfa7dc9feef4d4505b7f36bc5153608cbeb41"} Dec 10 11:18:39 crc kubenswrapper[4780]: I1210 11:18:39.791832 4780 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="929349d7-3f7f-4e86-969d-2fa2b530959d" containerName="ceilometer-central-agent" containerID="cri-o://9ad02ab966f41dbeb22e03a0b840ff3c9d0de72b4b7171a1a063d635c73ca240" gracePeriod=30 Dec 10 11:18:39 crc kubenswrapper[4780]: I1210 11:18:39.791890 4780 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="929349d7-3f7f-4e86-969d-2fa2b530959d" containerName="sg-core" containerID="cri-o://d7f6a943bbd2125956bad03f2ea026f6986351aa6056a9da12c9c53c93b4afa5" gracePeriod=30 Dec 10 11:18:39 crc kubenswrapper[4780]: I1210 11:18:39.792381 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Dec 10 11:18:39 crc kubenswrapper[4780]: I1210 11:18:39.791964 
4780 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="929349d7-3f7f-4e86-969d-2fa2b530959d" containerName="proxy-httpd" containerID="cri-o://f395fdea66d140cf1480a80c2e3bfa7dc9feef4d4505b7f36bc5153608cbeb41" gracePeriod=30 Dec 10 11:18:39 crc kubenswrapper[4780]: I1210 11:18:39.791951 4780 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="929349d7-3f7f-4e86-969d-2fa2b530959d" containerName="ceilometer-notification-agent" containerID="cri-o://09ef7f687c48a4756a91e07f1dade46156eadbee9d1a835343f4a97a2043572c" gracePeriod=30 Dec 10 11:18:39 crc kubenswrapper[4780]: I1210 11:18:39.836741 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=3.116540574 podStartE2EDuration="7.836707075s" podCreationTimestamp="2025-12-10 11:18:32 +0000 UTC" firstStartedPulling="2025-12-10 11:18:34.319734562 +0000 UTC m=+2019.173128005" lastFinishedPulling="2025-12-10 11:18:39.039901063 +0000 UTC m=+2023.893294506" observedRunningTime="2025-12-10 11:18:39.826289379 +0000 UTC m=+2024.679682822" watchObservedRunningTime="2025-12-10 11:18:39.836707075 +0000 UTC m=+2024.690100508" Dec 10 11:18:40 crc kubenswrapper[4780]: I1210 11:18:40.809335 4780 generic.go:334] "Generic (PLEG): container finished" podID="a94e2b82-6087-4791-8f15-e1ca9e25028b" containerID="c36d091e25c263274562bbc15d2b6e015d2fa99fce1a12ab74953cb7b4d4452b" exitCode=0 Dec 10 11:18:40 crc kubenswrapper[4780]: I1210 11:18:40.809451 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-slw88" event={"ID":"a94e2b82-6087-4791-8f15-e1ca9e25028b","Type":"ContainerDied","Data":"c36d091e25c263274562bbc15d2b6e015d2fa99fce1a12ab74953cb7b4d4452b"} Dec 10 11:18:40 crc kubenswrapper[4780]: I1210 11:18:40.815226 4780 generic.go:334] "Generic (PLEG): container finished" podID="929349d7-3f7f-4e86-969d-2fa2b530959d" containerID="f395fdea66d140cf1480a80c2e3bfa7dc9feef4d4505b7f36bc5153608cbeb41" exitCode=0 Dec 10 11:18:40 crc kubenswrapper[4780]: I1210 11:18:40.815299 4780 generic.go:334] "Generic (PLEG): container finished" podID="929349d7-3f7f-4e86-969d-2fa2b530959d" containerID="d7f6a943bbd2125956bad03f2ea026f6986351aa6056a9da12c9c53c93b4afa5" exitCode=2 Dec 10 11:18:40 crc kubenswrapper[4780]: I1210 11:18:40.815313 4780 generic.go:334] "Generic (PLEG): container finished" podID="929349d7-3f7f-4e86-969d-2fa2b530959d" containerID="09ef7f687c48a4756a91e07f1dade46156eadbee9d1a835343f4a97a2043572c" exitCode=0 Dec 10 11:18:40 crc kubenswrapper[4780]: I1210 11:18:40.815302 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"929349d7-3f7f-4e86-969d-2fa2b530959d","Type":"ContainerDied","Data":"f395fdea66d140cf1480a80c2e3bfa7dc9feef4d4505b7f36bc5153608cbeb41"} Dec 10 11:18:40 crc kubenswrapper[4780]: I1210 11:18:40.815387 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"929349d7-3f7f-4e86-969d-2fa2b530959d","Type":"ContainerDied","Data":"d7f6a943bbd2125956bad03f2ea026f6986351aa6056a9da12c9c53c93b4afa5"} Dec 10 11:18:40 crc kubenswrapper[4780]: I1210 11:18:40.815400 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"929349d7-3f7f-4e86-969d-2fa2b530959d","Type":"ContainerDied","Data":"09ef7f687c48a4756a91e07f1dade46156eadbee9d1a835343f4a97a2043572c"} Dec 10 11:18:42 crc kubenswrapper[4780]: I1210 11:18:42.561452 4780 
util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-slw88" Dec 10 11:18:42 crc kubenswrapper[4780]: I1210 11:18:42.579799 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a94e2b82-6087-4791-8f15-e1ca9e25028b-scripts\") pod \"a94e2b82-6087-4791-8f15-e1ca9e25028b\" (UID: \"a94e2b82-6087-4791-8f15-e1ca9e25028b\") " Dec 10 11:18:42 crc kubenswrapper[4780]: I1210 11:18:42.579956 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a94e2b82-6087-4791-8f15-e1ca9e25028b-combined-ca-bundle\") pod \"a94e2b82-6087-4791-8f15-e1ca9e25028b\" (UID: \"a94e2b82-6087-4791-8f15-e1ca9e25028b\") " Dec 10 11:18:42 crc kubenswrapper[4780]: I1210 11:18:42.580165 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-crhlm\" (UniqueName: \"kubernetes.io/projected/a94e2b82-6087-4791-8f15-e1ca9e25028b-kube-api-access-crhlm\") pod \"a94e2b82-6087-4791-8f15-e1ca9e25028b\" (UID: \"a94e2b82-6087-4791-8f15-e1ca9e25028b\") " Dec 10 11:18:42 crc kubenswrapper[4780]: I1210 11:18:42.580219 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a94e2b82-6087-4791-8f15-e1ca9e25028b-config-data\") pod \"a94e2b82-6087-4791-8f15-e1ca9e25028b\" (UID: \"a94e2b82-6087-4791-8f15-e1ca9e25028b\") " Dec 10 11:18:42 crc kubenswrapper[4780]: I1210 11:18:42.594952 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a94e2b82-6087-4791-8f15-e1ca9e25028b-scripts" (OuterVolumeSpecName: "scripts") pod "a94e2b82-6087-4791-8f15-e1ca9e25028b" (UID: "a94e2b82-6087-4791-8f15-e1ca9e25028b"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:18:42 crc kubenswrapper[4780]: I1210 11:18:42.595427 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a94e2b82-6087-4791-8f15-e1ca9e25028b-kube-api-access-crhlm" (OuterVolumeSpecName: "kube-api-access-crhlm") pod "a94e2b82-6087-4791-8f15-e1ca9e25028b" (UID: "a94e2b82-6087-4791-8f15-e1ca9e25028b"). InnerVolumeSpecName "kube-api-access-crhlm". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:18:42 crc kubenswrapper[4780]: I1210 11:18:42.656156 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a94e2b82-6087-4791-8f15-e1ca9e25028b-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "a94e2b82-6087-4791-8f15-e1ca9e25028b" (UID: "a94e2b82-6087-4791-8f15-e1ca9e25028b"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:18:42 crc kubenswrapper[4780]: I1210 11:18:42.695354 4780 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a94e2b82-6087-4791-8f15-e1ca9e25028b-scripts\") on node \"crc\" DevicePath \"\"" Dec 10 11:18:42 crc kubenswrapper[4780]: I1210 11:18:42.695428 4780 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a94e2b82-6087-4791-8f15-e1ca9e25028b-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 10 11:18:42 crc kubenswrapper[4780]: I1210 11:18:42.695442 4780 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-crhlm\" (UniqueName: \"kubernetes.io/projected/a94e2b82-6087-4791-8f15-e1ca9e25028b-kube-api-access-crhlm\") on node \"crc\" DevicePath \"\"" Dec 10 11:18:42 crc kubenswrapper[4780]: I1210 11:18:42.703490 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a94e2b82-6087-4791-8f15-e1ca9e25028b-config-data" (OuterVolumeSpecName: "config-data") pod "a94e2b82-6087-4791-8f15-e1ca9e25028b" (UID: "a94e2b82-6087-4791-8f15-e1ca9e25028b"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:18:42 crc kubenswrapper[4780]: I1210 11:18:42.798200 4780 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a94e2b82-6087-4791-8f15-e1ca9e25028b-config-data\") on node \"crc\" DevicePath \"\"" Dec 10 11:18:42 crc kubenswrapper[4780]: I1210 11:18:42.850125 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-slw88" event={"ID":"a94e2b82-6087-4791-8f15-e1ca9e25028b","Type":"ContainerDied","Data":"9685a6b9643a6f71a137f67a2bea083504c69a33470941fc116fa686e3a4e02f"} Dec 10 11:18:42 crc kubenswrapper[4780]: I1210 11:18:42.850209 4780 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="9685a6b9643a6f71a137f67a2bea083504c69a33470941fc116fa686e3a4e02f" Dec 10 11:18:42 crc kubenswrapper[4780]: I1210 11:18:42.850309 4780 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-slw88" Dec 10 11:18:43 crc kubenswrapper[4780]: I1210 11:18:43.056796 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-conductor-0"] Dec 10 11:18:43 crc kubenswrapper[4780]: E1210 11:18:43.058197 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a94e2b82-6087-4791-8f15-e1ca9e25028b" containerName="nova-cell0-conductor-db-sync" Dec 10 11:18:43 crc kubenswrapper[4780]: I1210 11:18:43.058222 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="a94e2b82-6087-4791-8f15-e1ca9e25028b" containerName="nova-cell0-conductor-db-sync" Dec 10 11:18:43 crc kubenswrapper[4780]: I1210 11:18:43.058567 4780 memory_manager.go:354] "RemoveStaleState removing state" podUID="a94e2b82-6087-4791-8f15-e1ca9e25028b" containerName="nova-cell0-conductor-db-sync" Dec 10 11:18:43 crc kubenswrapper[4780]: I1210 11:18:43.059710 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-conductor-0" Dec 10 11:18:43 crc kubenswrapper[4780]: I1210 11:18:43.069654 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-config-data" Dec 10 11:18:43 crc kubenswrapper[4780]: I1210 11:18:43.071797 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-nova-dockercfg-2tcc2" Dec 10 11:18:43 crc kubenswrapper[4780]: I1210 11:18:43.091636 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-0"] Dec 10 11:18:43 crc kubenswrapper[4780]: I1210 11:18:43.111136 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a6f72afd-0015-45dd-9f11-b777b4a99211-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"a6f72afd-0015-45dd-9f11-b777b4a99211\") " pod="openstack/nova-cell0-conductor-0" Dec 10 11:18:43 crc kubenswrapper[4780]: I1210 11:18:43.111334 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pxr9t\" (UniqueName: \"kubernetes.io/projected/a6f72afd-0015-45dd-9f11-b777b4a99211-kube-api-access-pxr9t\") pod \"nova-cell0-conductor-0\" (UID: \"a6f72afd-0015-45dd-9f11-b777b4a99211\") " pod="openstack/nova-cell0-conductor-0" Dec 10 11:18:43 crc kubenswrapper[4780]: I1210 11:18:43.111394 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a6f72afd-0015-45dd-9f11-b777b4a99211-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"a6f72afd-0015-45dd-9f11-b777b4a99211\") " pod="openstack/nova-cell0-conductor-0" Dec 10 11:18:43 crc kubenswrapper[4780]: I1210 11:18:43.214143 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a6f72afd-0015-45dd-9f11-b777b4a99211-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"a6f72afd-0015-45dd-9f11-b777b4a99211\") " pod="openstack/nova-cell0-conductor-0" Dec 10 11:18:43 crc kubenswrapper[4780]: I1210 11:18:43.214262 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pxr9t\" (UniqueName: \"kubernetes.io/projected/a6f72afd-0015-45dd-9f11-b777b4a99211-kube-api-access-pxr9t\") pod \"nova-cell0-conductor-0\" (UID: \"a6f72afd-0015-45dd-9f11-b777b4a99211\") " pod="openstack/nova-cell0-conductor-0" Dec 10 11:18:43 crc kubenswrapper[4780]: I1210 11:18:43.214306 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a6f72afd-0015-45dd-9f11-b777b4a99211-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"a6f72afd-0015-45dd-9f11-b777b4a99211\") " pod="openstack/nova-cell0-conductor-0" Dec 10 11:18:43 crc kubenswrapper[4780]: I1210 11:18:43.226288 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a6f72afd-0015-45dd-9f11-b777b4a99211-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"a6f72afd-0015-45dd-9f11-b777b4a99211\") " pod="openstack/nova-cell0-conductor-0" Dec 10 11:18:43 crc kubenswrapper[4780]: I1210 11:18:43.226344 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a6f72afd-0015-45dd-9f11-b777b4a99211-config-data\") pod \"nova-cell0-conductor-0\" 
(UID: \"a6f72afd-0015-45dd-9f11-b777b4a99211\") " pod="openstack/nova-cell0-conductor-0" Dec 10 11:18:43 crc kubenswrapper[4780]: I1210 11:18:43.238807 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pxr9t\" (UniqueName: \"kubernetes.io/projected/a6f72afd-0015-45dd-9f11-b777b4a99211-kube-api-access-pxr9t\") pod \"nova-cell0-conductor-0\" (UID: \"a6f72afd-0015-45dd-9f11-b777b4a99211\") " pod="openstack/nova-cell0-conductor-0" Dec 10 11:18:43 crc kubenswrapper[4780]: I1210 11:18:43.393744 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-0" Dec 10 11:18:44 crc kubenswrapper[4780]: I1210 11:18:44.021845 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-0"] Dec 10 11:18:44 crc kubenswrapper[4780]: I1210 11:18:44.913518 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"a6f72afd-0015-45dd-9f11-b777b4a99211","Type":"ContainerStarted","Data":"621fda63f4521caf7b4e253e6ee82086e05991b31a899172c6ae086c07b0ee69"} Dec 10 11:18:44 crc kubenswrapper[4780]: I1210 11:18:44.913953 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"a6f72afd-0015-45dd-9f11-b777b4a99211","Type":"ContainerStarted","Data":"2fa10ea0a052ca8aa4b0e8112fbed2dcf2838e0e8e600dcc136739a9e6386d07"} Dec 10 11:18:44 crc kubenswrapper[4780]: I1210 11:18:44.917676 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell0-conductor-0" Dec 10 11:18:44 crc kubenswrapper[4780]: I1210 11:18:44.954002 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-conductor-0" podStartSLOduration=1.95397444 podStartE2EDuration="1.95397444s" podCreationTimestamp="2025-12-10 11:18:43 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 11:18:44.953197021 +0000 UTC m=+2029.806590474" watchObservedRunningTime="2025-12-10 11:18:44.95397444 +0000 UTC m=+2029.807367883" Dec 10 11:18:47 crc kubenswrapper[4780]: I1210 11:18:47.961244 4780 scope.go:117] "RemoveContainer" containerID="90f38aa24e74e78a263a82b742bebf91991ff25f2212114904e33abbbd82de16" Dec 10 11:18:47 crc kubenswrapper[4780]: E1210 11:18:47.962610 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xhdr5_openshift-machine-config-operator(6bf1dca1-b191-4796-b326-baac53e84045)\"" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" podUID="6bf1dca1-b191-4796-b326-baac53e84045" Dec 10 11:18:50 crc kubenswrapper[4780]: I1210 11:18:50.806636 4780 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Dec 10 11:18:50 crc kubenswrapper[4780]: I1210 11:18:50.861275 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/929349d7-3f7f-4e86-969d-2fa2b530959d-sg-core-conf-yaml\") pod \"929349d7-3f7f-4e86-969d-2fa2b530959d\" (UID: \"929349d7-3f7f-4e86-969d-2fa2b530959d\") " Dec 10 11:18:50 crc kubenswrapper[4780]: I1210 11:18:50.861355 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/929349d7-3f7f-4e86-969d-2fa2b530959d-log-httpd\") pod \"929349d7-3f7f-4e86-969d-2fa2b530959d\" (UID: \"929349d7-3f7f-4e86-969d-2fa2b530959d\") " Dec 10 11:18:50 crc kubenswrapper[4780]: I1210 11:18:50.861457 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/929349d7-3f7f-4e86-969d-2fa2b530959d-combined-ca-bundle\") pod \"929349d7-3f7f-4e86-969d-2fa2b530959d\" (UID: \"929349d7-3f7f-4e86-969d-2fa2b530959d\") " Dec 10 11:18:50 crc kubenswrapper[4780]: I1210 11:18:50.861539 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/929349d7-3f7f-4e86-969d-2fa2b530959d-run-httpd\") pod \"929349d7-3f7f-4e86-969d-2fa2b530959d\" (UID: \"929349d7-3f7f-4e86-969d-2fa2b530959d\") " Dec 10 11:18:50 crc kubenswrapper[4780]: I1210 11:18:50.861570 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/929349d7-3f7f-4e86-969d-2fa2b530959d-scripts\") pod \"929349d7-3f7f-4e86-969d-2fa2b530959d\" (UID: \"929349d7-3f7f-4e86-969d-2fa2b530959d\") " Dec 10 11:18:50 crc kubenswrapper[4780]: I1210 11:18:50.861698 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/929349d7-3f7f-4e86-969d-2fa2b530959d-config-data\") pod \"929349d7-3f7f-4e86-969d-2fa2b530959d\" (UID: \"929349d7-3f7f-4e86-969d-2fa2b530959d\") " Dec 10 11:18:50 crc kubenswrapper[4780]: I1210 11:18:50.861803 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pt84q\" (UniqueName: \"kubernetes.io/projected/929349d7-3f7f-4e86-969d-2fa2b530959d-kube-api-access-pt84q\") pod \"929349d7-3f7f-4e86-969d-2fa2b530959d\" (UID: \"929349d7-3f7f-4e86-969d-2fa2b530959d\") " Dec 10 11:18:50 crc kubenswrapper[4780]: I1210 11:18:50.862058 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/929349d7-3f7f-4e86-969d-2fa2b530959d-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "929349d7-3f7f-4e86-969d-2fa2b530959d" (UID: "929349d7-3f7f-4e86-969d-2fa2b530959d"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 11:18:50 crc kubenswrapper[4780]: I1210 11:18:50.862680 4780 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/929349d7-3f7f-4e86-969d-2fa2b530959d-log-httpd\") on node \"crc\" DevicePath \"\"" Dec 10 11:18:50 crc kubenswrapper[4780]: I1210 11:18:50.863134 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/929349d7-3f7f-4e86-969d-2fa2b530959d-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "929349d7-3f7f-4e86-969d-2fa2b530959d" (UID: "929349d7-3f7f-4e86-969d-2fa2b530959d"). 
InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 11:18:50 crc kubenswrapper[4780]: I1210 11:18:50.870293 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/929349d7-3f7f-4e86-969d-2fa2b530959d-scripts" (OuterVolumeSpecName: "scripts") pod "929349d7-3f7f-4e86-969d-2fa2b530959d" (UID: "929349d7-3f7f-4e86-969d-2fa2b530959d"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:18:50 crc kubenswrapper[4780]: I1210 11:18:50.880526 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/929349d7-3f7f-4e86-969d-2fa2b530959d-kube-api-access-pt84q" (OuterVolumeSpecName: "kube-api-access-pt84q") pod "929349d7-3f7f-4e86-969d-2fa2b530959d" (UID: "929349d7-3f7f-4e86-969d-2fa2b530959d"). InnerVolumeSpecName "kube-api-access-pt84q". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:18:50 crc kubenswrapper[4780]: I1210 11:18:50.908674 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/929349d7-3f7f-4e86-969d-2fa2b530959d-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "929349d7-3f7f-4e86-969d-2fa2b530959d" (UID: "929349d7-3f7f-4e86-969d-2fa2b530959d"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:18:50 crc kubenswrapper[4780]: I1210 11:18:50.965132 4780 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/929349d7-3f7f-4e86-969d-2fa2b530959d-run-httpd\") on node \"crc\" DevicePath \"\"" Dec 10 11:18:50 crc kubenswrapper[4780]: I1210 11:18:50.965514 4780 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/929349d7-3f7f-4e86-969d-2fa2b530959d-scripts\") on node \"crc\" DevicePath \"\"" Dec 10 11:18:50 crc kubenswrapper[4780]: I1210 11:18:50.965602 4780 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pt84q\" (UniqueName: \"kubernetes.io/projected/929349d7-3f7f-4e86-969d-2fa2b530959d-kube-api-access-pt84q\") on node \"crc\" DevicePath \"\"" Dec 10 11:18:50 crc kubenswrapper[4780]: I1210 11:18:50.965731 4780 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/929349d7-3f7f-4e86-969d-2fa2b530959d-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Dec 10 11:18:50 crc kubenswrapper[4780]: I1210 11:18:50.978046 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/929349d7-3f7f-4e86-969d-2fa2b530959d-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "929349d7-3f7f-4e86-969d-2fa2b530959d" (UID: "929349d7-3f7f-4e86-969d-2fa2b530959d"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:18:51 crc kubenswrapper[4780]: I1210 11:18:51.031803 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/929349d7-3f7f-4e86-969d-2fa2b530959d-config-data" (OuterVolumeSpecName: "config-data") pod "929349d7-3f7f-4e86-969d-2fa2b530959d" (UID: "929349d7-3f7f-4e86-969d-2fa2b530959d"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:18:51 crc kubenswrapper[4780]: I1210 11:18:51.069378 4780 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/929349d7-3f7f-4e86-969d-2fa2b530959d-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 10 11:18:51 crc kubenswrapper[4780]: I1210 11:18:51.069443 4780 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/929349d7-3f7f-4e86-969d-2fa2b530959d-config-data\") on node \"crc\" DevicePath \"\"" Dec 10 11:18:51 crc kubenswrapper[4780]: I1210 11:18:51.094048 4780 generic.go:334] "Generic (PLEG): container finished" podID="929349d7-3f7f-4e86-969d-2fa2b530959d" containerID="9ad02ab966f41dbeb22e03a0b840ff3c9d0de72b4b7171a1a063d635c73ca240" exitCode=0 Dec 10 11:18:51 crc kubenswrapper[4780]: I1210 11:18:51.094121 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"929349d7-3f7f-4e86-969d-2fa2b530959d","Type":"ContainerDied","Data":"9ad02ab966f41dbeb22e03a0b840ff3c9d0de72b4b7171a1a063d635c73ca240"} Dec 10 11:18:51 crc kubenswrapper[4780]: I1210 11:18:51.094168 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"929349d7-3f7f-4e86-969d-2fa2b530959d","Type":"ContainerDied","Data":"cf36ed1d16a778b258149ebbae534404e31b1733c94d5bcb4ebe7c25808f4c95"} Dec 10 11:18:51 crc kubenswrapper[4780]: I1210 11:18:51.094189 4780 scope.go:117] "RemoveContainer" containerID="f395fdea66d140cf1480a80c2e3bfa7dc9feef4d4505b7f36bc5153608cbeb41" Dec 10 11:18:51 crc kubenswrapper[4780]: I1210 11:18:51.094798 4780 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Dec 10 11:18:51 crc kubenswrapper[4780]: I1210 11:18:51.127729 4780 scope.go:117] "RemoveContainer" containerID="d7f6a943bbd2125956bad03f2ea026f6986351aa6056a9da12c9c53c93b4afa5" Dec 10 11:18:51 crc kubenswrapper[4780]: I1210 11:18:51.164848 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Dec 10 11:18:51 crc kubenswrapper[4780]: I1210 11:18:51.180181 4780 scope.go:117] "RemoveContainer" containerID="09ef7f687c48a4756a91e07f1dade46156eadbee9d1a835343f4a97a2043572c" Dec 10 11:18:51 crc kubenswrapper[4780]: I1210 11:18:51.186411 4780 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Dec 10 11:18:51 crc kubenswrapper[4780]: I1210 11:18:51.202065 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Dec 10 11:18:51 crc kubenswrapper[4780]: E1210 11:18:51.203102 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="929349d7-3f7f-4e86-969d-2fa2b530959d" containerName="ceilometer-central-agent" Dec 10 11:18:51 crc kubenswrapper[4780]: I1210 11:18:51.203134 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="929349d7-3f7f-4e86-969d-2fa2b530959d" containerName="ceilometer-central-agent" Dec 10 11:18:51 crc kubenswrapper[4780]: E1210 11:18:51.203163 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="929349d7-3f7f-4e86-969d-2fa2b530959d" containerName="sg-core" Dec 10 11:18:51 crc kubenswrapper[4780]: I1210 11:18:51.203172 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="929349d7-3f7f-4e86-969d-2fa2b530959d" containerName="sg-core" Dec 10 11:18:51 crc kubenswrapper[4780]: E1210 11:18:51.203201 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="929349d7-3f7f-4e86-969d-2fa2b530959d" 
containerName="proxy-httpd" Dec 10 11:18:51 crc kubenswrapper[4780]: I1210 11:18:51.203207 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="929349d7-3f7f-4e86-969d-2fa2b530959d" containerName="proxy-httpd" Dec 10 11:18:51 crc kubenswrapper[4780]: E1210 11:18:51.203243 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="929349d7-3f7f-4e86-969d-2fa2b530959d" containerName="ceilometer-notification-agent" Dec 10 11:18:51 crc kubenswrapper[4780]: I1210 11:18:51.203250 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="929349d7-3f7f-4e86-969d-2fa2b530959d" containerName="ceilometer-notification-agent" Dec 10 11:18:51 crc kubenswrapper[4780]: I1210 11:18:51.203563 4780 memory_manager.go:354] "RemoveStaleState removing state" podUID="929349d7-3f7f-4e86-969d-2fa2b530959d" containerName="sg-core" Dec 10 11:18:51 crc kubenswrapper[4780]: I1210 11:18:51.203583 4780 memory_manager.go:354] "RemoveStaleState removing state" podUID="929349d7-3f7f-4e86-969d-2fa2b530959d" containerName="ceilometer-notification-agent" Dec 10 11:18:51 crc kubenswrapper[4780]: I1210 11:18:51.203590 4780 memory_manager.go:354] "RemoveStaleState removing state" podUID="929349d7-3f7f-4e86-969d-2fa2b530959d" containerName="proxy-httpd" Dec 10 11:18:51 crc kubenswrapper[4780]: I1210 11:18:51.203597 4780 memory_manager.go:354] "RemoveStaleState removing state" podUID="929349d7-3f7f-4e86-969d-2fa2b530959d" containerName="ceilometer-central-agent" Dec 10 11:18:51 crc kubenswrapper[4780]: I1210 11:18:51.206868 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Dec 10 11:18:51 crc kubenswrapper[4780]: I1210 11:18:51.214275 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Dec 10 11:18:51 crc kubenswrapper[4780]: I1210 11:18:51.220416 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Dec 10 11:18:51 crc kubenswrapper[4780]: I1210 11:18:51.233387 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Dec 10 11:18:51 crc kubenswrapper[4780]: I1210 11:18:51.268178 4780 scope.go:117] "RemoveContainer" containerID="9ad02ab966f41dbeb22e03a0b840ff3c9d0de72b4b7171a1a063d635c73ca240" Dec 10 11:18:51 crc kubenswrapper[4780]: I1210 11:18:51.273457 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c9ca683b-68dc-4bf8-8977-0c9a6cef383b-config-data\") pod \"ceilometer-0\" (UID: \"c9ca683b-68dc-4bf8-8977-0c9a6cef383b\") " pod="openstack/ceilometer-0" Dec 10 11:18:51 crc kubenswrapper[4780]: I1210 11:18:51.273947 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c9ca683b-68dc-4bf8-8977-0c9a6cef383b-scripts\") pod \"ceilometer-0\" (UID: \"c9ca683b-68dc-4bf8-8977-0c9a6cef383b\") " pod="openstack/ceilometer-0" Dec 10 11:18:51 crc kubenswrapper[4780]: I1210 11:18:51.273997 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c9ca683b-68dc-4bf8-8977-0c9a6cef383b-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"c9ca683b-68dc-4bf8-8977-0c9a6cef383b\") " pod="openstack/ceilometer-0" Dec 10 11:18:51 crc kubenswrapper[4780]: I1210 11:18:51.274025 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for 
volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c9ca683b-68dc-4bf8-8977-0c9a6cef383b-run-httpd\") pod \"ceilometer-0\" (UID: \"c9ca683b-68dc-4bf8-8977-0c9a6cef383b\") " pod="openstack/ceilometer-0" Dec 10 11:18:51 crc kubenswrapper[4780]: I1210 11:18:51.274065 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c9ca683b-68dc-4bf8-8977-0c9a6cef383b-log-httpd\") pod \"ceilometer-0\" (UID: \"c9ca683b-68dc-4bf8-8977-0c9a6cef383b\") " pod="openstack/ceilometer-0" Dec 10 11:18:51 crc kubenswrapper[4780]: I1210 11:18:51.274113 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-r4bdb\" (UniqueName: \"kubernetes.io/projected/c9ca683b-68dc-4bf8-8977-0c9a6cef383b-kube-api-access-r4bdb\") pod \"ceilometer-0\" (UID: \"c9ca683b-68dc-4bf8-8977-0c9a6cef383b\") " pod="openstack/ceilometer-0" Dec 10 11:18:51 crc kubenswrapper[4780]: I1210 11:18:51.274193 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/c9ca683b-68dc-4bf8-8977-0c9a6cef383b-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"c9ca683b-68dc-4bf8-8977-0c9a6cef383b\") " pod="openstack/ceilometer-0" Dec 10 11:18:51 crc kubenswrapper[4780]: I1210 11:18:51.314882 4780 scope.go:117] "RemoveContainer" containerID="f395fdea66d140cf1480a80c2e3bfa7dc9feef4d4505b7f36bc5153608cbeb41" Dec 10 11:18:51 crc kubenswrapper[4780]: E1210 11:18:51.316459 4780 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f395fdea66d140cf1480a80c2e3bfa7dc9feef4d4505b7f36bc5153608cbeb41\": container with ID starting with f395fdea66d140cf1480a80c2e3bfa7dc9feef4d4505b7f36bc5153608cbeb41 not found: ID does not exist" containerID="f395fdea66d140cf1480a80c2e3bfa7dc9feef4d4505b7f36bc5153608cbeb41" Dec 10 11:18:51 crc kubenswrapper[4780]: I1210 11:18:51.316526 4780 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f395fdea66d140cf1480a80c2e3bfa7dc9feef4d4505b7f36bc5153608cbeb41"} err="failed to get container status \"f395fdea66d140cf1480a80c2e3bfa7dc9feef4d4505b7f36bc5153608cbeb41\": rpc error: code = NotFound desc = could not find container \"f395fdea66d140cf1480a80c2e3bfa7dc9feef4d4505b7f36bc5153608cbeb41\": container with ID starting with f395fdea66d140cf1480a80c2e3bfa7dc9feef4d4505b7f36bc5153608cbeb41 not found: ID does not exist" Dec 10 11:18:51 crc kubenswrapper[4780]: I1210 11:18:51.316562 4780 scope.go:117] "RemoveContainer" containerID="d7f6a943bbd2125956bad03f2ea026f6986351aa6056a9da12c9c53c93b4afa5" Dec 10 11:18:51 crc kubenswrapper[4780]: E1210 11:18:51.317268 4780 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d7f6a943bbd2125956bad03f2ea026f6986351aa6056a9da12c9c53c93b4afa5\": container with ID starting with d7f6a943bbd2125956bad03f2ea026f6986351aa6056a9da12c9c53c93b4afa5 not found: ID does not exist" containerID="d7f6a943bbd2125956bad03f2ea026f6986351aa6056a9da12c9c53c93b4afa5" Dec 10 11:18:51 crc kubenswrapper[4780]: I1210 11:18:51.317317 4780 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d7f6a943bbd2125956bad03f2ea026f6986351aa6056a9da12c9c53c93b4afa5"} err="failed to get container status 
\"d7f6a943bbd2125956bad03f2ea026f6986351aa6056a9da12c9c53c93b4afa5\": rpc error: code = NotFound desc = could not find container \"d7f6a943bbd2125956bad03f2ea026f6986351aa6056a9da12c9c53c93b4afa5\": container with ID starting with d7f6a943bbd2125956bad03f2ea026f6986351aa6056a9da12c9c53c93b4afa5 not found: ID does not exist" Dec 10 11:18:51 crc kubenswrapper[4780]: I1210 11:18:51.317354 4780 scope.go:117] "RemoveContainer" containerID="09ef7f687c48a4756a91e07f1dade46156eadbee9d1a835343f4a97a2043572c" Dec 10 11:18:51 crc kubenswrapper[4780]: E1210 11:18:51.317686 4780 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"09ef7f687c48a4756a91e07f1dade46156eadbee9d1a835343f4a97a2043572c\": container with ID starting with 09ef7f687c48a4756a91e07f1dade46156eadbee9d1a835343f4a97a2043572c not found: ID does not exist" containerID="09ef7f687c48a4756a91e07f1dade46156eadbee9d1a835343f4a97a2043572c" Dec 10 11:18:51 crc kubenswrapper[4780]: I1210 11:18:51.317717 4780 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"09ef7f687c48a4756a91e07f1dade46156eadbee9d1a835343f4a97a2043572c"} err="failed to get container status \"09ef7f687c48a4756a91e07f1dade46156eadbee9d1a835343f4a97a2043572c\": rpc error: code = NotFound desc = could not find container \"09ef7f687c48a4756a91e07f1dade46156eadbee9d1a835343f4a97a2043572c\": container with ID starting with 09ef7f687c48a4756a91e07f1dade46156eadbee9d1a835343f4a97a2043572c not found: ID does not exist" Dec 10 11:18:51 crc kubenswrapper[4780]: I1210 11:18:51.317740 4780 scope.go:117] "RemoveContainer" containerID="9ad02ab966f41dbeb22e03a0b840ff3c9d0de72b4b7171a1a063d635c73ca240" Dec 10 11:18:51 crc kubenswrapper[4780]: E1210 11:18:51.321220 4780 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9ad02ab966f41dbeb22e03a0b840ff3c9d0de72b4b7171a1a063d635c73ca240\": container with ID starting with 9ad02ab966f41dbeb22e03a0b840ff3c9d0de72b4b7171a1a063d635c73ca240 not found: ID does not exist" containerID="9ad02ab966f41dbeb22e03a0b840ff3c9d0de72b4b7171a1a063d635c73ca240" Dec 10 11:18:51 crc kubenswrapper[4780]: I1210 11:18:51.321284 4780 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9ad02ab966f41dbeb22e03a0b840ff3c9d0de72b4b7171a1a063d635c73ca240"} err="failed to get container status \"9ad02ab966f41dbeb22e03a0b840ff3c9d0de72b4b7171a1a063d635c73ca240\": rpc error: code = NotFound desc = could not find container \"9ad02ab966f41dbeb22e03a0b840ff3c9d0de72b4b7171a1a063d635c73ca240\": container with ID starting with 9ad02ab966f41dbeb22e03a0b840ff3c9d0de72b4b7171a1a063d635c73ca240 not found: ID does not exist" Dec 10 11:18:51 crc kubenswrapper[4780]: I1210 11:18:51.376470 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c9ca683b-68dc-4bf8-8977-0c9a6cef383b-scripts\") pod \"ceilometer-0\" (UID: \"c9ca683b-68dc-4bf8-8977-0c9a6cef383b\") " pod="openstack/ceilometer-0" Dec 10 11:18:51 crc kubenswrapper[4780]: I1210 11:18:51.376549 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c9ca683b-68dc-4bf8-8977-0c9a6cef383b-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"c9ca683b-68dc-4bf8-8977-0c9a6cef383b\") " pod="openstack/ceilometer-0" Dec 10 11:18:51 crc kubenswrapper[4780]: I1210 
11:18:51.376572 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c9ca683b-68dc-4bf8-8977-0c9a6cef383b-run-httpd\") pod \"ceilometer-0\" (UID: \"c9ca683b-68dc-4bf8-8977-0c9a6cef383b\") " pod="openstack/ceilometer-0" Dec 10 11:18:51 crc kubenswrapper[4780]: I1210 11:18:51.376597 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c9ca683b-68dc-4bf8-8977-0c9a6cef383b-log-httpd\") pod \"ceilometer-0\" (UID: \"c9ca683b-68dc-4bf8-8977-0c9a6cef383b\") " pod="openstack/ceilometer-0" Dec 10 11:18:51 crc kubenswrapper[4780]: I1210 11:18:51.376635 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-r4bdb\" (UniqueName: \"kubernetes.io/projected/c9ca683b-68dc-4bf8-8977-0c9a6cef383b-kube-api-access-r4bdb\") pod \"ceilometer-0\" (UID: \"c9ca683b-68dc-4bf8-8977-0c9a6cef383b\") " pod="openstack/ceilometer-0" Dec 10 11:18:51 crc kubenswrapper[4780]: I1210 11:18:51.376696 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/c9ca683b-68dc-4bf8-8977-0c9a6cef383b-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"c9ca683b-68dc-4bf8-8977-0c9a6cef383b\") " pod="openstack/ceilometer-0" Dec 10 11:18:51 crc kubenswrapper[4780]: I1210 11:18:51.376835 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c9ca683b-68dc-4bf8-8977-0c9a6cef383b-config-data\") pod \"ceilometer-0\" (UID: \"c9ca683b-68dc-4bf8-8977-0c9a6cef383b\") " pod="openstack/ceilometer-0" Dec 10 11:18:51 crc kubenswrapper[4780]: I1210 11:18:51.377651 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c9ca683b-68dc-4bf8-8977-0c9a6cef383b-log-httpd\") pod \"ceilometer-0\" (UID: \"c9ca683b-68dc-4bf8-8977-0c9a6cef383b\") " pod="openstack/ceilometer-0" Dec 10 11:18:51 crc kubenswrapper[4780]: I1210 11:18:51.377663 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c9ca683b-68dc-4bf8-8977-0c9a6cef383b-run-httpd\") pod \"ceilometer-0\" (UID: \"c9ca683b-68dc-4bf8-8977-0c9a6cef383b\") " pod="openstack/ceilometer-0" Dec 10 11:18:51 crc kubenswrapper[4780]: I1210 11:18:51.383972 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c9ca683b-68dc-4bf8-8977-0c9a6cef383b-scripts\") pod \"ceilometer-0\" (UID: \"c9ca683b-68dc-4bf8-8977-0c9a6cef383b\") " pod="openstack/ceilometer-0" Dec 10 11:18:51 crc kubenswrapper[4780]: I1210 11:18:51.384759 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c9ca683b-68dc-4bf8-8977-0c9a6cef383b-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"c9ca683b-68dc-4bf8-8977-0c9a6cef383b\") " pod="openstack/ceilometer-0" Dec 10 11:18:51 crc kubenswrapper[4780]: I1210 11:18:51.385669 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/c9ca683b-68dc-4bf8-8977-0c9a6cef383b-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"c9ca683b-68dc-4bf8-8977-0c9a6cef383b\") " pod="openstack/ceilometer-0" Dec 10 11:18:51 crc kubenswrapper[4780]: I1210 11:18:51.386263 4780 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c9ca683b-68dc-4bf8-8977-0c9a6cef383b-config-data\") pod \"ceilometer-0\" (UID: \"c9ca683b-68dc-4bf8-8977-0c9a6cef383b\") " pod="openstack/ceilometer-0" Dec 10 11:18:51 crc kubenswrapper[4780]: I1210 11:18:51.407225 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-r4bdb\" (UniqueName: \"kubernetes.io/projected/c9ca683b-68dc-4bf8-8977-0c9a6cef383b-kube-api-access-r4bdb\") pod \"ceilometer-0\" (UID: \"c9ca683b-68dc-4bf8-8977-0c9a6cef383b\") " pod="openstack/ceilometer-0" Dec 10 11:18:51 crc kubenswrapper[4780]: I1210 11:18:51.494551 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/aodh-db-create-ccvcx"] Dec 10 11:18:51 crc kubenswrapper[4780]: I1210 11:18:51.504584 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-db-create-ccvcx" Dec 10 11:18:51 crc kubenswrapper[4780]: I1210 11:18:51.524081 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-db-create-ccvcx"] Dec 10 11:18:51 crc kubenswrapper[4780]: I1210 11:18:51.550040 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Dec 10 11:18:51 crc kubenswrapper[4780]: I1210 11:18:51.590393 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-r8ch5\" (UniqueName: \"kubernetes.io/projected/c52cc2d2-45a5-4e32-8320-db28a4a5b5c6-kube-api-access-r8ch5\") pod \"aodh-db-create-ccvcx\" (UID: \"c52cc2d2-45a5-4e32-8320-db28a4a5b5c6\") " pod="openstack/aodh-db-create-ccvcx" Dec 10 11:18:51 crc kubenswrapper[4780]: I1210 11:18:51.591014 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c52cc2d2-45a5-4e32-8320-db28a4a5b5c6-operator-scripts\") pod \"aodh-db-create-ccvcx\" (UID: \"c52cc2d2-45a5-4e32-8320-db28a4a5b5c6\") " pod="openstack/aodh-db-create-ccvcx" Dec 10 11:18:51 crc kubenswrapper[4780]: I1210 11:18:51.679097 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/aodh-3287-account-create-update-plmqg"] Dec 10 11:18:51 crc kubenswrapper[4780]: I1210 11:18:51.681945 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/aodh-3287-account-create-update-plmqg" Dec 10 11:18:51 crc kubenswrapper[4780]: I1210 11:18:51.700481 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"aodh-db-secret" Dec 10 11:18:51 crc kubenswrapper[4780]: I1210 11:18:51.705033 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c52cc2d2-45a5-4e32-8320-db28a4a5b5c6-operator-scripts\") pod \"aodh-db-create-ccvcx\" (UID: \"c52cc2d2-45a5-4e32-8320-db28a4a5b5c6\") " pod="openstack/aodh-db-create-ccvcx" Dec 10 11:18:51 crc kubenswrapper[4780]: I1210 11:18:51.707984 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c52cc2d2-45a5-4e32-8320-db28a4a5b5c6-operator-scripts\") pod \"aodh-db-create-ccvcx\" (UID: \"c52cc2d2-45a5-4e32-8320-db28a4a5b5c6\") " pod="openstack/aodh-db-create-ccvcx" Dec 10 11:18:51 crc kubenswrapper[4780]: I1210 11:18:51.739171 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/81f2ad5f-8eec-47e0-9958-ec5f0be03fae-operator-scripts\") pod \"aodh-3287-account-create-update-plmqg\" (UID: \"81f2ad5f-8eec-47e0-9958-ec5f0be03fae\") " pod="openstack/aodh-3287-account-create-update-plmqg" Dec 10 11:18:51 crc kubenswrapper[4780]: I1210 11:18:51.739392 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-k27zk\" (UniqueName: \"kubernetes.io/projected/81f2ad5f-8eec-47e0-9958-ec5f0be03fae-kube-api-access-k27zk\") pod \"aodh-3287-account-create-update-plmqg\" (UID: \"81f2ad5f-8eec-47e0-9958-ec5f0be03fae\") " pod="openstack/aodh-3287-account-create-update-plmqg" Dec 10 11:18:51 crc kubenswrapper[4780]: I1210 11:18:51.739683 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-r8ch5\" (UniqueName: \"kubernetes.io/projected/c52cc2d2-45a5-4e32-8320-db28a4a5b5c6-kube-api-access-r8ch5\") pod \"aodh-db-create-ccvcx\" (UID: \"c52cc2d2-45a5-4e32-8320-db28a4a5b5c6\") " pod="openstack/aodh-db-create-ccvcx" Dec 10 11:18:51 crc kubenswrapper[4780]: I1210 11:18:51.754018 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-3287-account-create-update-plmqg"] Dec 10 11:18:51 crc kubenswrapper[4780]: I1210 11:18:51.779474 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-r8ch5\" (UniqueName: \"kubernetes.io/projected/c52cc2d2-45a5-4e32-8320-db28a4a5b5c6-kube-api-access-r8ch5\") pod \"aodh-db-create-ccvcx\" (UID: \"c52cc2d2-45a5-4e32-8320-db28a4a5b5c6\") " pod="openstack/aodh-db-create-ccvcx" Dec 10 11:18:51 crc kubenswrapper[4780]: I1210 11:18:51.848213 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/81f2ad5f-8eec-47e0-9958-ec5f0be03fae-operator-scripts\") pod \"aodh-3287-account-create-update-plmqg\" (UID: \"81f2ad5f-8eec-47e0-9958-ec5f0be03fae\") " pod="openstack/aodh-3287-account-create-update-plmqg" Dec 10 11:18:51 crc kubenswrapper[4780]: I1210 11:18:51.848336 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-k27zk\" (UniqueName: \"kubernetes.io/projected/81f2ad5f-8eec-47e0-9958-ec5f0be03fae-kube-api-access-k27zk\") pod \"aodh-3287-account-create-update-plmqg\" (UID: 
\"81f2ad5f-8eec-47e0-9958-ec5f0be03fae\") " pod="openstack/aodh-3287-account-create-update-plmqg" Dec 10 11:18:51 crc kubenswrapper[4780]: I1210 11:18:51.850462 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/81f2ad5f-8eec-47e0-9958-ec5f0be03fae-operator-scripts\") pod \"aodh-3287-account-create-update-plmqg\" (UID: \"81f2ad5f-8eec-47e0-9958-ec5f0be03fae\") " pod="openstack/aodh-3287-account-create-update-plmqg" Dec 10 11:18:51 crc kubenswrapper[4780]: I1210 11:18:51.894877 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-db-create-ccvcx" Dec 10 11:18:51 crc kubenswrapper[4780]: I1210 11:18:51.897744 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-k27zk\" (UniqueName: \"kubernetes.io/projected/81f2ad5f-8eec-47e0-9958-ec5f0be03fae-kube-api-access-k27zk\") pod \"aodh-3287-account-create-update-plmqg\" (UID: \"81f2ad5f-8eec-47e0-9958-ec5f0be03fae\") " pod="openstack/aodh-3287-account-create-update-plmqg" Dec 10 11:18:52 crc kubenswrapper[4780]: I1210 11:18:52.123631 4780 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="929349d7-3f7f-4e86-969d-2fa2b530959d" path="/var/lib/kubelet/pods/929349d7-3f7f-4e86-969d-2fa2b530959d/volumes" Dec 10 11:18:52 crc kubenswrapper[4780]: I1210 11:18:52.199355 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-3287-account-create-update-plmqg" Dec 10 11:18:52 crc kubenswrapper[4780]: I1210 11:18:52.603287 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Dec 10 11:18:52 crc kubenswrapper[4780]: I1210 11:18:52.839421 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-db-create-ccvcx"] Dec 10 11:18:53 crc kubenswrapper[4780]: I1210 11:18:53.085489 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-3287-account-create-update-plmqg"] Dec 10 11:18:53 crc kubenswrapper[4780]: I1210 11:18:53.267191 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-3287-account-create-update-plmqg" event={"ID":"81f2ad5f-8eec-47e0-9958-ec5f0be03fae","Type":"ContainerStarted","Data":"49e99385bd9187f2b406fcd4c27c6cb4738a826adf59c2a580bd7c95487ef42d"} Dec 10 11:18:53 crc kubenswrapper[4780]: I1210 11:18:53.271675 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c9ca683b-68dc-4bf8-8977-0c9a6cef383b","Type":"ContainerStarted","Data":"afbe7c8e397a335b45f9376293edaba33b95bb534ada30c8e33eb854ad43ba39"} Dec 10 11:18:53 crc kubenswrapper[4780]: I1210 11:18:53.274429 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-db-create-ccvcx" event={"ID":"c52cc2d2-45a5-4e32-8320-db28a4a5b5c6","Type":"ContainerStarted","Data":"7b2a6371dc15b316aada82df6419122cdb9ee3ef98f065d428c64908c5deaa48"} Dec 10 11:18:53 crc kubenswrapper[4780]: I1210 11:18:53.274472 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-db-create-ccvcx" event={"ID":"c52cc2d2-45a5-4e32-8320-db28a4a5b5c6","Type":"ContainerStarted","Data":"8c3dbaacb59577b561ee3487f1959941d381e778f421909d35e539deab533765"} Dec 10 11:18:53 crc kubenswrapper[4780]: I1210 11:18:53.302217 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/aodh-db-create-ccvcx" podStartSLOduration=2.30218796 podStartE2EDuration="2.30218796s" podCreationTimestamp="2025-12-10 11:18:51 +0000 UTC" 
firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 11:18:53.296968207 +0000 UTC m=+2038.150361650" watchObservedRunningTime="2025-12-10 11:18:53.30218796 +0000 UTC m=+2038.155581403" Dec 10 11:18:53 crc kubenswrapper[4780]: I1210 11:18:53.461458 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell0-conductor-0" Dec 10 11:18:54 crc kubenswrapper[4780]: I1210 11:18:54.190814 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-cell-mapping-ff8ld"] Dec 10 11:18:54 crc kubenswrapper[4780]: I1210 11:18:54.194496 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-cell-mapping-ff8ld" Dec 10 11:18:54 crc kubenswrapper[4780]: I1210 11:18:54.218841 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-manage-scripts" Dec 10 11:18:54 crc kubenswrapper[4780]: I1210 11:18:54.219203 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-manage-config-data" Dec 10 11:18:54 crc kubenswrapper[4780]: I1210 11:18:54.226428 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-cell-mapping-ff8ld"] Dec 10 11:18:54 crc kubenswrapper[4780]: I1210 11:18:54.246807 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/75ef71d6-20b7-40b6-83c3-b3ee314c827f-combined-ca-bundle\") pod \"nova-cell0-cell-mapping-ff8ld\" (UID: \"75ef71d6-20b7-40b6-83c3-b3ee314c827f\") " pod="openstack/nova-cell0-cell-mapping-ff8ld" Dec 10 11:18:54 crc kubenswrapper[4780]: I1210 11:18:54.246956 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pjlxd\" (UniqueName: \"kubernetes.io/projected/75ef71d6-20b7-40b6-83c3-b3ee314c827f-kube-api-access-pjlxd\") pod \"nova-cell0-cell-mapping-ff8ld\" (UID: \"75ef71d6-20b7-40b6-83c3-b3ee314c827f\") " pod="openstack/nova-cell0-cell-mapping-ff8ld" Dec 10 11:18:54 crc kubenswrapper[4780]: I1210 11:18:54.247169 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/75ef71d6-20b7-40b6-83c3-b3ee314c827f-config-data\") pod \"nova-cell0-cell-mapping-ff8ld\" (UID: \"75ef71d6-20b7-40b6-83c3-b3ee314c827f\") " pod="openstack/nova-cell0-cell-mapping-ff8ld" Dec 10 11:18:54 crc kubenswrapper[4780]: I1210 11:18:54.247209 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/75ef71d6-20b7-40b6-83c3-b3ee314c827f-scripts\") pod \"nova-cell0-cell-mapping-ff8ld\" (UID: \"75ef71d6-20b7-40b6-83c3-b3ee314c827f\") " pod="openstack/nova-cell0-cell-mapping-ff8ld" Dec 10 11:18:54 crc kubenswrapper[4780]: I1210 11:18:54.350612 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/75ef71d6-20b7-40b6-83c3-b3ee314c827f-combined-ca-bundle\") pod \"nova-cell0-cell-mapping-ff8ld\" (UID: \"75ef71d6-20b7-40b6-83c3-b3ee314c827f\") " pod="openstack/nova-cell0-cell-mapping-ff8ld" Dec 10 11:18:54 crc kubenswrapper[4780]: I1210 11:18:54.351350 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pjlxd\" (UniqueName: 
\"kubernetes.io/projected/75ef71d6-20b7-40b6-83c3-b3ee314c827f-kube-api-access-pjlxd\") pod \"nova-cell0-cell-mapping-ff8ld\" (UID: \"75ef71d6-20b7-40b6-83c3-b3ee314c827f\") " pod="openstack/nova-cell0-cell-mapping-ff8ld" Dec 10 11:18:54 crc kubenswrapper[4780]: I1210 11:18:54.351781 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/75ef71d6-20b7-40b6-83c3-b3ee314c827f-config-data\") pod \"nova-cell0-cell-mapping-ff8ld\" (UID: \"75ef71d6-20b7-40b6-83c3-b3ee314c827f\") " pod="openstack/nova-cell0-cell-mapping-ff8ld" Dec 10 11:18:54 crc kubenswrapper[4780]: I1210 11:18:54.351839 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/75ef71d6-20b7-40b6-83c3-b3ee314c827f-scripts\") pod \"nova-cell0-cell-mapping-ff8ld\" (UID: \"75ef71d6-20b7-40b6-83c3-b3ee314c827f\") " pod="openstack/nova-cell0-cell-mapping-ff8ld" Dec 10 11:18:54 crc kubenswrapper[4780]: I1210 11:18:54.375261 4780 generic.go:334] "Generic (PLEG): container finished" podID="81f2ad5f-8eec-47e0-9958-ec5f0be03fae" containerID="a200422138b0eb32a34b61428752536bc17b969f3a2813a5dd548f16f980de37" exitCode=0 Dec 10 11:18:54 crc kubenswrapper[4780]: I1210 11:18:54.375443 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-3287-account-create-update-plmqg" event={"ID":"81f2ad5f-8eec-47e0-9958-ec5f0be03fae","Type":"ContainerDied","Data":"a200422138b0eb32a34b61428752536bc17b969f3a2813a5dd548f16f980de37"} Dec 10 11:18:54 crc kubenswrapper[4780]: I1210 11:18:54.380156 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/75ef71d6-20b7-40b6-83c3-b3ee314c827f-config-data\") pod \"nova-cell0-cell-mapping-ff8ld\" (UID: \"75ef71d6-20b7-40b6-83c3-b3ee314c827f\") " pod="openstack/nova-cell0-cell-mapping-ff8ld" Dec 10 11:18:54 crc kubenswrapper[4780]: I1210 11:18:54.381361 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/75ef71d6-20b7-40b6-83c3-b3ee314c827f-scripts\") pod \"nova-cell0-cell-mapping-ff8ld\" (UID: \"75ef71d6-20b7-40b6-83c3-b3ee314c827f\") " pod="openstack/nova-cell0-cell-mapping-ff8ld" Dec 10 11:18:54 crc kubenswrapper[4780]: I1210 11:18:54.404903 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c9ca683b-68dc-4bf8-8977-0c9a6cef383b","Type":"ContainerStarted","Data":"5a56e3544792de1bbaa784b65daf1709ada6580e213bda0061f7ab21c38923d6"} Dec 10 11:18:54 crc kubenswrapper[4780]: I1210 11:18:54.424173 4780 generic.go:334] "Generic (PLEG): container finished" podID="c52cc2d2-45a5-4e32-8320-db28a4a5b5c6" containerID="7b2a6371dc15b316aada82df6419122cdb9ee3ef98f065d428c64908c5deaa48" exitCode=0 Dec 10 11:18:54 crc kubenswrapper[4780]: I1210 11:18:54.424267 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-db-create-ccvcx" event={"ID":"c52cc2d2-45a5-4e32-8320-db28a4a5b5c6","Type":"ContainerDied","Data":"7b2a6371dc15b316aada82df6419122cdb9ee3ef98f065d428c64908c5deaa48"} Dec 10 11:18:54 crc kubenswrapper[4780]: I1210 11:18:54.433896 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pjlxd\" (UniqueName: \"kubernetes.io/projected/75ef71d6-20b7-40b6-83c3-b3ee314c827f-kube-api-access-pjlxd\") pod \"nova-cell0-cell-mapping-ff8ld\" (UID: \"75ef71d6-20b7-40b6-83c3-b3ee314c827f\") " pod="openstack/nova-cell0-cell-mapping-ff8ld" Dec 
10 11:18:54 crc kubenswrapper[4780]: I1210 11:18:54.447832 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/75ef71d6-20b7-40b6-83c3-b3ee314c827f-combined-ca-bundle\") pod \"nova-cell0-cell-mapping-ff8ld\" (UID: \"75ef71d6-20b7-40b6-83c3-b3ee314c827f\") " pod="openstack/nova-cell0-cell-mapping-ff8ld" Dec 10 11:18:54 crc kubenswrapper[4780]: I1210 11:18:54.654062 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Dec 10 11:18:54 crc kubenswrapper[4780]: I1210 11:18:54.657419 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Dec 10 11:18:54 crc kubenswrapper[4780]: I1210 11:18:54.665780 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Dec 10 11:18:54 crc kubenswrapper[4780]: I1210 11:18:54.667273 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-cell-mapping-ff8ld" Dec 10 11:18:54 crc kubenswrapper[4780]: I1210 11:18:54.799355 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wvrfb\" (UniqueName: \"kubernetes.io/projected/3264f745-f5c2-460e-b5e7-86eef3f26673-kube-api-access-wvrfb\") pod \"nova-api-0\" (UID: \"3264f745-f5c2-460e-b5e7-86eef3f26673\") " pod="openstack/nova-api-0" Dec 10 11:18:54 crc kubenswrapper[4780]: I1210 11:18:54.799474 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3264f745-f5c2-460e-b5e7-86eef3f26673-logs\") pod \"nova-api-0\" (UID: \"3264f745-f5c2-460e-b5e7-86eef3f26673\") " pod="openstack/nova-api-0" Dec 10 11:18:54 crc kubenswrapper[4780]: I1210 11:18:54.799546 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3264f745-f5c2-460e-b5e7-86eef3f26673-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"3264f745-f5c2-460e-b5e7-86eef3f26673\") " pod="openstack/nova-api-0" Dec 10 11:18:54 crc kubenswrapper[4780]: I1210 11:18:54.799579 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3264f745-f5c2-460e-b5e7-86eef3f26673-config-data\") pod \"nova-api-0\" (UID: \"3264f745-f5c2-460e-b5e7-86eef3f26673\") " pod="openstack/nova-api-0" Dec 10 11:18:54 crc kubenswrapper[4780]: I1210 11:18:54.812154 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Dec 10 11:18:54 crc kubenswrapper[4780]: I1210 11:18:54.865617 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"] Dec 10 11:18:54 crc kubenswrapper[4780]: I1210 11:18:54.868905 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Dec 10 11:18:54 crc kubenswrapper[4780]: I1210 11:18:54.879222 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data" Dec 10 11:18:54 crc kubenswrapper[4780]: I1210 11:18:54.917252 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wvrfb\" (UniqueName: \"kubernetes.io/projected/3264f745-f5c2-460e-b5e7-86eef3f26673-kube-api-access-wvrfb\") pod \"nova-api-0\" (UID: \"3264f745-f5c2-460e-b5e7-86eef3f26673\") " pod="openstack/nova-api-0" Dec 10 11:18:54 crc kubenswrapper[4780]: I1210 11:18:54.917601 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3264f745-f5c2-460e-b5e7-86eef3f26673-logs\") pod \"nova-api-0\" (UID: \"3264f745-f5c2-460e-b5e7-86eef3f26673\") " pod="openstack/nova-api-0" Dec 10 11:18:54 crc kubenswrapper[4780]: I1210 11:18:54.917792 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3264f745-f5c2-460e-b5e7-86eef3f26673-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"3264f745-f5c2-460e-b5e7-86eef3f26673\") " pod="openstack/nova-api-0" Dec 10 11:18:54 crc kubenswrapper[4780]: I1210 11:18:54.917868 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3264f745-f5c2-460e-b5e7-86eef3f26673-config-data\") pod \"nova-api-0\" (UID: \"3264f745-f5c2-460e-b5e7-86eef3f26673\") " pod="openstack/nova-api-0" Dec 10 11:18:54 crc kubenswrapper[4780]: I1210 11:18:54.925089 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3264f745-f5c2-460e-b5e7-86eef3f26673-logs\") pod \"nova-api-0\" (UID: \"3264f745-f5c2-460e-b5e7-86eef3f26673\") " pod="openstack/nova-api-0" Dec 10 11:18:54 crc kubenswrapper[4780]: I1210 11:18:54.943656 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3264f745-f5c2-460e-b5e7-86eef3f26673-config-data\") pod \"nova-api-0\" (UID: \"3264f745-f5c2-460e-b5e7-86eef3f26673\") " pod="openstack/nova-api-0" Dec 10 11:18:54 crc kubenswrapper[4780]: I1210 11:18:54.945229 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3264f745-f5c2-460e-b5e7-86eef3f26673-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"3264f745-f5c2-460e-b5e7-86eef3f26673\") " pod="openstack/nova-api-0" Dec 10 11:18:55 crc kubenswrapper[4780]: I1210 11:18:55.038726 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wvrfb\" (UniqueName: \"kubernetes.io/projected/3264f745-f5c2-460e-b5e7-86eef3f26673-kube-api-access-wvrfb\") pod \"nova-api-0\" (UID: \"3264f745-f5c2-460e-b5e7-86eef3f26673\") " pod="openstack/nova-api-0" Dec 10 11:18:55 crc kubenswrapper[4780]: I1210 11:18:55.113161 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-27md9\" (UniqueName: \"kubernetes.io/projected/c38ca976-0284-49c1-836c-21f1a7d5354a-kube-api-access-27md9\") pod \"nova-scheduler-0\" (UID: \"c38ca976-0284-49c1-836c-21f1a7d5354a\") " pod="openstack/nova-scheduler-0" Dec 10 11:18:55 crc kubenswrapper[4780]: I1210 11:18:55.113556 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for 
volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c38ca976-0284-49c1-836c-21f1a7d5354a-config-data\") pod \"nova-scheduler-0\" (UID: \"c38ca976-0284-49c1-836c-21f1a7d5354a\") " pod="openstack/nova-scheduler-0" Dec 10 11:18:55 crc kubenswrapper[4780]: I1210 11:18:55.113848 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c38ca976-0284-49c1-836c-21f1a7d5354a-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"c38ca976-0284-49c1-836c-21f1a7d5354a\") " pod="openstack/nova-scheduler-0" Dec 10 11:18:55 crc kubenswrapper[4780]: I1210 11:18:55.214659 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Dec 10 11:18:55 crc kubenswrapper[4780]: I1210 11:18:55.282127 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-27md9\" (UniqueName: \"kubernetes.io/projected/c38ca976-0284-49c1-836c-21f1a7d5354a-kube-api-access-27md9\") pod \"nova-scheduler-0\" (UID: \"c38ca976-0284-49c1-836c-21f1a7d5354a\") " pod="openstack/nova-scheduler-0" Dec 10 11:18:55 crc kubenswrapper[4780]: I1210 11:18:55.282512 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c38ca976-0284-49c1-836c-21f1a7d5354a-config-data\") pod \"nova-scheduler-0\" (UID: \"c38ca976-0284-49c1-836c-21f1a7d5354a\") " pod="openstack/nova-scheduler-0" Dec 10 11:18:55 crc kubenswrapper[4780]: I1210 11:18:55.282788 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c38ca976-0284-49c1-836c-21f1a7d5354a-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"c38ca976-0284-49c1-836c-21f1a7d5354a\") " pod="openstack/nova-scheduler-0" Dec 10 11:18:55 crc kubenswrapper[4780]: I1210 11:18:55.335517 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c38ca976-0284-49c1-836c-21f1a7d5354a-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"c38ca976-0284-49c1-836c-21f1a7d5354a\") " pod="openstack/nova-scheduler-0" Dec 10 11:18:55 crc kubenswrapper[4780]: I1210 11:18:55.411447 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c38ca976-0284-49c1-836c-21f1a7d5354a-config-data\") pod \"nova-scheduler-0\" (UID: \"c38ca976-0284-49c1-836c-21f1a7d5354a\") " pod="openstack/nova-scheduler-0" Dec 10 11:18:55 crc kubenswrapper[4780]: I1210 11:18:55.429830 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-27md9\" (UniqueName: \"kubernetes.io/projected/c38ca976-0284-49c1-836c-21f1a7d5354a-kube-api-access-27md9\") pod \"nova-scheduler-0\" (UID: \"c38ca976-0284-49c1-836c-21f1a7d5354a\") " pod="openstack/nova-scheduler-0" Dec 10 11:18:55 crc kubenswrapper[4780]: I1210 11:18:55.572873 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Dec 10 11:18:55 crc kubenswrapper[4780]: I1210 11:18:55.578965 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Dec 10 11:18:55 crc kubenswrapper[4780]: I1210 11:18:55.605421 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Dec 10 11:18:55 crc kubenswrapper[4780]: I1210 11:18:55.663643 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Dec 10 11:18:55 crc kubenswrapper[4780]: I1210 11:18:55.706321 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/623968d1-d9da-4f8b-9501-2413ef496231-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"623968d1-d9da-4f8b-9501-2413ef496231\") " pod="openstack/nova-metadata-0" Dec 10 11:18:55 crc kubenswrapper[4780]: I1210 11:18:55.707515 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/623968d1-d9da-4f8b-9501-2413ef496231-logs\") pod \"nova-metadata-0\" (UID: \"623968d1-d9da-4f8b-9501-2413ef496231\") " pod="openstack/nova-metadata-0" Dec 10 11:18:55 crc kubenswrapper[4780]: I1210 11:18:55.707576 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/623968d1-d9da-4f8b-9501-2413ef496231-config-data\") pod \"nova-metadata-0\" (UID: \"623968d1-d9da-4f8b-9501-2413ef496231\") " pod="openstack/nova-metadata-0" Dec 10 11:18:55 crc kubenswrapper[4780]: I1210 11:18:55.707601 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8vb8l\" (UniqueName: \"kubernetes.io/projected/623968d1-d9da-4f8b-9501-2413ef496231-kube-api-access-8vb8l\") pod \"nova-metadata-0\" (UID: \"623968d1-d9da-4f8b-9501-2413ef496231\") " pod="openstack/nova-metadata-0" Dec 10 11:18:55 crc kubenswrapper[4780]: I1210 11:18:55.722024 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Dec 10 11:18:55 crc kubenswrapper[4780]: I1210 11:18:55.738549 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Dec 10 11:18:55 crc kubenswrapper[4780]: I1210 11:18:55.810326 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Dec 10 11:18:55 crc kubenswrapper[4780]: I1210 11:18:55.821084 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/623968d1-d9da-4f8b-9501-2413ef496231-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"623968d1-d9da-4f8b-9501-2413ef496231\") " pod="openstack/nova-metadata-0" Dec 10 11:18:55 crc kubenswrapper[4780]: I1210 11:18:55.829545 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/623968d1-d9da-4f8b-9501-2413ef496231-logs\") pod \"nova-metadata-0\" (UID: \"623968d1-d9da-4f8b-9501-2413ef496231\") " pod="openstack/nova-metadata-0" Dec 10 11:18:55 crc kubenswrapper[4780]: I1210 11:18:55.829747 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/623968d1-d9da-4f8b-9501-2413ef496231-config-data\") pod \"nova-metadata-0\" (UID: \"623968d1-d9da-4f8b-9501-2413ef496231\") " pod="openstack/nova-metadata-0" Dec 10 11:18:55 crc kubenswrapper[4780]: I1210 11:18:55.829891 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8vb8l\" (UniqueName: \"kubernetes.io/projected/623968d1-d9da-4f8b-9501-2413ef496231-kube-api-access-8vb8l\") pod \"nova-metadata-0\" (UID: \"623968d1-d9da-4f8b-9501-2413ef496231\") " pod="openstack/nova-metadata-0" Dec 10 11:18:55 crc kubenswrapper[4780]: I1210 11:18:55.831000 
4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/623968d1-d9da-4f8b-9501-2413ef496231-logs\") pod \"nova-metadata-0\" (UID: \"623968d1-d9da-4f8b-9501-2413ef496231\") " pod="openstack/nova-metadata-0" Dec 10 11:18:55 crc kubenswrapper[4780]: I1210 11:18:55.832185 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Dec 10 11:18:55 crc kubenswrapper[4780]: I1210 11:18:55.852990 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/623968d1-d9da-4f8b-9501-2413ef496231-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"623968d1-d9da-4f8b-9501-2413ef496231\") " pod="openstack/nova-metadata-0" Dec 10 11:18:55 crc kubenswrapper[4780]: I1210 11:18:55.863200 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/623968d1-d9da-4f8b-9501-2413ef496231-config-data\") pod \"nova-metadata-0\" (UID: \"623968d1-d9da-4f8b-9501-2413ef496231\") " pod="openstack/nova-metadata-0" Dec 10 11:18:55 crc kubenswrapper[4780]: I1210 11:18:55.878160 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-novncproxy-config-data" Dec 10 11:18:55 crc kubenswrapper[4780]: I1210 11:18:55.915443 4780 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Dec 10 11:18:55 crc kubenswrapper[4780]: I1210 11:18:55.921804 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-9b86998b5-l52cn"] Dec 10 11:18:55 crc kubenswrapper[4780]: I1210 11:18:55.925197 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8vb8l\" (UniqueName: \"kubernetes.io/projected/623968d1-d9da-4f8b-9501-2413ef496231-kube-api-access-8vb8l\") pod \"nova-metadata-0\" (UID: \"623968d1-d9da-4f8b-9501-2413ef496231\") " pod="openstack/nova-metadata-0" Dec 10 11:18:55 crc kubenswrapper[4780]: I1210 11:18:55.930551 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-9b86998b5-l52cn" Dec 10 11:18:55 crc kubenswrapper[4780]: I1210 11:18:55.936655 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5e8191e7-63a9-42d8-9049-b315f1e86c2d-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"5e8191e7-63a9-42d8-9049-b315f1e86c2d\") " pod="openstack/nova-cell1-novncproxy-0" Dec 10 11:18:55 crc kubenswrapper[4780]: I1210 11:18:55.936808 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5lm8g\" (UniqueName: \"kubernetes.io/projected/5e8191e7-63a9-42d8-9049-b315f1e86c2d-kube-api-access-5lm8g\") pod \"nova-cell1-novncproxy-0\" (UID: \"5e8191e7-63a9-42d8-9049-b315f1e86c2d\") " pod="openstack/nova-cell1-novncproxy-0" Dec 10 11:18:55 crc kubenswrapper[4780]: I1210 11:18:55.937397 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5e8191e7-63a9-42d8-9049-b315f1e86c2d-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"5e8191e7-63a9-42d8-9049-b315f1e86c2d\") " pod="openstack/nova-cell1-novncproxy-0" Dec 10 11:18:56 crc kubenswrapper[4780]: I1210 11:18:56.047479 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b931e42a-a6d4-4b05-996c-f18454ddab28-config\") pod \"dnsmasq-dns-9b86998b5-l52cn\" (UID: \"b931e42a-a6d4-4b05-996c-f18454ddab28\") " pod="openstack/dnsmasq-dns-9b86998b5-l52cn" Dec 10 11:18:56 crc kubenswrapper[4780]: I1210 11:18:56.048313 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5e8191e7-63a9-42d8-9049-b315f1e86c2d-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"5e8191e7-63a9-42d8-9049-b315f1e86c2d\") " pod="openstack/nova-cell1-novncproxy-0" Dec 10 11:18:56 crc kubenswrapper[4780]: I1210 11:18:56.048426 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/b931e42a-a6d4-4b05-996c-f18454ddab28-dns-swift-storage-0\") pod \"dnsmasq-dns-9b86998b5-l52cn\" (UID: \"b931e42a-a6d4-4b05-996c-f18454ddab28\") " pod="openstack/dnsmasq-dns-9b86998b5-l52cn" Dec 10 11:18:56 crc kubenswrapper[4780]: I1210 11:18:56.048522 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5lm8g\" (UniqueName: \"kubernetes.io/projected/5e8191e7-63a9-42d8-9049-b315f1e86c2d-kube-api-access-5lm8g\") pod \"nova-cell1-novncproxy-0\" (UID: \"5e8191e7-63a9-42d8-9049-b315f1e86c2d\") " pod="openstack/nova-cell1-novncproxy-0" Dec 10 11:18:56 crc kubenswrapper[4780]: I1210 11:18:56.048607 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/b931e42a-a6d4-4b05-996c-f18454ddab28-dns-svc\") pod \"dnsmasq-dns-9b86998b5-l52cn\" (UID: \"b931e42a-a6d4-4b05-996c-f18454ddab28\") " pod="openstack/dnsmasq-dns-9b86998b5-l52cn" Dec 10 11:18:56 crc kubenswrapper[4780]: I1210 11:18:56.048645 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/b931e42a-a6d4-4b05-996c-f18454ddab28-ovsdbserver-sb\") pod \"dnsmasq-dns-9b86998b5-l52cn\" (UID: 
\"b931e42a-a6d4-4b05-996c-f18454ddab28\") " pod="openstack/dnsmasq-dns-9b86998b5-l52cn" Dec 10 11:18:56 crc kubenswrapper[4780]: I1210 11:18:56.049021 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lxglv\" (UniqueName: \"kubernetes.io/projected/b931e42a-a6d4-4b05-996c-f18454ddab28-kube-api-access-lxglv\") pod \"dnsmasq-dns-9b86998b5-l52cn\" (UID: \"b931e42a-a6d4-4b05-996c-f18454ddab28\") " pod="openstack/dnsmasq-dns-9b86998b5-l52cn" Dec 10 11:18:56 crc kubenswrapper[4780]: I1210 11:18:56.049123 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/b931e42a-a6d4-4b05-996c-f18454ddab28-ovsdbserver-nb\") pod \"dnsmasq-dns-9b86998b5-l52cn\" (UID: \"b931e42a-a6d4-4b05-996c-f18454ddab28\") " pod="openstack/dnsmasq-dns-9b86998b5-l52cn" Dec 10 11:18:56 crc kubenswrapper[4780]: I1210 11:18:56.049261 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5e8191e7-63a9-42d8-9049-b315f1e86c2d-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"5e8191e7-63a9-42d8-9049-b315f1e86c2d\") " pod="openstack/nova-cell1-novncproxy-0" Dec 10 11:18:56 crc kubenswrapper[4780]: I1210 11:18:56.085703 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Dec 10 11:18:56 crc kubenswrapper[4780]: I1210 11:18:56.093031 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Dec 10 11:18:56 crc kubenswrapper[4780]: I1210 11:18:56.093094 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-9b86998b5-l52cn"] Dec 10 11:18:56 crc kubenswrapper[4780]: I1210 11:18:56.105911 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5e8191e7-63a9-42d8-9049-b315f1e86c2d-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"5e8191e7-63a9-42d8-9049-b315f1e86c2d\") " pod="openstack/nova-cell1-novncproxy-0" Dec 10 11:18:56 crc kubenswrapper[4780]: I1210 11:18:56.110738 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5e8191e7-63a9-42d8-9049-b315f1e86c2d-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"5e8191e7-63a9-42d8-9049-b315f1e86c2d\") " pod="openstack/nova-cell1-novncproxy-0" Dec 10 11:18:56 crc kubenswrapper[4780]: I1210 11:18:56.133219 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5lm8g\" (UniqueName: \"kubernetes.io/projected/5e8191e7-63a9-42d8-9049-b315f1e86c2d-kube-api-access-5lm8g\") pod \"nova-cell1-novncproxy-0\" (UID: \"5e8191e7-63a9-42d8-9049-b315f1e86c2d\") " pod="openstack/nova-cell1-novncproxy-0" Dec 10 11:18:56 crc kubenswrapper[4780]: I1210 11:18:56.176620 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lxglv\" (UniqueName: \"kubernetes.io/projected/b931e42a-a6d4-4b05-996c-f18454ddab28-kube-api-access-lxglv\") pod \"dnsmasq-dns-9b86998b5-l52cn\" (UID: \"b931e42a-a6d4-4b05-996c-f18454ddab28\") " pod="openstack/dnsmasq-dns-9b86998b5-l52cn" Dec 10 11:18:56 crc kubenswrapper[4780]: I1210 11:18:56.176816 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: 
\"kubernetes.io/configmap/b931e42a-a6d4-4b05-996c-f18454ddab28-ovsdbserver-nb\") pod \"dnsmasq-dns-9b86998b5-l52cn\" (UID: \"b931e42a-a6d4-4b05-996c-f18454ddab28\") " pod="openstack/dnsmasq-dns-9b86998b5-l52cn" Dec 10 11:18:56 crc kubenswrapper[4780]: I1210 11:18:56.177653 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b931e42a-a6d4-4b05-996c-f18454ddab28-config\") pod \"dnsmasq-dns-9b86998b5-l52cn\" (UID: \"b931e42a-a6d4-4b05-996c-f18454ddab28\") " pod="openstack/dnsmasq-dns-9b86998b5-l52cn" Dec 10 11:18:56 crc kubenswrapper[4780]: I1210 11:18:56.178026 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/b931e42a-a6d4-4b05-996c-f18454ddab28-dns-swift-storage-0\") pod \"dnsmasq-dns-9b86998b5-l52cn\" (UID: \"b931e42a-a6d4-4b05-996c-f18454ddab28\") " pod="openstack/dnsmasq-dns-9b86998b5-l52cn" Dec 10 11:18:56 crc kubenswrapper[4780]: I1210 11:18:56.178169 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/b931e42a-a6d4-4b05-996c-f18454ddab28-dns-svc\") pod \"dnsmasq-dns-9b86998b5-l52cn\" (UID: \"b931e42a-a6d4-4b05-996c-f18454ddab28\") " pod="openstack/dnsmasq-dns-9b86998b5-l52cn" Dec 10 11:18:56 crc kubenswrapper[4780]: I1210 11:18:56.178203 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/b931e42a-a6d4-4b05-996c-f18454ddab28-ovsdbserver-sb\") pod \"dnsmasq-dns-9b86998b5-l52cn\" (UID: \"b931e42a-a6d4-4b05-996c-f18454ddab28\") " pod="openstack/dnsmasq-dns-9b86998b5-l52cn" Dec 10 11:18:56 crc kubenswrapper[4780]: I1210 11:18:56.178824 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/b931e42a-a6d4-4b05-996c-f18454ddab28-ovsdbserver-nb\") pod \"dnsmasq-dns-9b86998b5-l52cn\" (UID: \"b931e42a-a6d4-4b05-996c-f18454ddab28\") " pod="openstack/dnsmasq-dns-9b86998b5-l52cn" Dec 10 11:18:56 crc kubenswrapper[4780]: I1210 11:18:56.180512 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/b931e42a-a6d4-4b05-996c-f18454ddab28-dns-swift-storage-0\") pod \"dnsmasq-dns-9b86998b5-l52cn\" (UID: \"b931e42a-a6d4-4b05-996c-f18454ddab28\") " pod="openstack/dnsmasq-dns-9b86998b5-l52cn" Dec 10 11:18:56 crc kubenswrapper[4780]: I1210 11:18:56.180620 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/b931e42a-a6d4-4b05-996c-f18454ddab28-dns-svc\") pod \"dnsmasq-dns-9b86998b5-l52cn\" (UID: \"b931e42a-a6d4-4b05-996c-f18454ddab28\") " pod="openstack/dnsmasq-dns-9b86998b5-l52cn" Dec 10 11:18:56 crc kubenswrapper[4780]: I1210 11:18:56.180811 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b931e42a-a6d4-4b05-996c-f18454ddab28-config\") pod \"dnsmasq-dns-9b86998b5-l52cn\" (UID: \"b931e42a-a6d4-4b05-996c-f18454ddab28\") " pod="openstack/dnsmasq-dns-9b86998b5-l52cn" Dec 10 11:18:56 crc kubenswrapper[4780]: I1210 11:18:56.180865 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/b931e42a-a6d4-4b05-996c-f18454ddab28-ovsdbserver-sb\") pod \"dnsmasq-dns-9b86998b5-l52cn\" (UID: 
\"b931e42a-a6d4-4b05-996c-f18454ddab28\") " pod="openstack/dnsmasq-dns-9b86998b5-l52cn" Dec 10 11:18:56 crc kubenswrapper[4780]: I1210 11:18:56.195510 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Dec 10 11:18:56 crc kubenswrapper[4780]: I1210 11:18:56.228629 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lxglv\" (UniqueName: \"kubernetes.io/projected/b931e42a-a6d4-4b05-996c-f18454ddab28-kube-api-access-lxglv\") pod \"dnsmasq-dns-9b86998b5-l52cn\" (UID: \"b931e42a-a6d4-4b05-996c-f18454ddab28\") " pod="openstack/dnsmasq-dns-9b86998b5-l52cn" Dec 10 11:18:56 crc kubenswrapper[4780]: I1210 11:18:56.515268 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-9b86998b5-l52cn" Dec 10 11:18:56 crc kubenswrapper[4780]: I1210 11:18:56.662853 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c9ca683b-68dc-4bf8-8977-0c9a6cef383b","Type":"ContainerStarted","Data":"293c85f4d792da1145749fc4363a71f5fd7a2163692dc028fb00c99011b2c3bf"} Dec 10 11:18:57 crc kubenswrapper[4780]: I1210 11:18:57.375492 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-cell-mapping-ff8ld"] Dec 10 11:18:57 crc kubenswrapper[4780]: I1210 11:18:57.781053 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-db-create-ccvcx" event={"ID":"c52cc2d2-45a5-4e32-8320-db28a4a5b5c6","Type":"ContainerDied","Data":"8c3dbaacb59577b561ee3487f1959941d381e778f421909d35e539deab533765"} Dec 10 11:18:57 crc kubenswrapper[4780]: I1210 11:18:57.785104 4780 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="8c3dbaacb59577b561ee3487f1959941d381e778f421909d35e539deab533765" Dec 10 11:18:57 crc kubenswrapper[4780]: I1210 11:18:57.802437 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-ff8ld" event={"ID":"75ef71d6-20b7-40b6-83c3-b3ee314c827f","Type":"ContainerStarted","Data":"e702f5906f04d6c029150d6cb56d27b334ba4732d1e131eaa1b795e598740cf0"} Dec 10 11:18:57 crc kubenswrapper[4780]: I1210 11:18:57.963069 4780 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-db-create-ccvcx" Dec 10 11:18:58 crc kubenswrapper[4780]: I1210 11:18:58.031348 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Dec 10 11:18:58 crc kubenswrapper[4780]: I1210 11:18:58.059047 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c52cc2d2-45a5-4e32-8320-db28a4a5b5c6-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "c52cc2d2-45a5-4e32-8320-db28a4a5b5c6" (UID: "c52cc2d2-45a5-4e32-8320-db28a4a5b5c6"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 11:18:58 crc kubenswrapper[4780]: I1210 11:18:58.056093 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c52cc2d2-45a5-4e32-8320-db28a4a5b5c6-operator-scripts\") pod \"c52cc2d2-45a5-4e32-8320-db28a4a5b5c6\" (UID: \"c52cc2d2-45a5-4e32-8320-db28a4a5b5c6\") " Dec 10 11:18:58 crc kubenswrapper[4780]: I1210 11:18:58.059839 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-r8ch5\" (UniqueName: \"kubernetes.io/projected/c52cc2d2-45a5-4e32-8320-db28a4a5b5c6-kube-api-access-r8ch5\") pod \"c52cc2d2-45a5-4e32-8320-db28a4a5b5c6\" (UID: \"c52cc2d2-45a5-4e32-8320-db28a4a5b5c6\") " Dec 10 11:18:58 crc kubenswrapper[4780]: I1210 11:18:58.061647 4780 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-3287-account-create-update-plmqg" Dec 10 11:18:58 crc kubenswrapper[4780]: I1210 11:18:58.068156 4780 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c52cc2d2-45a5-4e32-8320-db28a4a5b5c6-operator-scripts\") on node \"crc\" DevicePath \"\"" Dec 10 11:18:58 crc kubenswrapper[4780]: I1210 11:18:58.082803 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Dec 10 11:18:58 crc kubenswrapper[4780]: I1210 11:18:58.084608 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c52cc2d2-45a5-4e32-8320-db28a4a5b5c6-kube-api-access-r8ch5" (OuterVolumeSpecName: "kube-api-access-r8ch5") pod "c52cc2d2-45a5-4e32-8320-db28a4a5b5c6" (UID: "c52cc2d2-45a5-4e32-8320-db28a4a5b5c6"). InnerVolumeSpecName "kube-api-access-r8ch5". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:18:58 crc kubenswrapper[4780]: I1210 11:18:58.181153 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-k27zk\" (UniqueName: \"kubernetes.io/projected/81f2ad5f-8eec-47e0-9958-ec5f0be03fae-kube-api-access-k27zk\") pod \"81f2ad5f-8eec-47e0-9958-ec5f0be03fae\" (UID: \"81f2ad5f-8eec-47e0-9958-ec5f0be03fae\") " Dec 10 11:18:58 crc kubenswrapper[4780]: I1210 11:18:58.190422 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/81f2ad5f-8eec-47e0-9958-ec5f0be03fae-kube-api-access-k27zk" (OuterVolumeSpecName: "kube-api-access-k27zk") pod "81f2ad5f-8eec-47e0-9958-ec5f0be03fae" (UID: "81f2ad5f-8eec-47e0-9958-ec5f0be03fae"). InnerVolumeSpecName "kube-api-access-k27zk". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:18:58 crc kubenswrapper[4780]: I1210 11:18:58.191187 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/81f2ad5f-8eec-47e0-9958-ec5f0be03fae-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "81f2ad5f-8eec-47e0-9958-ec5f0be03fae" (UID: "81f2ad5f-8eec-47e0-9958-ec5f0be03fae"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 11:18:58 crc kubenswrapper[4780]: I1210 11:18:58.190465 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/81f2ad5f-8eec-47e0-9958-ec5f0be03fae-operator-scripts\") pod \"81f2ad5f-8eec-47e0-9958-ec5f0be03fae\" (UID: \"81f2ad5f-8eec-47e0-9958-ec5f0be03fae\") " Dec 10 11:18:58 crc kubenswrapper[4780]: I1210 11:18:58.211934 4780 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/81f2ad5f-8eec-47e0-9958-ec5f0be03fae-operator-scripts\") on node \"crc\" DevicePath \"\"" Dec 10 11:18:58 crc kubenswrapper[4780]: I1210 11:18:58.219169 4780 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-r8ch5\" (UniqueName: \"kubernetes.io/projected/c52cc2d2-45a5-4e32-8320-db28a4a5b5c6-kube-api-access-r8ch5\") on node \"crc\" DevicePath \"\"" Dec 10 11:18:58 crc kubenswrapper[4780]: I1210 11:18:58.219529 4780 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-k27zk\" (UniqueName: \"kubernetes.io/projected/81f2ad5f-8eec-47e0-9958-ec5f0be03fae-kube-api-access-k27zk\") on node \"crc\" DevicePath \"\"" Dec 10 11:18:58 crc kubenswrapper[4780]: I1210 11:18:58.749999 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-9b86998b5-l52cn"] Dec 10 11:18:58 crc kubenswrapper[4780]: I1210 11:18:58.769840 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Dec 10 11:18:58 crc kubenswrapper[4780]: I1210 11:18:58.850078 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Dec 10 11:18:58 crc kubenswrapper[4780]: I1210 11:18:58.876722 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-3287-account-create-update-plmqg" event={"ID":"81f2ad5f-8eec-47e0-9958-ec5f0be03fae","Type":"ContainerDied","Data":"49e99385bd9187f2b406fcd4c27c6cb4738a826adf59c2a580bd7c95487ef42d"} Dec 10 11:18:58 crc kubenswrapper[4780]: I1210 11:18:58.876832 4780 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="49e99385bd9187f2b406fcd4c27c6cb4738a826adf59c2a580bd7c95487ef42d" Dec 10 11:18:58 crc kubenswrapper[4780]: I1210 11:18:58.877046 4780 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-3287-account-create-update-plmqg" Dec 10 11:18:58 crc kubenswrapper[4780]: I1210 11:18:58.882749 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-9b86998b5-l52cn" event={"ID":"b931e42a-a6d4-4b05-996c-f18454ddab28","Type":"ContainerStarted","Data":"eb6b00be16869751175660dc049b2cea81047828804d82e8a7bea742a1b731d8"} Dec 10 11:18:58 crc kubenswrapper[4780]: I1210 11:18:58.907722 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"c38ca976-0284-49c1-836c-21f1a7d5354a","Type":"ContainerStarted","Data":"de2f06f2a6a3a7c405a19de279223644d34061af007eed5283d0e09561e21f54"} Dec 10 11:18:59 crc kubenswrapper[4780]: I1210 11:18:59.043675 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c9ca683b-68dc-4bf8-8977-0c9a6cef383b","Type":"ContainerStarted","Data":"ace8f789ed174ebe2dfbfeb76915cc7f6f3c1819e9a807093c858ad6c614dc7b"} Dec 10 11:18:59 crc kubenswrapper[4780]: I1210 11:18:59.084317 4780 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/aodh-db-create-ccvcx" Dec 10 11:18:59 crc kubenswrapper[4780]: I1210 11:18:59.095843 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"3264f745-f5c2-460e-b5e7-86eef3f26673","Type":"ContainerStarted","Data":"06f8d6856aaecedfb645bee58c2a5cb42e143124823d49bd72c8df56599924f5"} Dec 10 11:18:59 crc kubenswrapper[4780]: I1210 11:18:59.197462 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-conductor-db-sync-97bxk"] Dec 10 11:18:59 crc kubenswrapper[4780]: E1210 11:18:59.204168 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c52cc2d2-45a5-4e32-8320-db28a4a5b5c6" containerName="mariadb-database-create" Dec 10 11:18:59 crc kubenswrapper[4780]: I1210 11:18:59.204214 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="c52cc2d2-45a5-4e32-8320-db28a4a5b5c6" containerName="mariadb-database-create" Dec 10 11:18:59 crc kubenswrapper[4780]: E1210 11:18:59.204255 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="81f2ad5f-8eec-47e0-9958-ec5f0be03fae" containerName="mariadb-account-create-update" Dec 10 11:18:59 crc kubenswrapper[4780]: I1210 11:18:59.204264 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="81f2ad5f-8eec-47e0-9958-ec5f0be03fae" containerName="mariadb-account-create-update" Dec 10 11:18:59 crc kubenswrapper[4780]: I1210 11:18:59.204713 4780 memory_manager.go:354] "RemoveStaleState removing state" podUID="81f2ad5f-8eec-47e0-9958-ec5f0be03fae" containerName="mariadb-account-create-update" Dec 10 11:18:59 crc kubenswrapper[4780]: I1210 11:18:59.204751 4780 memory_manager.go:354] "RemoveStaleState removing state" podUID="c52cc2d2-45a5-4e32-8320-db28a4a5b5c6" containerName="mariadb-database-create" Dec 10 11:18:59 crc kubenswrapper[4780]: I1210 11:18:59.209267 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-97bxk" Dec 10 11:18:59 crc kubenswrapper[4780]: I1210 11:18:59.214028 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-config-data" Dec 10 11:18:59 crc kubenswrapper[4780]: I1210 11:18:59.225237 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-scripts" Dec 10 11:18:59 crc kubenswrapper[4780]: I1210 11:18:59.267554 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-97bxk"] Dec 10 11:18:59 crc kubenswrapper[4780]: I1210 11:18:59.289967 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2fd04a14-0a8f-4491-a00c-3fb008e736ce-config-data\") pod \"nova-cell1-conductor-db-sync-97bxk\" (UID: \"2fd04a14-0a8f-4491-a00c-3fb008e736ce\") " pod="openstack/nova-cell1-conductor-db-sync-97bxk" Dec 10 11:18:59 crc kubenswrapper[4780]: I1210 11:18:59.290178 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2fd04a14-0a8f-4491-a00c-3fb008e736ce-combined-ca-bundle\") pod \"nova-cell1-conductor-db-sync-97bxk\" (UID: \"2fd04a14-0a8f-4491-a00c-3fb008e736ce\") " pod="openstack/nova-cell1-conductor-db-sync-97bxk" Dec 10 11:18:59 crc kubenswrapper[4780]: I1210 11:18:59.290333 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-678vv\" (UniqueName: \"kubernetes.io/projected/2fd04a14-0a8f-4491-a00c-3fb008e736ce-kube-api-access-678vv\") pod \"nova-cell1-conductor-db-sync-97bxk\" (UID: \"2fd04a14-0a8f-4491-a00c-3fb008e736ce\") " pod="openstack/nova-cell1-conductor-db-sync-97bxk" Dec 10 11:18:59 crc kubenswrapper[4780]: I1210 11:18:59.290404 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2fd04a14-0a8f-4491-a00c-3fb008e736ce-scripts\") pod \"nova-cell1-conductor-db-sync-97bxk\" (UID: \"2fd04a14-0a8f-4491-a00c-3fb008e736ce\") " pod="openstack/nova-cell1-conductor-db-sync-97bxk" Dec 10 11:18:59 crc kubenswrapper[4780]: I1210 11:18:59.394894 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2fd04a14-0a8f-4491-a00c-3fb008e736ce-config-data\") pod \"nova-cell1-conductor-db-sync-97bxk\" (UID: \"2fd04a14-0a8f-4491-a00c-3fb008e736ce\") " pod="openstack/nova-cell1-conductor-db-sync-97bxk" Dec 10 11:18:59 crc kubenswrapper[4780]: I1210 11:18:59.395263 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2fd04a14-0a8f-4491-a00c-3fb008e736ce-combined-ca-bundle\") pod \"nova-cell1-conductor-db-sync-97bxk\" (UID: \"2fd04a14-0a8f-4491-a00c-3fb008e736ce\") " pod="openstack/nova-cell1-conductor-db-sync-97bxk" Dec 10 11:18:59 crc kubenswrapper[4780]: I1210 11:18:59.395361 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-678vv\" (UniqueName: \"kubernetes.io/projected/2fd04a14-0a8f-4491-a00c-3fb008e736ce-kube-api-access-678vv\") pod \"nova-cell1-conductor-db-sync-97bxk\" (UID: \"2fd04a14-0a8f-4491-a00c-3fb008e736ce\") " pod="openstack/nova-cell1-conductor-db-sync-97bxk" Dec 10 11:18:59 crc kubenswrapper[4780]: I1210 11:18:59.395415 4780 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2fd04a14-0a8f-4491-a00c-3fb008e736ce-scripts\") pod \"nova-cell1-conductor-db-sync-97bxk\" (UID: \"2fd04a14-0a8f-4491-a00c-3fb008e736ce\") " pod="openstack/nova-cell1-conductor-db-sync-97bxk" Dec 10 11:18:59 crc kubenswrapper[4780]: I1210 11:18:59.407373 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2fd04a14-0a8f-4491-a00c-3fb008e736ce-combined-ca-bundle\") pod \"nova-cell1-conductor-db-sync-97bxk\" (UID: \"2fd04a14-0a8f-4491-a00c-3fb008e736ce\") " pod="openstack/nova-cell1-conductor-db-sync-97bxk" Dec 10 11:18:59 crc kubenswrapper[4780]: I1210 11:18:59.413035 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2fd04a14-0a8f-4491-a00c-3fb008e736ce-scripts\") pod \"nova-cell1-conductor-db-sync-97bxk\" (UID: \"2fd04a14-0a8f-4491-a00c-3fb008e736ce\") " pod="openstack/nova-cell1-conductor-db-sync-97bxk" Dec 10 11:18:59 crc kubenswrapper[4780]: I1210 11:18:59.430075 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-678vv\" (UniqueName: \"kubernetes.io/projected/2fd04a14-0a8f-4491-a00c-3fb008e736ce-kube-api-access-678vv\") pod \"nova-cell1-conductor-db-sync-97bxk\" (UID: \"2fd04a14-0a8f-4491-a00c-3fb008e736ce\") " pod="openstack/nova-cell1-conductor-db-sync-97bxk" Dec 10 11:18:59 crc kubenswrapper[4780]: I1210 11:18:59.432028 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2fd04a14-0a8f-4491-a00c-3fb008e736ce-config-data\") pod \"nova-cell1-conductor-db-sync-97bxk\" (UID: \"2fd04a14-0a8f-4491-a00c-3fb008e736ce\") " pod="openstack/nova-cell1-conductor-db-sync-97bxk" Dec 10 11:18:59 crc kubenswrapper[4780]: I1210 11:18:59.577238 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-97bxk" Dec 10 11:18:59 crc kubenswrapper[4780]: I1210 11:18:59.962742 4780 scope.go:117] "RemoveContainer" containerID="90f38aa24e74e78a263a82b742bebf91991ff25f2212114904e33abbbd82de16" Dec 10 11:19:00 crc kubenswrapper[4780]: I1210 11:19:00.205812 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"5e8191e7-63a9-42d8-9049-b315f1e86c2d","Type":"ContainerStarted","Data":"7bad8002e56743e69269b7a7d66ec3bc05288b1248944123f41b042decb3257a"} Dec 10 11:19:00 crc kubenswrapper[4780]: I1210 11:19:00.222360 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-ff8ld" event={"ID":"75ef71d6-20b7-40b6-83c3-b3ee314c827f","Type":"ContainerStarted","Data":"b3a4aab9254d7d3152f1da5803b25aab61c300861a05794a305988f9ed012c34"} Dec 10 11:19:00 crc kubenswrapper[4780]: I1210 11:19:00.292997 4780 generic.go:334] "Generic (PLEG): container finished" podID="b931e42a-a6d4-4b05-996c-f18454ddab28" containerID="e9d20fcc6ee073b52b9ece46837ae2ee81c6d77ff3c1c12a408c69f318c2a6e6" exitCode=0 Dec 10 11:19:00 crc kubenswrapper[4780]: I1210 11:19:00.293185 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-9b86998b5-l52cn" event={"ID":"b931e42a-a6d4-4b05-996c-f18454ddab28","Type":"ContainerDied","Data":"e9d20fcc6ee073b52b9ece46837ae2ee81c6d77ff3c1c12a408c69f318c2a6e6"} Dec 10 11:19:00 crc kubenswrapper[4780]: I1210 11:19:00.302589 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"623968d1-d9da-4f8b-9501-2413ef496231","Type":"ContainerStarted","Data":"58af10de8a29eaf77989bbc51b7bda2f977ad79743fc93535b055b621719fa3a"} Dec 10 11:19:00 crc kubenswrapper[4780]: I1210 11:19:00.403545 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Dec 10 11:19:02 crc kubenswrapper[4780]: I1210 11:19:00.448706 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-cell-mapping-ff8ld" podStartSLOduration=6.448661141 podStartE2EDuration="6.448661141s" podCreationTimestamp="2025-12-10 11:18:54 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 11:19:00.286045975 +0000 UTC m=+2045.139439448" watchObservedRunningTime="2025-12-10 11:19:00.448661141 +0000 UTC m=+2045.302054584" Dec 10 11:19:02 crc kubenswrapper[4780]: I1210 11:19:00.561537 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.660135048 podStartE2EDuration="9.561499774s" podCreationTimestamp="2025-12-10 11:18:51 +0000 UTC" firstStartedPulling="2025-12-10 11:18:52.604338408 +0000 UTC m=+2037.457731851" lastFinishedPulling="2025-12-10 11:18:59.505703134 +0000 UTC m=+2044.359096577" observedRunningTime="2025-12-10 11:19:00.49208088 +0000 UTC m=+2045.345474323" watchObservedRunningTime="2025-12-10 11:19:00.561499774 +0000 UTC m=+2045.414893217" Dec 10 11:19:02 crc kubenswrapper[4780]: I1210 11:19:00.749439 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-97bxk"] Dec 10 11:19:02 crc kubenswrapper[4780]: W1210 11:19:00.784521 4780 manager.go:1169] Failed to process watch event {EventType:0 
Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod2fd04a14_0a8f_4491_a00c_3fb008e736ce.slice/crio-6c2eef32b7653892c04f92ba4fbf1f6d0c85a2d3991394673aae72116bab595d WatchSource:0}: Error finding container 6c2eef32b7653892c04f92ba4fbf1f6d0c85a2d3991394673aae72116bab595d: Status 404 returned error can't find the container with id 6c2eef32b7653892c04f92ba4fbf1f6d0c85a2d3991394673aae72116bab595d Dec 10 11:19:02 crc kubenswrapper[4780]: I1210 11:19:00.795420 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Dec 10 11:19:02 crc kubenswrapper[4780]: I1210 11:19:00.823287 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Dec 10 11:19:02 crc kubenswrapper[4780]: I1210 11:19:01.443936 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c9ca683b-68dc-4bf8-8977-0c9a6cef383b","Type":"ContainerStarted","Data":"266d63aedd7989bfd3bdd0f9453266abceabbc26d6cc92099d513c70b12f28ac"} Dec 10 11:19:02 crc kubenswrapper[4780]: I1210 11:19:01.452212 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-97bxk" event={"ID":"2fd04a14-0a8f-4491-a00c-3fb008e736ce","Type":"ContainerStarted","Data":"6c2eef32b7653892c04f92ba4fbf1f6d0c85a2d3991394673aae72116bab595d"} Dec 10 11:19:02 crc kubenswrapper[4780]: I1210 11:19:01.493278 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" event={"ID":"6bf1dca1-b191-4796-b326-baac53e84045","Type":"ContainerStarted","Data":"a4c56cbf13f0b58a88ae470c0b33e021ebda5393c2067f7ccb2e5ac1ebff5108"} Dec 10 11:19:02 crc kubenswrapper[4780]: I1210 11:19:02.432773 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/aodh-db-sync-xd5bf"] Dec 10 11:19:02 crc kubenswrapper[4780]: I1210 11:19:02.448431 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/aodh-db-sync-xd5bf" Dec 10 11:19:02 crc kubenswrapper[4780]: I1210 11:19:02.464901 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"aodh-scripts" Dec 10 11:19:02 crc kubenswrapper[4780]: I1210 11:19:02.465124 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"telemetry-autoscaling-dockercfg-hflbp" Dec 10 11:19:02 crc kubenswrapper[4780]: I1210 11:19:02.464984 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"osp-secret" Dec 10 11:19:02 crc kubenswrapper[4780]: I1210 11:19:02.465533 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"aodh-config-data" Dec 10 11:19:02 crc kubenswrapper[4780]: I1210 11:19:02.479967 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-db-sync-xd5bf"] Dec 10 11:19:02 crc kubenswrapper[4780]: I1210 11:19:02.561614 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-97bxk" event={"ID":"2fd04a14-0a8f-4491-a00c-3fb008e736ce","Type":"ContainerStarted","Data":"7e72e29420e67e29c52213e47dffb654f1ca1b241d782ea5a72f1a769baf73c6"} Dec 10 11:19:02 crc kubenswrapper[4780]: I1210 11:19:02.591182 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-9b86998b5-l52cn" event={"ID":"b931e42a-a6d4-4b05-996c-f18454ddab28","Type":"ContainerStarted","Data":"30530f8adbd8b2d90778923229046797e464c38a1a02084cc8dfcd0654b44c5a"} Dec 10 11:19:02 crc kubenswrapper[4780]: I1210 11:19:02.591281 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-9b86998b5-l52cn" Dec 10 11:19:02 crc kubenswrapper[4780]: I1210 11:19:02.609662 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a624d997-bd02-460a-9dd0-d636be0d70ef-scripts\") pod \"aodh-db-sync-xd5bf\" (UID: \"a624d997-bd02-460a-9dd0-d636be0d70ef\") " pod="openstack/aodh-db-sync-xd5bf" Dec 10 11:19:02 crc kubenswrapper[4780]: I1210 11:19:02.610303 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a624d997-bd02-460a-9dd0-d636be0d70ef-combined-ca-bundle\") pod \"aodh-db-sync-xd5bf\" (UID: \"a624d997-bd02-460a-9dd0-d636be0d70ef\") " pod="openstack/aodh-db-sync-xd5bf" Dec 10 11:19:02 crc kubenswrapper[4780]: I1210 11:19:02.610396 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a624d997-bd02-460a-9dd0-d636be0d70ef-config-data\") pod \"aodh-db-sync-xd5bf\" (UID: \"a624d997-bd02-460a-9dd0-d636be0d70ef\") " pod="openstack/aodh-db-sync-xd5bf" Dec 10 11:19:02 crc kubenswrapper[4780]: I1210 11:19:02.610527 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tcg5g\" (UniqueName: \"kubernetes.io/projected/a624d997-bd02-460a-9dd0-d636be0d70ef-kube-api-access-tcg5g\") pod \"aodh-db-sync-xd5bf\" (UID: \"a624d997-bd02-460a-9dd0-d636be0d70ef\") " pod="openstack/aodh-db-sync-xd5bf" Dec 10 11:19:02 crc kubenswrapper[4780]: I1210 11:19:02.660216 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-conductor-db-sync-97bxk" podStartSLOduration=4.660176163 podStartE2EDuration="4.660176163s" podCreationTimestamp="2025-12-10 11:18:58 +0000 UTC" 
firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 11:19:02.590510643 +0000 UTC m=+2047.443904086" watchObservedRunningTime="2025-12-10 11:19:02.660176163 +0000 UTC m=+2047.513569596" Dec 10 11:19:02 crc kubenswrapper[4780]: I1210 11:19:02.683783 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-9b86998b5-l52cn" podStartSLOduration=7.683752066 podStartE2EDuration="7.683752066s" podCreationTimestamp="2025-12-10 11:18:55 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 11:19:02.625537938 +0000 UTC m=+2047.478931381" watchObservedRunningTime="2025-12-10 11:19:02.683752066 +0000 UTC m=+2047.537145509" Dec 10 11:19:02 crc kubenswrapper[4780]: I1210 11:19:02.752396 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tcg5g\" (UniqueName: \"kubernetes.io/projected/a624d997-bd02-460a-9dd0-d636be0d70ef-kube-api-access-tcg5g\") pod \"aodh-db-sync-xd5bf\" (UID: \"a624d997-bd02-460a-9dd0-d636be0d70ef\") " pod="openstack/aodh-db-sync-xd5bf" Dec 10 11:19:02 crc kubenswrapper[4780]: I1210 11:19:02.753194 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a624d997-bd02-460a-9dd0-d636be0d70ef-scripts\") pod \"aodh-db-sync-xd5bf\" (UID: \"a624d997-bd02-460a-9dd0-d636be0d70ef\") " pod="openstack/aodh-db-sync-xd5bf" Dec 10 11:19:02 crc kubenswrapper[4780]: I1210 11:19:02.753864 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a624d997-bd02-460a-9dd0-d636be0d70ef-combined-ca-bundle\") pod \"aodh-db-sync-xd5bf\" (UID: \"a624d997-bd02-460a-9dd0-d636be0d70ef\") " pod="openstack/aodh-db-sync-xd5bf" Dec 10 11:19:02 crc kubenswrapper[4780]: I1210 11:19:02.754552 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a624d997-bd02-460a-9dd0-d636be0d70ef-config-data\") pod \"aodh-db-sync-xd5bf\" (UID: \"a624d997-bd02-460a-9dd0-d636be0d70ef\") " pod="openstack/aodh-db-sync-xd5bf" Dec 10 11:19:02 crc kubenswrapper[4780]: I1210 11:19:02.784367 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a624d997-bd02-460a-9dd0-d636be0d70ef-scripts\") pod \"aodh-db-sync-xd5bf\" (UID: \"a624d997-bd02-460a-9dd0-d636be0d70ef\") " pod="openstack/aodh-db-sync-xd5bf" Dec 10 11:19:02 crc kubenswrapper[4780]: I1210 11:19:02.794665 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a624d997-bd02-460a-9dd0-d636be0d70ef-combined-ca-bundle\") pod \"aodh-db-sync-xd5bf\" (UID: \"a624d997-bd02-460a-9dd0-d636be0d70ef\") " pod="openstack/aodh-db-sync-xd5bf" Dec 10 11:19:02 crc kubenswrapper[4780]: I1210 11:19:02.807716 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a624d997-bd02-460a-9dd0-d636be0d70ef-config-data\") pod \"aodh-db-sync-xd5bf\" (UID: \"a624d997-bd02-460a-9dd0-d636be0d70ef\") " pod="openstack/aodh-db-sync-xd5bf" Dec 10 11:19:02 crc kubenswrapper[4780]: I1210 11:19:02.881835 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tcg5g\" (UniqueName: 
\"kubernetes.io/projected/a624d997-bd02-460a-9dd0-d636be0d70ef-kube-api-access-tcg5g\") pod \"aodh-db-sync-xd5bf\" (UID: \"a624d997-bd02-460a-9dd0-d636be0d70ef\") " pod="openstack/aodh-db-sync-xd5bf" Dec 10 11:19:03 crc kubenswrapper[4780]: I1210 11:19:03.128583 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-db-sync-xd5bf" Dec 10 11:19:10 crc kubenswrapper[4780]: I1210 11:19:10.626483 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-db-sync-xd5bf"] Dec 10 11:19:11 crc kubenswrapper[4780]: I1210 11:19:11.057391 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"3264f745-f5c2-460e-b5e7-86eef3f26673","Type":"ContainerStarted","Data":"1927a16f1987dd47e6c40891de4a223749087ac80390db6bfd2b721b35e62e24"} Dec 10 11:19:11 crc kubenswrapper[4780]: I1210 11:19:11.067495 4780 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-cell1-novncproxy-0" podUID="5e8191e7-63a9-42d8-9049-b315f1e86c2d" containerName="nova-cell1-novncproxy-novncproxy" containerID="cri-o://cb233546f0f0d370b2ae22b9f57643fe38785abd13a796b2e678f51556a5a1f2" gracePeriod=30 Dec 10 11:19:11 crc kubenswrapper[4780]: I1210 11:19:11.067644 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"5e8191e7-63a9-42d8-9049-b315f1e86c2d","Type":"ContainerStarted","Data":"cb233546f0f0d370b2ae22b9f57643fe38785abd13a796b2e678f51556a5a1f2"} Dec 10 11:19:11 crc kubenswrapper[4780]: I1210 11:19:11.084613 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"c38ca976-0284-49c1-836c-21f1a7d5354a","Type":"ContainerStarted","Data":"1a9a9d3282a3180159d7ca7878ddf755e5f48acfe5299cb5086826436b5c3f56"} Dec 10 11:19:11 crc kubenswrapper[4780]: I1210 11:19:11.099369 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"623968d1-d9da-4f8b-9501-2413ef496231","Type":"ContainerStarted","Data":"dc25129db54dcb6dd83b87f7938ed470cb2104f7f249a6f62ca7fadc1ad99dff"} Dec 10 11:19:11 crc kubenswrapper[4780]: I1210 11:19:11.102344 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-novncproxy-0" podStartSLOduration=5.011073769 podStartE2EDuration="16.102264472s" podCreationTimestamp="2025-12-10 11:18:55 +0000 UTC" firstStartedPulling="2025-12-10 11:18:58.84850442 +0000 UTC m=+2043.701897863" lastFinishedPulling="2025-12-10 11:19:09.939695123 +0000 UTC m=+2054.793088566" observedRunningTime="2025-12-10 11:19:11.100083376 +0000 UTC m=+2055.953476819" watchObservedRunningTime="2025-12-10 11:19:11.102264472 +0000 UTC m=+2055.955657915" Dec 10 11:19:11 crc kubenswrapper[4780]: I1210 11:19:11.102505 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-db-sync-xd5bf" event={"ID":"a624d997-bd02-460a-9dd0-d636be0d70ef","Type":"ContainerStarted","Data":"afc46204355a5ce92947938ceefb23e4393d620d46dbeaefd98dbac78507ecae"} Dec 10 11:19:11 crc kubenswrapper[4780]: I1210 11:19:11.198066 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-novncproxy-0" Dec 10 11:19:11 crc kubenswrapper[4780]: I1210 11:19:11.520500 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-9b86998b5-l52cn" Dec 10 11:19:11 crc kubenswrapper[4780]: I1210 11:19:11.571114 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" 
pod="openstack/nova-scheduler-0" podStartSLOduration=5.760127996 podStartE2EDuration="17.571074642s" podCreationTimestamp="2025-12-10 11:18:54 +0000 UTC" firstStartedPulling="2025-12-10 11:18:58.128668215 +0000 UTC m=+2042.982061658" lastFinishedPulling="2025-12-10 11:19:09.939614861 +0000 UTC m=+2054.793008304" observedRunningTime="2025-12-10 11:19:11.134459855 +0000 UTC m=+2055.987853318" watchObservedRunningTime="2025-12-10 11:19:11.571074642 +0000 UTC m=+2056.424468085" Dec 10 11:19:11 crc kubenswrapper[4780]: I1210 11:19:11.630580 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-7756b9d78c-2nwlx"] Dec 10 11:19:11 crc kubenswrapper[4780]: I1210 11:19:11.630961 4780 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-7756b9d78c-2nwlx" podUID="8400fa88-5e91-417f-9495-12e8efcf25d0" containerName="dnsmasq-dns" containerID="cri-o://59af7a12e82c50ed9f7be18ab82d68521fd7f872f011b56513b8ad717d60a80d" gracePeriod=10 Dec 10 11:19:12 crc kubenswrapper[4780]: I1210 11:19:12.131577 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"3264f745-f5c2-460e-b5e7-86eef3f26673","Type":"ContainerStarted","Data":"8a3f806fc030dd1c72e54cd8159e4a89e60e3081c90a7c108ef97ea84dd201c7"} Dec 10 11:19:12 crc kubenswrapper[4780]: I1210 11:19:12.148905 4780 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="623968d1-d9da-4f8b-9501-2413ef496231" containerName="nova-metadata-log" containerID="cri-o://dc25129db54dcb6dd83b87f7938ed470cb2104f7f249a6f62ca7fadc1ad99dff" gracePeriod=30 Dec 10 11:19:12 crc kubenswrapper[4780]: I1210 11:19:12.149367 4780 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="623968d1-d9da-4f8b-9501-2413ef496231" containerName="nova-metadata-metadata" containerID="cri-o://3a316966089277afb1078dee2c56c72aff17c7b1906461422b032c078479ac91" gracePeriod=30 Dec 10 11:19:12 crc kubenswrapper[4780]: I1210 11:19:12.149634 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"623968d1-d9da-4f8b-9501-2413ef496231","Type":"ContainerStarted","Data":"3a316966089277afb1078dee2c56c72aff17c7b1906461422b032c078479ac91"} Dec 10 11:19:12 crc kubenswrapper[4780]: I1210 11:19:12.195628 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=6.182178491 podStartE2EDuration="18.195602741s" podCreationTimestamp="2025-12-10 11:18:54 +0000 UTC" firstStartedPulling="2025-12-10 11:18:57.952212426 +0000 UTC m=+2042.805605869" lastFinishedPulling="2025-12-10 11:19:09.965636676 +0000 UTC m=+2054.819030119" observedRunningTime="2025-12-10 11:19:12.180046084 +0000 UTC m=+2057.033439527" watchObservedRunningTime="2025-12-10 11:19:12.195602741 +0000 UTC m=+2057.048996184" Dec 10 11:19:12 crc kubenswrapper[4780]: I1210 11:19:12.219778 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=7.160059619 podStartE2EDuration="18.219744277s" podCreationTimestamp="2025-12-10 11:18:54 +0000 UTC" firstStartedPulling="2025-12-10 11:18:58.885519566 +0000 UTC m=+2043.738913009" lastFinishedPulling="2025-12-10 11:19:09.945204224 +0000 UTC m=+2054.798597667" observedRunningTime="2025-12-10 11:19:12.206864809 +0000 UTC m=+2057.060258252" watchObservedRunningTime="2025-12-10 11:19:12.219744277 +0000 UTC m=+2057.073137720" Dec 10 11:19:13 crc 
kubenswrapper[4780]: I1210 11:19:13.217894 4780 generic.go:334] "Generic (PLEG): container finished" podID="623968d1-d9da-4f8b-9501-2413ef496231" containerID="3a316966089277afb1078dee2c56c72aff17c7b1906461422b032c078479ac91" exitCode=0 Dec 10 11:19:13 crc kubenswrapper[4780]: I1210 11:19:13.219536 4780 generic.go:334] "Generic (PLEG): container finished" podID="623968d1-d9da-4f8b-9501-2413ef496231" containerID="dc25129db54dcb6dd83b87f7938ed470cb2104f7f249a6f62ca7fadc1ad99dff" exitCode=143 Dec 10 11:19:13 crc kubenswrapper[4780]: I1210 11:19:13.218981 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"623968d1-d9da-4f8b-9501-2413ef496231","Type":"ContainerDied","Data":"3a316966089277afb1078dee2c56c72aff17c7b1906461422b032c078479ac91"} Dec 10 11:19:13 crc kubenswrapper[4780]: I1210 11:19:13.219731 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"623968d1-d9da-4f8b-9501-2413ef496231","Type":"ContainerDied","Data":"dc25129db54dcb6dd83b87f7938ed470cb2104f7f249a6f62ca7fadc1ad99dff"} Dec 10 11:19:13 crc kubenswrapper[4780]: I1210 11:19:13.251366 4780 generic.go:334] "Generic (PLEG): container finished" podID="8400fa88-5e91-417f-9495-12e8efcf25d0" containerID="59af7a12e82c50ed9f7be18ab82d68521fd7f872f011b56513b8ad717d60a80d" exitCode=0 Dec 10 11:19:13 crc kubenswrapper[4780]: I1210 11:19:13.254117 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7756b9d78c-2nwlx" event={"ID":"8400fa88-5e91-417f-9495-12e8efcf25d0","Type":"ContainerDied","Data":"59af7a12e82c50ed9f7be18ab82d68521fd7f872f011b56513b8ad717d60a80d"} Dec 10 11:19:13 crc kubenswrapper[4780]: I1210 11:19:13.693588 4780 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Dec 10 11:19:13 crc kubenswrapper[4780]: E1210 11:19:13.711038 4780 kubelet_node_status.go:756] "Failed to set some node status fields" err="failed to validate nodeIP: route ip+net: no such network interface" node="crc" Dec 10 11:19:13 crc kubenswrapper[4780]: I1210 11:19:13.856281 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/623968d1-d9da-4f8b-9501-2413ef496231-config-data\") pod \"623968d1-d9da-4f8b-9501-2413ef496231\" (UID: \"623968d1-d9da-4f8b-9501-2413ef496231\") " Dec 10 11:19:13 crc kubenswrapper[4780]: I1210 11:19:13.856374 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8vb8l\" (UniqueName: \"kubernetes.io/projected/623968d1-d9da-4f8b-9501-2413ef496231-kube-api-access-8vb8l\") pod \"623968d1-d9da-4f8b-9501-2413ef496231\" (UID: \"623968d1-d9da-4f8b-9501-2413ef496231\") " Dec 10 11:19:13 crc kubenswrapper[4780]: I1210 11:19:13.856728 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/623968d1-d9da-4f8b-9501-2413ef496231-combined-ca-bundle\") pod \"623968d1-d9da-4f8b-9501-2413ef496231\" (UID: \"623968d1-d9da-4f8b-9501-2413ef496231\") " Dec 10 11:19:13 crc kubenswrapper[4780]: I1210 11:19:13.856802 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/623968d1-d9da-4f8b-9501-2413ef496231-logs\") pod \"623968d1-d9da-4f8b-9501-2413ef496231\" (UID: \"623968d1-d9da-4f8b-9501-2413ef496231\") " Dec 10 11:19:13 crc kubenswrapper[4780]: I1210 11:19:13.875993 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/623968d1-d9da-4f8b-9501-2413ef496231-kube-api-access-8vb8l" (OuterVolumeSpecName: "kube-api-access-8vb8l") pod "623968d1-d9da-4f8b-9501-2413ef496231" (UID: "623968d1-d9da-4f8b-9501-2413ef496231"). InnerVolumeSpecName "kube-api-access-8vb8l". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:19:13 crc kubenswrapper[4780]: I1210 11:19:13.888048 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/623968d1-d9da-4f8b-9501-2413ef496231-logs" (OuterVolumeSpecName: "logs") pod "623968d1-d9da-4f8b-9501-2413ef496231" (UID: "623968d1-d9da-4f8b-9501-2413ef496231"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 11:19:13 crc kubenswrapper[4780]: I1210 11:19:13.917738 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/623968d1-d9da-4f8b-9501-2413ef496231-config-data" (OuterVolumeSpecName: "config-data") pod "623968d1-d9da-4f8b-9501-2413ef496231" (UID: "623968d1-d9da-4f8b-9501-2413ef496231"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:19:13 crc kubenswrapper[4780]: I1210 11:19:13.946802 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/623968d1-d9da-4f8b-9501-2413ef496231-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "623968d1-d9da-4f8b-9501-2413ef496231" (UID: "623968d1-d9da-4f8b-9501-2413ef496231"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:19:13 crc kubenswrapper[4780]: I1210 11:19:13.970793 4780 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/623968d1-d9da-4f8b-9501-2413ef496231-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 10 11:19:13 crc kubenswrapper[4780]: I1210 11:19:13.974336 4780 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/623968d1-d9da-4f8b-9501-2413ef496231-logs\") on node \"crc\" DevicePath \"\"" Dec 10 11:19:13 crc kubenswrapper[4780]: I1210 11:19:13.974984 4780 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/623968d1-d9da-4f8b-9501-2413ef496231-config-data\") on node \"crc\" DevicePath \"\"" Dec 10 11:19:13 crc kubenswrapper[4780]: I1210 11:19:13.975258 4780 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8vb8l\" (UniqueName: \"kubernetes.io/projected/623968d1-d9da-4f8b-9501-2413ef496231-kube-api-access-8vb8l\") on node \"crc\" DevicePath \"\"" Dec 10 11:19:14 crc kubenswrapper[4780]: I1210 11:19:14.089393 4780 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7756b9d78c-2nwlx" Dec 10 11:19:14 crc kubenswrapper[4780]: I1210 11:19:14.194325 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/8400fa88-5e91-417f-9495-12e8efcf25d0-ovsdbserver-sb\") pod \"8400fa88-5e91-417f-9495-12e8efcf25d0\" (UID: \"8400fa88-5e91-417f-9495-12e8efcf25d0\") " Dec 10 11:19:14 crc kubenswrapper[4780]: I1210 11:19:14.194462 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/8400fa88-5e91-417f-9495-12e8efcf25d0-dns-svc\") pod \"8400fa88-5e91-417f-9495-12e8efcf25d0\" (UID: \"8400fa88-5e91-417f-9495-12e8efcf25d0\") " Dec 10 11:19:14 crc kubenswrapper[4780]: I1210 11:19:14.194727 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/8400fa88-5e91-417f-9495-12e8efcf25d0-dns-swift-storage-0\") pod \"8400fa88-5e91-417f-9495-12e8efcf25d0\" (UID: \"8400fa88-5e91-417f-9495-12e8efcf25d0\") " Dec 10 11:19:14 crc kubenswrapper[4780]: I1210 11:19:14.194790 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8400fa88-5e91-417f-9495-12e8efcf25d0-config\") pod \"8400fa88-5e91-417f-9495-12e8efcf25d0\" (UID: \"8400fa88-5e91-417f-9495-12e8efcf25d0\") " Dec 10 11:19:14 crc kubenswrapper[4780]: I1210 11:19:14.195015 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/8400fa88-5e91-417f-9495-12e8efcf25d0-ovsdbserver-nb\") pod \"8400fa88-5e91-417f-9495-12e8efcf25d0\" (UID: \"8400fa88-5e91-417f-9495-12e8efcf25d0\") " Dec 10 11:19:14 crc kubenswrapper[4780]: I1210 11:19:14.195256 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pzq76\" (UniqueName: \"kubernetes.io/projected/8400fa88-5e91-417f-9495-12e8efcf25d0-kube-api-access-pzq76\") pod \"8400fa88-5e91-417f-9495-12e8efcf25d0\" (UID: \"8400fa88-5e91-417f-9495-12e8efcf25d0\") " Dec 10 11:19:14 crc kubenswrapper[4780]: I1210 11:19:14.265721 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded 
for volume "kubernetes.io/projected/8400fa88-5e91-417f-9495-12e8efcf25d0-kube-api-access-pzq76" (OuterVolumeSpecName: "kube-api-access-pzq76") pod "8400fa88-5e91-417f-9495-12e8efcf25d0" (UID: "8400fa88-5e91-417f-9495-12e8efcf25d0"). InnerVolumeSpecName "kube-api-access-pzq76". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:19:14 crc kubenswrapper[4780]: I1210 11:19:14.316308 4780 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pzq76\" (UniqueName: \"kubernetes.io/projected/8400fa88-5e91-417f-9495-12e8efcf25d0-kube-api-access-pzq76\") on node \"crc\" DevicePath \"\"" Dec 10 11:19:14 crc kubenswrapper[4780]: I1210 11:19:14.318221 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"623968d1-d9da-4f8b-9501-2413ef496231","Type":"ContainerDied","Data":"58af10de8a29eaf77989bbc51b7bda2f977ad79743fc93535b055b621719fa3a"} Dec 10 11:19:14 crc kubenswrapper[4780]: I1210 11:19:14.318347 4780 scope.go:117] "RemoveContainer" containerID="3a316966089277afb1078dee2c56c72aff17c7b1906461422b032c078479ac91" Dec 10 11:19:14 crc kubenswrapper[4780]: I1210 11:19:14.318271 4780 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Dec 10 11:19:14 crc kubenswrapper[4780]: I1210 11:19:14.332282 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7756b9d78c-2nwlx" event={"ID":"8400fa88-5e91-417f-9495-12e8efcf25d0","Type":"ContainerDied","Data":"6255be7cb5b4ebace0937ced07c7842aafc7591bf78cdddd92658507def18af0"} Dec 10 11:19:14 crc kubenswrapper[4780]: I1210 11:19:14.332674 4780 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7756b9d78c-2nwlx" Dec 10 11:19:14 crc kubenswrapper[4780]: I1210 11:19:14.396142 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Dec 10 11:19:14 crc kubenswrapper[4780]: I1210 11:19:14.406828 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8400fa88-5e91-417f-9495-12e8efcf25d0-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "8400fa88-5e91-417f-9495-12e8efcf25d0" (UID: "8400fa88-5e91-417f-9495-12e8efcf25d0"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 11:19:14 crc kubenswrapper[4780]: I1210 11:19:14.423323 4780 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/8400fa88-5e91-417f-9495-12e8efcf25d0-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Dec 10 11:19:14 crc kubenswrapper[4780]: I1210 11:19:14.424421 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8400fa88-5e91-417f-9495-12e8efcf25d0-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "8400fa88-5e91-417f-9495-12e8efcf25d0" (UID: "8400fa88-5e91-417f-9495-12e8efcf25d0"). InnerVolumeSpecName "ovsdbserver-nb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 11:19:14 crc kubenswrapper[4780]: I1210 11:19:14.424612 4780 scope.go:117] "RemoveContainer" containerID="dc25129db54dcb6dd83b87f7938ed470cb2104f7f249a6f62ca7fadc1ad99dff" Dec 10 11:19:14 crc kubenswrapper[4780]: I1210 11:19:14.434671 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8400fa88-5e91-417f-9495-12e8efcf25d0-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "8400fa88-5e91-417f-9495-12e8efcf25d0" (UID: "8400fa88-5e91-417f-9495-12e8efcf25d0"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 11:19:14 crc kubenswrapper[4780]: I1210 11:19:14.483458 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8400fa88-5e91-417f-9495-12e8efcf25d0-config" (OuterVolumeSpecName: "config") pod "8400fa88-5e91-417f-9495-12e8efcf25d0" (UID: "8400fa88-5e91-417f-9495-12e8efcf25d0"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 11:19:14 crc kubenswrapper[4780]: I1210 11:19:14.486514 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8400fa88-5e91-417f-9495-12e8efcf25d0-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "8400fa88-5e91-417f-9495-12e8efcf25d0" (UID: "8400fa88-5e91-417f-9495-12e8efcf25d0"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 11:19:14 crc kubenswrapper[4780]: I1210 11:19:14.495107 4780 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-metadata-0"] Dec 10 11:19:14 crc kubenswrapper[4780]: I1210 11:19:14.501975 4780 scope.go:117] "RemoveContainer" containerID="59af7a12e82c50ed9f7be18ab82d68521fd7f872f011b56513b8ad717d60a80d" Dec 10 11:19:14 crc kubenswrapper[4780]: I1210 11:19:14.529545 4780 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/8400fa88-5e91-417f-9495-12e8efcf25d0-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Dec 10 11:19:14 crc kubenswrapper[4780]: I1210 11:19:14.529608 4780 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/8400fa88-5e91-417f-9495-12e8efcf25d0-dns-svc\") on node \"crc\" DevicePath \"\"" Dec 10 11:19:14 crc kubenswrapper[4780]: I1210 11:19:14.529626 4780 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8400fa88-5e91-417f-9495-12e8efcf25d0-config\") on node \"crc\" DevicePath \"\"" Dec 10 11:19:14 crc kubenswrapper[4780]: I1210 11:19:14.529639 4780 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/8400fa88-5e91-417f-9495-12e8efcf25d0-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Dec 10 11:19:14 crc kubenswrapper[4780]: I1210 11:19:14.534276 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Dec 10 11:19:14 crc kubenswrapper[4780]: E1210 11:19:14.535178 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8400fa88-5e91-417f-9495-12e8efcf25d0" containerName="init" Dec 10 11:19:14 crc kubenswrapper[4780]: I1210 11:19:14.535203 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="8400fa88-5e91-417f-9495-12e8efcf25d0" containerName="init" Dec 10 11:19:14 crc kubenswrapper[4780]: E1210 11:19:14.535232 4780 cpu_manager.go:410] "RemoveStaleState: removing container" 
podUID="8400fa88-5e91-417f-9495-12e8efcf25d0" containerName="dnsmasq-dns" Dec 10 11:19:14 crc kubenswrapper[4780]: I1210 11:19:14.535243 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="8400fa88-5e91-417f-9495-12e8efcf25d0" containerName="dnsmasq-dns" Dec 10 11:19:14 crc kubenswrapper[4780]: E1210 11:19:14.535283 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="623968d1-d9da-4f8b-9501-2413ef496231" containerName="nova-metadata-log" Dec 10 11:19:14 crc kubenswrapper[4780]: I1210 11:19:14.535292 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="623968d1-d9da-4f8b-9501-2413ef496231" containerName="nova-metadata-log" Dec 10 11:19:14 crc kubenswrapper[4780]: E1210 11:19:14.535306 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="623968d1-d9da-4f8b-9501-2413ef496231" containerName="nova-metadata-metadata" Dec 10 11:19:14 crc kubenswrapper[4780]: I1210 11:19:14.535313 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="623968d1-d9da-4f8b-9501-2413ef496231" containerName="nova-metadata-metadata" Dec 10 11:19:14 crc kubenswrapper[4780]: I1210 11:19:14.535731 4780 memory_manager.go:354] "RemoveStaleState removing state" podUID="623968d1-d9da-4f8b-9501-2413ef496231" containerName="nova-metadata-metadata" Dec 10 11:19:14 crc kubenswrapper[4780]: I1210 11:19:14.535777 4780 memory_manager.go:354] "RemoveStaleState removing state" podUID="8400fa88-5e91-417f-9495-12e8efcf25d0" containerName="dnsmasq-dns" Dec 10 11:19:14 crc kubenswrapper[4780]: I1210 11:19:14.535805 4780 memory_manager.go:354] "RemoveStaleState removing state" podUID="623968d1-d9da-4f8b-9501-2413ef496231" containerName="nova-metadata-log" Dec 10 11:19:14 crc kubenswrapper[4780]: I1210 11:19:14.546114 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Dec 10 11:19:14 crc kubenswrapper[4780]: I1210 11:19:14.550610 4780 scope.go:117] "RemoveContainer" containerID="60ec72c619f2504a673e15784e8e1841b7f31ff8e3b7d73662f5bfe72f542ab4" Dec 10 11:19:14 crc kubenswrapper[4780]: I1210 11:19:14.551329 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Dec 10 11:19:14 crc kubenswrapper[4780]: I1210 11:19:14.551575 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-metadata-internal-svc" Dec 10 11:19:14 crc kubenswrapper[4780]: I1210 11:19:14.589717 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Dec 10 11:19:14 crc kubenswrapper[4780]: I1210 11:19:14.736158 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d0bcb764-59b2-46b2-825e-1902287e5e62-logs\") pod \"nova-metadata-0\" (UID: \"d0bcb764-59b2-46b2-825e-1902287e5e62\") " pod="openstack/nova-metadata-0" Dec 10 11:19:14 crc kubenswrapper[4780]: I1210 11:19:14.736358 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/d0bcb764-59b2-46b2-825e-1902287e5e62-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"d0bcb764-59b2-46b2-825e-1902287e5e62\") " pod="openstack/nova-metadata-0" Dec 10 11:19:14 crc kubenswrapper[4780]: I1210 11:19:14.736436 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d0bcb764-59b2-46b2-825e-1902287e5e62-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"d0bcb764-59b2-46b2-825e-1902287e5e62\") " pod="openstack/nova-metadata-0" Dec 10 11:19:14 crc kubenswrapper[4780]: I1210 11:19:14.736491 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d0bcb764-59b2-46b2-825e-1902287e5e62-config-data\") pod \"nova-metadata-0\" (UID: \"d0bcb764-59b2-46b2-825e-1902287e5e62\") " pod="openstack/nova-metadata-0" Dec 10 11:19:14 crc kubenswrapper[4780]: I1210 11:19:14.736553 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-k4tkn\" (UniqueName: \"kubernetes.io/projected/d0bcb764-59b2-46b2-825e-1902287e5e62-kube-api-access-k4tkn\") pod \"nova-metadata-0\" (UID: \"d0bcb764-59b2-46b2-825e-1902287e5e62\") " pod="openstack/nova-metadata-0" Dec 10 11:19:14 crc kubenswrapper[4780]: I1210 11:19:14.748722 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-7756b9d78c-2nwlx"] Dec 10 11:19:14 crc kubenswrapper[4780]: I1210 11:19:14.761802 4780 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-7756b9d78c-2nwlx"] Dec 10 11:19:14 crc kubenswrapper[4780]: I1210 11:19:14.840187 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d0bcb764-59b2-46b2-825e-1902287e5e62-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"d0bcb764-59b2-46b2-825e-1902287e5e62\") " pod="openstack/nova-metadata-0" Dec 10 11:19:14 crc kubenswrapper[4780]: I1210 11:19:14.840285 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: 
\"kubernetes.io/secret/d0bcb764-59b2-46b2-825e-1902287e5e62-config-data\") pod \"nova-metadata-0\" (UID: \"d0bcb764-59b2-46b2-825e-1902287e5e62\") " pod="openstack/nova-metadata-0" Dec 10 11:19:14 crc kubenswrapper[4780]: I1210 11:19:14.840343 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-k4tkn\" (UniqueName: \"kubernetes.io/projected/d0bcb764-59b2-46b2-825e-1902287e5e62-kube-api-access-k4tkn\") pod \"nova-metadata-0\" (UID: \"d0bcb764-59b2-46b2-825e-1902287e5e62\") " pod="openstack/nova-metadata-0" Dec 10 11:19:14 crc kubenswrapper[4780]: I1210 11:19:14.840566 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d0bcb764-59b2-46b2-825e-1902287e5e62-logs\") pod \"nova-metadata-0\" (UID: \"d0bcb764-59b2-46b2-825e-1902287e5e62\") " pod="openstack/nova-metadata-0" Dec 10 11:19:14 crc kubenswrapper[4780]: I1210 11:19:14.840652 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/d0bcb764-59b2-46b2-825e-1902287e5e62-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"d0bcb764-59b2-46b2-825e-1902287e5e62\") " pod="openstack/nova-metadata-0" Dec 10 11:19:14 crc kubenswrapper[4780]: I1210 11:19:14.844359 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d0bcb764-59b2-46b2-825e-1902287e5e62-logs\") pod \"nova-metadata-0\" (UID: \"d0bcb764-59b2-46b2-825e-1902287e5e62\") " pod="openstack/nova-metadata-0" Dec 10 11:19:14 crc kubenswrapper[4780]: I1210 11:19:14.848893 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/d0bcb764-59b2-46b2-825e-1902287e5e62-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"d0bcb764-59b2-46b2-825e-1902287e5e62\") " pod="openstack/nova-metadata-0" Dec 10 11:19:14 crc kubenswrapper[4780]: I1210 11:19:14.849809 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d0bcb764-59b2-46b2-825e-1902287e5e62-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"d0bcb764-59b2-46b2-825e-1902287e5e62\") " pod="openstack/nova-metadata-0" Dec 10 11:19:14 crc kubenswrapper[4780]: I1210 11:19:14.864547 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d0bcb764-59b2-46b2-825e-1902287e5e62-config-data\") pod \"nova-metadata-0\" (UID: \"d0bcb764-59b2-46b2-825e-1902287e5e62\") " pod="openstack/nova-metadata-0" Dec 10 11:19:14 crc kubenswrapper[4780]: I1210 11:19:14.876807 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-k4tkn\" (UniqueName: \"kubernetes.io/projected/d0bcb764-59b2-46b2-825e-1902287e5e62-kube-api-access-k4tkn\") pod \"nova-metadata-0\" (UID: \"d0bcb764-59b2-46b2-825e-1902287e5e62\") " pod="openstack/nova-metadata-0" Dec 10 11:19:15 crc kubenswrapper[4780]: I1210 11:19:15.027837 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Dec 10 11:19:15 crc kubenswrapper[4780]: I1210 11:19:15.360417 4780 generic.go:334] "Generic (PLEG): container finished" podID="75ef71d6-20b7-40b6-83c3-b3ee314c827f" containerID="b3a4aab9254d7d3152f1da5803b25aab61c300861a05794a305988f9ed012c34" exitCode=0 Dec 10 11:19:15 crc kubenswrapper[4780]: I1210 11:19:15.360752 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-ff8ld" event={"ID":"75ef71d6-20b7-40b6-83c3-b3ee314c827f","Type":"ContainerDied","Data":"b3a4aab9254d7d3152f1da5803b25aab61c300861a05794a305988f9ed012c34"} Dec 10 11:19:15 crc kubenswrapper[4780]: I1210 11:19:15.638801 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Dec 10 11:19:15 crc kubenswrapper[4780]: I1210 11:19:15.665514 4780 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Dec 10 11:19:15 crc kubenswrapper[4780]: I1210 11:19:15.666141 4780 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Dec 10 11:19:15 crc kubenswrapper[4780]: I1210 11:19:15.723318 4780 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0" Dec 10 11:19:15 crc kubenswrapper[4780]: I1210 11:19:15.723379 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0" Dec 10 11:19:15 crc kubenswrapper[4780]: I1210 11:19:15.777181 4780 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-scheduler-0" Dec 10 11:19:15 crc kubenswrapper[4780]: I1210 11:19:15.997364 4780 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="623968d1-d9da-4f8b-9501-2413ef496231" path="/var/lib/kubelet/pods/623968d1-d9da-4f8b-9501-2413ef496231/volumes" Dec 10 11:19:15 crc kubenswrapper[4780]: I1210 11:19:15.998263 4780 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8400fa88-5e91-417f-9495-12e8efcf25d0" path="/var/lib/kubelet/pods/8400fa88-5e91-417f-9495-12e8efcf25d0/volumes" Dec 10 11:19:16 crc kubenswrapper[4780]: I1210 11:19:16.490483 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-scheduler-0" Dec 10 11:19:16 crc kubenswrapper[4780]: I1210 11:19:16.749577 4780 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="3264f745-f5c2-460e-b5e7-86eef3f26673" containerName="nova-api-log" probeResult="failure" output="Get \"http://10.217.0.238:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Dec 10 11:19:16 crc kubenswrapper[4780]: I1210 11:19:16.750024 4780 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="3264f745-f5c2-460e-b5e7-86eef3f26673" containerName="nova-api-api" probeResult="failure" output="Get \"http://10.217.0.238:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Dec 10 11:19:18 crc kubenswrapper[4780]: I1210 11:19:18.737410 4780 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-7756b9d78c-2nwlx" podUID="8400fa88-5e91-417f-9495-12e8efcf25d0" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.212:5353: i/o timeout" Dec 10 11:19:20 crc kubenswrapper[4780]: W1210 11:19:20.449196 4780 manager.go:1169] Failed to process watch event {EventType:0 
Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd0bcb764_59b2_46b2_825e_1902287e5e62.slice/crio-e0c2515e1fec3f10dcb3c5020779a1ef43ec21929b4281b84c97fd714a0302c3 WatchSource:0}: Error finding container e0c2515e1fec3f10dcb3c5020779a1ef43ec21929b4281b84c97fd714a0302c3: Status 404 returned error can't find the container with id e0c2515e1fec3f10dcb3c5020779a1ef43ec21929b4281b84c97fd714a0302c3 Dec 10 11:19:20 crc kubenswrapper[4780]: I1210 11:19:20.491112 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"d0bcb764-59b2-46b2-825e-1902287e5e62","Type":"ContainerStarted","Data":"e0c2515e1fec3f10dcb3c5020779a1ef43ec21929b4281b84c97fd714a0302c3"} Dec 10 11:19:20 crc kubenswrapper[4780]: I1210 11:19:20.495770 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-ff8ld" event={"ID":"75ef71d6-20b7-40b6-83c3-b3ee314c827f","Type":"ContainerDied","Data":"e702f5906f04d6c029150d6cb56d27b334ba4732d1e131eaa1b795e598740cf0"} Dec 10 11:19:20 crc kubenswrapper[4780]: I1210 11:19:20.495838 4780 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e702f5906f04d6c029150d6cb56d27b334ba4732d1e131eaa1b795e598740cf0" Dec 10 11:19:20 crc kubenswrapper[4780]: I1210 11:19:20.954706 4780 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-cell-mapping-ff8ld" Dec 10 11:19:20 crc kubenswrapper[4780]: I1210 11:19:20.976014 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pjlxd\" (UniqueName: \"kubernetes.io/projected/75ef71d6-20b7-40b6-83c3-b3ee314c827f-kube-api-access-pjlxd\") pod \"75ef71d6-20b7-40b6-83c3-b3ee314c827f\" (UID: \"75ef71d6-20b7-40b6-83c3-b3ee314c827f\") " Dec 10 11:19:20 crc kubenswrapper[4780]: I1210 11:19:20.976466 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/75ef71d6-20b7-40b6-83c3-b3ee314c827f-combined-ca-bundle\") pod \"75ef71d6-20b7-40b6-83c3-b3ee314c827f\" (UID: \"75ef71d6-20b7-40b6-83c3-b3ee314c827f\") " Dec 10 11:19:20 crc kubenswrapper[4780]: I1210 11:19:20.976504 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/75ef71d6-20b7-40b6-83c3-b3ee314c827f-scripts\") pod \"75ef71d6-20b7-40b6-83c3-b3ee314c827f\" (UID: \"75ef71d6-20b7-40b6-83c3-b3ee314c827f\") " Dec 10 11:19:20 crc kubenswrapper[4780]: I1210 11:19:20.976621 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/75ef71d6-20b7-40b6-83c3-b3ee314c827f-config-data\") pod \"75ef71d6-20b7-40b6-83c3-b3ee314c827f\" (UID: \"75ef71d6-20b7-40b6-83c3-b3ee314c827f\") " Dec 10 11:19:21 crc kubenswrapper[4780]: I1210 11:19:21.040232 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/75ef71d6-20b7-40b6-83c3-b3ee314c827f-kube-api-access-pjlxd" (OuterVolumeSpecName: "kube-api-access-pjlxd") pod "75ef71d6-20b7-40b6-83c3-b3ee314c827f" (UID: "75ef71d6-20b7-40b6-83c3-b3ee314c827f"). InnerVolumeSpecName "kube-api-access-pjlxd". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:19:21 crc kubenswrapper[4780]: I1210 11:19:21.041877 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/75ef71d6-20b7-40b6-83c3-b3ee314c827f-scripts" (OuterVolumeSpecName: "scripts") pod "75ef71d6-20b7-40b6-83c3-b3ee314c827f" (UID: "75ef71d6-20b7-40b6-83c3-b3ee314c827f"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:19:21 crc kubenswrapper[4780]: I1210 11:19:21.082805 4780 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/75ef71d6-20b7-40b6-83c3-b3ee314c827f-scripts\") on node \"crc\" DevicePath \"\"" Dec 10 11:19:21 crc kubenswrapper[4780]: I1210 11:19:21.082858 4780 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pjlxd\" (UniqueName: \"kubernetes.io/projected/75ef71d6-20b7-40b6-83c3-b3ee314c827f-kube-api-access-pjlxd\") on node \"crc\" DevicePath \"\"" Dec 10 11:19:21 crc kubenswrapper[4780]: I1210 11:19:21.121941 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/75ef71d6-20b7-40b6-83c3-b3ee314c827f-config-data" (OuterVolumeSpecName: "config-data") pod "75ef71d6-20b7-40b6-83c3-b3ee314c827f" (UID: "75ef71d6-20b7-40b6-83c3-b3ee314c827f"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:19:21 crc kubenswrapper[4780]: I1210 11:19:21.136707 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/75ef71d6-20b7-40b6-83c3-b3ee314c827f-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "75ef71d6-20b7-40b6-83c3-b3ee314c827f" (UID: "75ef71d6-20b7-40b6-83c3-b3ee314c827f"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:19:21 crc kubenswrapper[4780]: I1210 11:19:21.185411 4780 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/75ef71d6-20b7-40b6-83c3-b3ee314c827f-config-data\") on node \"crc\" DevicePath \"\"" Dec 10 11:19:21 crc kubenswrapper[4780]: I1210 11:19:21.185462 4780 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/75ef71d6-20b7-40b6-83c3-b3ee314c827f-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 10 11:19:21 crc kubenswrapper[4780]: I1210 11:19:21.522742 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"d0bcb764-59b2-46b2-825e-1902287e5e62","Type":"ContainerStarted","Data":"92c568b8eb5035be9290c7d9cde7a995480b5aa5609f6d747440c3f5abd5dde3"} Dec 10 11:19:21 crc kubenswrapper[4780]: I1210 11:19:21.557674 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-db-sync-xd5bf" event={"ID":"a624d997-bd02-460a-9dd0-d636be0d70ef","Type":"ContainerStarted","Data":"2c05254e53813076e4d628f0768d3f4cda15091643bc5ceafb945d1d46236498"} Dec 10 11:19:21 crc kubenswrapper[4780]: I1210 11:19:21.570399 4780 generic.go:334] "Generic (PLEG): container finished" podID="2fd04a14-0a8f-4491-a00c-3fb008e736ce" containerID="7e72e29420e67e29c52213e47dffb654f1ca1b241d782ea5a72f1a769baf73c6" exitCode=0 Dec 10 11:19:21 crc kubenswrapper[4780]: I1210 11:19:21.570537 4780 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-cell-mapping-ff8ld" Dec 10 11:19:21 crc kubenswrapper[4780]: I1210 11:19:21.570820 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-97bxk" event={"ID":"2fd04a14-0a8f-4491-a00c-3fb008e736ce","Type":"ContainerDied","Data":"7e72e29420e67e29c52213e47dffb654f1ca1b241d782ea5a72f1a769baf73c6"} Dec 10 11:19:21 crc kubenswrapper[4780]: I1210 11:19:21.571001 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ceilometer-0" Dec 10 11:19:21 crc kubenswrapper[4780]: I1210 11:19:21.586554 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/aodh-db-sync-xd5bf" podStartSLOduration=9.68917538 podStartE2EDuration="19.586525466s" podCreationTimestamp="2025-12-10 11:19:02 +0000 UTC" firstStartedPulling="2025-12-10 11:19:10.633537214 +0000 UTC m=+2055.486930657" lastFinishedPulling="2025-12-10 11:19:20.5308873 +0000 UTC m=+2065.384280743" observedRunningTime="2025-12-10 11:19:21.578422699 +0000 UTC m=+2066.431816142" watchObservedRunningTime="2025-12-10 11:19:21.586525466 +0000 UTC m=+2066.439918909" Dec 10 11:19:22 crc kubenswrapper[4780]: I1210 11:19:22.178227 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Dec 10 11:19:22 crc kubenswrapper[4780]: I1210 11:19:22.179027 4780 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="3264f745-f5c2-460e-b5e7-86eef3f26673" containerName="nova-api-log" containerID="cri-o://1927a16f1987dd47e6c40891de4a223749087ac80390db6bfd2b721b35e62e24" gracePeriod=30 Dec 10 11:19:22 crc kubenswrapper[4780]: I1210 11:19:22.179530 4780 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="3264f745-f5c2-460e-b5e7-86eef3f26673" containerName="nova-api-api" containerID="cri-o://8a3f806fc030dd1c72e54cd8159e4a89e60e3081c90a7c108ef97ea84dd201c7" gracePeriod=30 Dec 10 11:19:22 crc kubenswrapper[4780]: I1210 11:19:22.259993 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Dec 10 11:19:22 crc kubenswrapper[4780]: I1210 11:19:22.260399 4780 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-scheduler-0" podUID="c38ca976-0284-49c1-836c-21f1a7d5354a" containerName="nova-scheduler-scheduler" containerID="cri-o://1a9a9d3282a3180159d7ca7878ddf755e5f48acfe5299cb5086826436b5c3f56" gracePeriod=30 Dec 10 11:19:22 crc kubenswrapper[4780]: I1210 11:19:22.277497 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Dec 10 11:19:22 crc kubenswrapper[4780]: I1210 11:19:22.590317 4780 generic.go:334] "Generic (PLEG): container finished" podID="3264f745-f5c2-460e-b5e7-86eef3f26673" containerID="1927a16f1987dd47e6c40891de4a223749087ac80390db6bfd2b721b35e62e24" exitCode=143 Dec 10 11:19:22 crc kubenswrapper[4780]: I1210 11:19:22.590825 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"3264f745-f5c2-460e-b5e7-86eef3f26673","Type":"ContainerDied","Data":"1927a16f1987dd47e6c40891de4a223749087ac80390db6bfd2b721b35e62e24"} Dec 10 11:19:22 crc kubenswrapper[4780]: I1210 11:19:22.598243 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"d0bcb764-59b2-46b2-825e-1902287e5e62","Type":"ContainerStarted","Data":"01cb80f6673935ac2b51fb7f2cb9c265456ad598e82bb46dfe8ec69cfcb2556b"} Dec 10 11:19:22 crc 
kubenswrapper[4780]: I1210 11:19:22.653704 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=8.653672466 podStartE2EDuration="8.653672466s" podCreationTimestamp="2025-12-10 11:19:14 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 11:19:22.625797554 +0000 UTC m=+2067.479190997" watchObservedRunningTime="2025-12-10 11:19:22.653672466 +0000 UTC m=+2067.507065909" Dec 10 11:19:23 crc kubenswrapper[4780]: I1210 11:19:23.308523 4780 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-97bxk" Dec 10 11:19:23 crc kubenswrapper[4780]: I1210 11:19:23.411411 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2fd04a14-0a8f-4491-a00c-3fb008e736ce-config-data\") pod \"2fd04a14-0a8f-4491-a00c-3fb008e736ce\" (UID: \"2fd04a14-0a8f-4491-a00c-3fb008e736ce\") " Dec 10 11:19:23 crc kubenswrapper[4780]: I1210 11:19:23.411527 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2fd04a14-0a8f-4491-a00c-3fb008e736ce-combined-ca-bundle\") pod \"2fd04a14-0a8f-4491-a00c-3fb008e736ce\" (UID: \"2fd04a14-0a8f-4491-a00c-3fb008e736ce\") " Dec 10 11:19:23 crc kubenswrapper[4780]: I1210 11:19:23.411666 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2fd04a14-0a8f-4491-a00c-3fb008e736ce-scripts\") pod \"2fd04a14-0a8f-4491-a00c-3fb008e736ce\" (UID: \"2fd04a14-0a8f-4491-a00c-3fb008e736ce\") " Dec 10 11:19:23 crc kubenswrapper[4780]: I1210 11:19:23.411704 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-678vv\" (UniqueName: \"kubernetes.io/projected/2fd04a14-0a8f-4491-a00c-3fb008e736ce-kube-api-access-678vv\") pod \"2fd04a14-0a8f-4491-a00c-3fb008e736ce\" (UID: \"2fd04a14-0a8f-4491-a00c-3fb008e736ce\") " Dec 10 11:19:23 crc kubenswrapper[4780]: I1210 11:19:23.422223 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2fd04a14-0a8f-4491-a00c-3fb008e736ce-scripts" (OuterVolumeSpecName: "scripts") pod "2fd04a14-0a8f-4491-a00c-3fb008e736ce" (UID: "2fd04a14-0a8f-4491-a00c-3fb008e736ce"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:19:23 crc kubenswrapper[4780]: I1210 11:19:23.438598 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2fd04a14-0a8f-4491-a00c-3fb008e736ce-kube-api-access-678vv" (OuterVolumeSpecName: "kube-api-access-678vv") pod "2fd04a14-0a8f-4491-a00c-3fb008e736ce" (UID: "2fd04a14-0a8f-4491-a00c-3fb008e736ce"). InnerVolumeSpecName "kube-api-access-678vv". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:19:23 crc kubenswrapper[4780]: I1210 11:19:23.458496 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2fd04a14-0a8f-4491-a00c-3fb008e736ce-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "2fd04a14-0a8f-4491-a00c-3fb008e736ce" (UID: "2fd04a14-0a8f-4491-a00c-3fb008e736ce"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:19:23 crc kubenswrapper[4780]: I1210 11:19:23.504647 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2fd04a14-0a8f-4491-a00c-3fb008e736ce-config-data" (OuterVolumeSpecName: "config-data") pod "2fd04a14-0a8f-4491-a00c-3fb008e736ce" (UID: "2fd04a14-0a8f-4491-a00c-3fb008e736ce"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:19:23 crc kubenswrapper[4780]: I1210 11:19:23.516480 4780 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2fd04a14-0a8f-4491-a00c-3fb008e736ce-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 10 11:19:23 crc kubenswrapper[4780]: I1210 11:19:23.516541 4780 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2fd04a14-0a8f-4491-a00c-3fb008e736ce-scripts\") on node \"crc\" DevicePath \"\"" Dec 10 11:19:23 crc kubenswrapper[4780]: I1210 11:19:23.516558 4780 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-678vv\" (UniqueName: \"kubernetes.io/projected/2fd04a14-0a8f-4491-a00c-3fb008e736ce-kube-api-access-678vv\") on node \"crc\" DevicePath \"\"" Dec 10 11:19:23 crc kubenswrapper[4780]: I1210 11:19:23.516570 4780 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2fd04a14-0a8f-4491-a00c-3fb008e736ce-config-data\") on node \"crc\" DevicePath \"\"" Dec 10 11:19:23 crc kubenswrapper[4780]: I1210 11:19:23.640788 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-97bxk" event={"ID":"2fd04a14-0a8f-4491-a00c-3fb008e736ce","Type":"ContainerDied","Data":"6c2eef32b7653892c04f92ba4fbf1f6d0c85a2d3991394673aae72116bab595d"} Dec 10 11:19:23 crc kubenswrapper[4780]: I1210 11:19:23.640832 4780 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-97bxk" Dec 10 11:19:23 crc kubenswrapper[4780]: I1210 11:19:23.640866 4780 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="6c2eef32b7653892c04f92ba4fbf1f6d0c85a2d3991394673aae72116bab595d" Dec 10 11:19:23 crc kubenswrapper[4780]: I1210 11:19:23.641012 4780 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="d0bcb764-59b2-46b2-825e-1902287e5e62" containerName="nova-metadata-log" containerID="cri-o://92c568b8eb5035be9290c7d9cde7a995480b5aa5609f6d747440c3f5abd5dde3" gracePeriod=30 Dec 10 11:19:23 crc kubenswrapper[4780]: I1210 11:19:23.641132 4780 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="d0bcb764-59b2-46b2-825e-1902287e5e62" containerName="nova-metadata-metadata" containerID="cri-o://01cb80f6673935ac2b51fb7f2cb9c265456ad598e82bb46dfe8ec69cfcb2556b" gracePeriod=30 Dec 10 11:19:23 crc kubenswrapper[4780]: I1210 11:19:23.764164 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-conductor-0"] Dec 10 11:19:23 crc kubenswrapper[4780]: E1210 11:19:23.764867 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2fd04a14-0a8f-4491-a00c-3fb008e736ce" containerName="nova-cell1-conductor-db-sync" Dec 10 11:19:23 crc kubenswrapper[4780]: I1210 11:19:23.764892 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="2fd04a14-0a8f-4491-a00c-3fb008e736ce" containerName="nova-cell1-conductor-db-sync" Dec 10 11:19:23 crc kubenswrapper[4780]: E1210 11:19:23.764911 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="75ef71d6-20b7-40b6-83c3-b3ee314c827f" containerName="nova-manage" Dec 10 11:19:23 crc kubenswrapper[4780]: I1210 11:19:23.764938 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="75ef71d6-20b7-40b6-83c3-b3ee314c827f" containerName="nova-manage" Dec 10 11:19:23 crc kubenswrapper[4780]: I1210 11:19:23.765296 4780 memory_manager.go:354] "RemoveStaleState removing state" podUID="75ef71d6-20b7-40b6-83c3-b3ee314c827f" containerName="nova-manage" Dec 10 11:19:23 crc kubenswrapper[4780]: I1210 11:19:23.765366 4780 memory_manager.go:354] "RemoveStaleState removing state" podUID="2fd04a14-0a8f-4491-a00c-3fb008e736ce" containerName="nova-cell1-conductor-db-sync" Dec 10 11:19:23 crc kubenswrapper[4780]: I1210 11:19:23.767202 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-conductor-0" Dec 10 11:19:23 crc kubenswrapper[4780]: I1210 11:19:23.770719 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-config-data" Dec 10 11:19:23 crc kubenswrapper[4780]: I1210 11:19:23.809742 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-0"] Dec 10 11:19:23 crc kubenswrapper[4780]: I1210 11:19:23.828819 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3d2551f7-35c2-4f3d-aa10-a4e87dc81310-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"3d2551f7-35c2-4f3d-aa10-a4e87dc81310\") " pod="openstack/nova-cell1-conductor-0" Dec 10 11:19:23 crc kubenswrapper[4780]: I1210 11:19:23.829354 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4fm2d\" (UniqueName: \"kubernetes.io/projected/3d2551f7-35c2-4f3d-aa10-a4e87dc81310-kube-api-access-4fm2d\") pod \"nova-cell1-conductor-0\" (UID: \"3d2551f7-35c2-4f3d-aa10-a4e87dc81310\") " pod="openstack/nova-cell1-conductor-0" Dec 10 11:19:23 crc kubenswrapper[4780]: I1210 11:19:23.829451 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3d2551f7-35c2-4f3d-aa10-a4e87dc81310-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"3d2551f7-35c2-4f3d-aa10-a4e87dc81310\") " pod="openstack/nova-cell1-conductor-0" Dec 10 11:19:23 crc kubenswrapper[4780]: I1210 11:19:23.933710 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3d2551f7-35c2-4f3d-aa10-a4e87dc81310-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"3d2551f7-35c2-4f3d-aa10-a4e87dc81310\") " pod="openstack/nova-cell1-conductor-0" Dec 10 11:19:23 crc kubenswrapper[4780]: I1210 11:19:23.934277 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4fm2d\" (UniqueName: \"kubernetes.io/projected/3d2551f7-35c2-4f3d-aa10-a4e87dc81310-kube-api-access-4fm2d\") pod \"nova-cell1-conductor-0\" (UID: \"3d2551f7-35c2-4f3d-aa10-a4e87dc81310\") " pod="openstack/nova-cell1-conductor-0" Dec 10 11:19:23 crc kubenswrapper[4780]: I1210 11:19:23.934380 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3d2551f7-35c2-4f3d-aa10-a4e87dc81310-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"3d2551f7-35c2-4f3d-aa10-a4e87dc81310\") " pod="openstack/nova-cell1-conductor-0" Dec 10 11:19:23 crc kubenswrapper[4780]: I1210 11:19:23.941427 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3d2551f7-35c2-4f3d-aa10-a4e87dc81310-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"3d2551f7-35c2-4f3d-aa10-a4e87dc81310\") " pod="openstack/nova-cell1-conductor-0" Dec 10 11:19:23 crc kubenswrapper[4780]: I1210 11:19:23.943564 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3d2551f7-35c2-4f3d-aa10-a4e87dc81310-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"3d2551f7-35c2-4f3d-aa10-a4e87dc81310\") " pod="openstack/nova-cell1-conductor-0" Dec 10 11:19:23 crc kubenswrapper[4780]: I1210 11:19:23.967862 4780 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4fm2d\" (UniqueName: \"kubernetes.io/projected/3d2551f7-35c2-4f3d-aa10-a4e87dc81310-kube-api-access-4fm2d\") pod \"nova-cell1-conductor-0\" (UID: \"3d2551f7-35c2-4f3d-aa10-a4e87dc81310\") " pod="openstack/nova-cell1-conductor-0" Dec 10 11:19:24 crc kubenswrapper[4780]: I1210 11:19:24.258063 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-0" Dec 10 11:19:24 crc kubenswrapper[4780]: I1210 11:19:24.489500 4780 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Dec 10 11:19:24 crc kubenswrapper[4780]: I1210 11:19:24.577334 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d0bcb764-59b2-46b2-825e-1902287e5e62-config-data\") pod \"d0bcb764-59b2-46b2-825e-1902287e5e62\" (UID: \"d0bcb764-59b2-46b2-825e-1902287e5e62\") " Dec 10 11:19:24 crc kubenswrapper[4780]: I1210 11:19:24.577835 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d0bcb764-59b2-46b2-825e-1902287e5e62-logs\") pod \"d0bcb764-59b2-46b2-825e-1902287e5e62\" (UID: \"d0bcb764-59b2-46b2-825e-1902287e5e62\") " Dec 10 11:19:24 crc kubenswrapper[4780]: I1210 11:19:24.577959 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/d0bcb764-59b2-46b2-825e-1902287e5e62-nova-metadata-tls-certs\") pod \"d0bcb764-59b2-46b2-825e-1902287e5e62\" (UID: \"d0bcb764-59b2-46b2-825e-1902287e5e62\") " Dec 10 11:19:24 crc kubenswrapper[4780]: I1210 11:19:24.578004 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d0bcb764-59b2-46b2-825e-1902287e5e62-combined-ca-bundle\") pod \"d0bcb764-59b2-46b2-825e-1902287e5e62\" (UID: \"d0bcb764-59b2-46b2-825e-1902287e5e62\") " Dec 10 11:19:24 crc kubenswrapper[4780]: I1210 11:19:24.578477 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-k4tkn\" (UniqueName: \"kubernetes.io/projected/d0bcb764-59b2-46b2-825e-1902287e5e62-kube-api-access-k4tkn\") pod \"d0bcb764-59b2-46b2-825e-1902287e5e62\" (UID: \"d0bcb764-59b2-46b2-825e-1902287e5e62\") " Dec 10 11:19:24 crc kubenswrapper[4780]: I1210 11:19:24.579860 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d0bcb764-59b2-46b2-825e-1902287e5e62-logs" (OuterVolumeSpecName: "logs") pod "d0bcb764-59b2-46b2-825e-1902287e5e62" (UID: "d0bcb764-59b2-46b2-825e-1902287e5e62"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 11:19:24 crc kubenswrapper[4780]: I1210 11:19:24.609082 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d0bcb764-59b2-46b2-825e-1902287e5e62-kube-api-access-k4tkn" (OuterVolumeSpecName: "kube-api-access-k4tkn") pod "d0bcb764-59b2-46b2-825e-1902287e5e62" (UID: "d0bcb764-59b2-46b2-825e-1902287e5e62"). InnerVolumeSpecName "kube-api-access-k4tkn". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:19:24 crc kubenswrapper[4780]: I1210 11:19:24.678380 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d0bcb764-59b2-46b2-825e-1902287e5e62-config-data" (OuterVolumeSpecName: "config-data") pod "d0bcb764-59b2-46b2-825e-1902287e5e62" (UID: "d0bcb764-59b2-46b2-825e-1902287e5e62"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:19:24 crc kubenswrapper[4780]: I1210 11:19:24.678539 4780 generic.go:334] "Generic (PLEG): container finished" podID="d0bcb764-59b2-46b2-825e-1902287e5e62" containerID="01cb80f6673935ac2b51fb7f2cb9c265456ad598e82bb46dfe8ec69cfcb2556b" exitCode=0 Dec 10 11:19:24 crc kubenswrapper[4780]: I1210 11:19:24.678579 4780 generic.go:334] "Generic (PLEG): container finished" podID="d0bcb764-59b2-46b2-825e-1902287e5e62" containerID="92c568b8eb5035be9290c7d9cde7a995480b5aa5609f6d747440c3f5abd5dde3" exitCode=143 Dec 10 11:19:24 crc kubenswrapper[4780]: I1210 11:19:24.678611 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"d0bcb764-59b2-46b2-825e-1902287e5e62","Type":"ContainerDied","Data":"01cb80f6673935ac2b51fb7f2cb9c265456ad598e82bb46dfe8ec69cfcb2556b"} Dec 10 11:19:24 crc kubenswrapper[4780]: I1210 11:19:24.678661 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"d0bcb764-59b2-46b2-825e-1902287e5e62","Type":"ContainerDied","Data":"92c568b8eb5035be9290c7d9cde7a995480b5aa5609f6d747440c3f5abd5dde3"} Dec 10 11:19:24 crc kubenswrapper[4780]: I1210 11:19:24.678674 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"d0bcb764-59b2-46b2-825e-1902287e5e62","Type":"ContainerDied","Data":"e0c2515e1fec3f10dcb3c5020779a1ef43ec21929b4281b84c97fd714a0302c3"} Dec 10 11:19:24 crc kubenswrapper[4780]: I1210 11:19:24.678693 4780 scope.go:117] "RemoveContainer" containerID="01cb80f6673935ac2b51fb7f2cb9c265456ad598e82bb46dfe8ec69cfcb2556b" Dec 10 11:19:24 crc kubenswrapper[4780]: I1210 11:19:24.678985 4780 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Dec 10 11:19:24 crc kubenswrapper[4780]: I1210 11:19:24.687145 4780 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-k4tkn\" (UniqueName: \"kubernetes.io/projected/d0bcb764-59b2-46b2-825e-1902287e5e62-kube-api-access-k4tkn\") on node \"crc\" DevicePath \"\"" Dec 10 11:19:24 crc kubenswrapper[4780]: I1210 11:19:24.687299 4780 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d0bcb764-59b2-46b2-825e-1902287e5e62-config-data\") on node \"crc\" DevicePath \"\"" Dec 10 11:19:24 crc kubenswrapper[4780]: I1210 11:19:24.687312 4780 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d0bcb764-59b2-46b2-825e-1902287e5e62-logs\") on node \"crc\" DevicePath \"\"" Dec 10 11:19:24 crc kubenswrapper[4780]: I1210 11:19:24.699282 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d0bcb764-59b2-46b2-825e-1902287e5e62-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "d0bcb764-59b2-46b2-825e-1902287e5e62" (UID: "d0bcb764-59b2-46b2-825e-1902287e5e62"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:19:24 crc kubenswrapper[4780]: I1210 11:19:24.707524 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d0bcb764-59b2-46b2-825e-1902287e5e62-nova-metadata-tls-certs" (OuterVolumeSpecName: "nova-metadata-tls-certs") pod "d0bcb764-59b2-46b2-825e-1902287e5e62" (UID: "d0bcb764-59b2-46b2-825e-1902287e5e62"). InnerVolumeSpecName "nova-metadata-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:19:24 crc kubenswrapper[4780]: I1210 11:19:24.791144 4780 reconciler_common.go:293] "Volume detached for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/d0bcb764-59b2-46b2-825e-1902287e5e62-nova-metadata-tls-certs\") on node \"crc\" DevicePath \"\"" Dec 10 11:19:24 crc kubenswrapper[4780]: I1210 11:19:24.791243 4780 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d0bcb764-59b2-46b2-825e-1902287e5e62-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 10 11:19:24 crc kubenswrapper[4780]: I1210 11:19:24.802870 4780 scope.go:117] "RemoveContainer" containerID="92c568b8eb5035be9290c7d9cde7a995480b5aa5609f6d747440c3f5abd5dde3" Dec 10 11:19:24 crc kubenswrapper[4780]: I1210 11:19:24.844858 4780 scope.go:117] "RemoveContainer" containerID="01cb80f6673935ac2b51fb7f2cb9c265456ad598e82bb46dfe8ec69cfcb2556b" Dec 10 11:19:24 crc kubenswrapper[4780]: E1210 11:19:24.849243 4780 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"01cb80f6673935ac2b51fb7f2cb9c265456ad598e82bb46dfe8ec69cfcb2556b\": container with ID starting with 01cb80f6673935ac2b51fb7f2cb9c265456ad598e82bb46dfe8ec69cfcb2556b not found: ID does not exist" containerID="01cb80f6673935ac2b51fb7f2cb9c265456ad598e82bb46dfe8ec69cfcb2556b" Dec 10 11:19:24 crc kubenswrapper[4780]: I1210 11:19:24.849319 4780 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"01cb80f6673935ac2b51fb7f2cb9c265456ad598e82bb46dfe8ec69cfcb2556b"} err="failed to get container status \"01cb80f6673935ac2b51fb7f2cb9c265456ad598e82bb46dfe8ec69cfcb2556b\": rpc error: code = NotFound desc = could not find container \"01cb80f6673935ac2b51fb7f2cb9c265456ad598e82bb46dfe8ec69cfcb2556b\": container with ID starting with 01cb80f6673935ac2b51fb7f2cb9c265456ad598e82bb46dfe8ec69cfcb2556b not found: ID does not exist" Dec 10 11:19:24 crc kubenswrapper[4780]: I1210 11:19:24.849354 4780 scope.go:117] "RemoveContainer" containerID="92c568b8eb5035be9290c7d9cde7a995480b5aa5609f6d747440c3f5abd5dde3" Dec 10 11:19:24 crc kubenswrapper[4780]: E1210 11:19:24.850112 4780 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"92c568b8eb5035be9290c7d9cde7a995480b5aa5609f6d747440c3f5abd5dde3\": container with ID starting with 92c568b8eb5035be9290c7d9cde7a995480b5aa5609f6d747440c3f5abd5dde3 not found: ID does not exist" containerID="92c568b8eb5035be9290c7d9cde7a995480b5aa5609f6d747440c3f5abd5dde3" Dec 10 11:19:24 crc kubenswrapper[4780]: I1210 11:19:24.850193 4780 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"92c568b8eb5035be9290c7d9cde7a995480b5aa5609f6d747440c3f5abd5dde3"} err="failed to get container status \"92c568b8eb5035be9290c7d9cde7a995480b5aa5609f6d747440c3f5abd5dde3\": rpc error: code = NotFound desc = could not find container 
\"92c568b8eb5035be9290c7d9cde7a995480b5aa5609f6d747440c3f5abd5dde3\": container with ID starting with 92c568b8eb5035be9290c7d9cde7a995480b5aa5609f6d747440c3f5abd5dde3 not found: ID does not exist" Dec 10 11:19:24 crc kubenswrapper[4780]: I1210 11:19:24.850245 4780 scope.go:117] "RemoveContainer" containerID="01cb80f6673935ac2b51fb7f2cb9c265456ad598e82bb46dfe8ec69cfcb2556b" Dec 10 11:19:24 crc kubenswrapper[4780]: I1210 11:19:24.850718 4780 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"01cb80f6673935ac2b51fb7f2cb9c265456ad598e82bb46dfe8ec69cfcb2556b"} err="failed to get container status \"01cb80f6673935ac2b51fb7f2cb9c265456ad598e82bb46dfe8ec69cfcb2556b\": rpc error: code = NotFound desc = could not find container \"01cb80f6673935ac2b51fb7f2cb9c265456ad598e82bb46dfe8ec69cfcb2556b\": container with ID starting with 01cb80f6673935ac2b51fb7f2cb9c265456ad598e82bb46dfe8ec69cfcb2556b not found: ID does not exist" Dec 10 11:19:24 crc kubenswrapper[4780]: I1210 11:19:24.850740 4780 scope.go:117] "RemoveContainer" containerID="92c568b8eb5035be9290c7d9cde7a995480b5aa5609f6d747440c3f5abd5dde3" Dec 10 11:19:24 crc kubenswrapper[4780]: I1210 11:19:24.851134 4780 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"92c568b8eb5035be9290c7d9cde7a995480b5aa5609f6d747440c3f5abd5dde3"} err="failed to get container status \"92c568b8eb5035be9290c7d9cde7a995480b5aa5609f6d747440c3f5abd5dde3\": rpc error: code = NotFound desc = could not find container \"92c568b8eb5035be9290c7d9cde7a995480b5aa5609f6d747440c3f5abd5dde3\": container with ID starting with 92c568b8eb5035be9290c7d9cde7a995480b5aa5609f6d747440c3f5abd5dde3 not found: ID does not exist" Dec 10 11:19:24 crc kubenswrapper[4780]: I1210 11:19:24.950366 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-0"] Dec 10 11:19:25 crc kubenswrapper[4780]: I1210 11:19:25.047705 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Dec 10 11:19:25 crc kubenswrapper[4780]: I1210 11:19:25.077485 4780 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-metadata-0"] Dec 10 11:19:25 crc kubenswrapper[4780]: I1210 11:19:25.164451 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Dec 10 11:19:25 crc kubenswrapper[4780]: E1210 11:19:25.177664 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d0bcb764-59b2-46b2-825e-1902287e5e62" containerName="nova-metadata-log" Dec 10 11:19:25 crc kubenswrapper[4780]: I1210 11:19:25.177731 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="d0bcb764-59b2-46b2-825e-1902287e5e62" containerName="nova-metadata-log" Dec 10 11:19:25 crc kubenswrapper[4780]: E1210 11:19:25.177786 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d0bcb764-59b2-46b2-825e-1902287e5e62" containerName="nova-metadata-metadata" Dec 10 11:19:25 crc kubenswrapper[4780]: I1210 11:19:25.177797 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="d0bcb764-59b2-46b2-825e-1902287e5e62" containerName="nova-metadata-metadata" Dec 10 11:19:25 crc kubenswrapper[4780]: I1210 11:19:25.178530 4780 memory_manager.go:354] "RemoveStaleState removing state" podUID="d0bcb764-59b2-46b2-825e-1902287e5e62" containerName="nova-metadata-log" Dec 10 11:19:25 crc kubenswrapper[4780]: I1210 11:19:25.178617 4780 memory_manager.go:354] "RemoveStaleState removing state" podUID="d0bcb764-59b2-46b2-825e-1902287e5e62" 
containerName="nova-metadata-metadata" Dec 10 11:19:25 crc kubenswrapper[4780]: I1210 11:19:25.183069 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Dec 10 11:19:25 crc kubenswrapper[4780]: I1210 11:19:25.191190 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Dec 10 11:19:25 crc kubenswrapper[4780]: I1210 11:19:25.191266 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-metadata-internal-svc" Dec 10 11:19:25 crc kubenswrapper[4780]: I1210 11:19:25.213204 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dec862f9-f21f-4c0f-9bc6-48d2d017c57e-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"dec862f9-f21f-4c0f-9bc6-48d2d017c57e\") " pod="openstack/nova-metadata-0" Dec 10 11:19:25 crc kubenswrapper[4780]: I1210 11:19:25.213275 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/dec862f9-f21f-4c0f-9bc6-48d2d017c57e-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"dec862f9-f21f-4c0f-9bc6-48d2d017c57e\") " pod="openstack/nova-metadata-0" Dec 10 11:19:25 crc kubenswrapper[4780]: I1210 11:19:25.213318 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/dec862f9-f21f-4c0f-9bc6-48d2d017c57e-config-data\") pod \"nova-metadata-0\" (UID: \"dec862f9-f21f-4c0f-9bc6-48d2d017c57e\") " pod="openstack/nova-metadata-0" Dec 10 11:19:25 crc kubenswrapper[4780]: I1210 11:19:25.213339 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dbtwt\" (UniqueName: \"kubernetes.io/projected/dec862f9-f21f-4c0f-9bc6-48d2d017c57e-kube-api-access-dbtwt\") pod \"nova-metadata-0\" (UID: \"dec862f9-f21f-4c0f-9bc6-48d2d017c57e\") " pod="openstack/nova-metadata-0" Dec 10 11:19:25 crc kubenswrapper[4780]: I1210 11:19:25.213626 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/dec862f9-f21f-4c0f-9bc6-48d2d017c57e-logs\") pod \"nova-metadata-0\" (UID: \"dec862f9-f21f-4c0f-9bc6-48d2d017c57e\") " pod="openstack/nova-metadata-0" Dec 10 11:19:25 crc kubenswrapper[4780]: I1210 11:19:25.215471 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Dec 10 11:19:25 crc kubenswrapper[4780]: I1210 11:19:25.317432 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/dec862f9-f21f-4c0f-9bc6-48d2d017c57e-logs\") pod \"nova-metadata-0\" (UID: \"dec862f9-f21f-4c0f-9bc6-48d2d017c57e\") " pod="openstack/nova-metadata-0" Dec 10 11:19:25 crc kubenswrapper[4780]: I1210 11:19:25.318026 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dec862f9-f21f-4c0f-9bc6-48d2d017c57e-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"dec862f9-f21f-4c0f-9bc6-48d2d017c57e\") " pod="openstack/nova-metadata-0" Dec 10 11:19:25 crc kubenswrapper[4780]: I1210 11:19:25.318181 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: 
\"kubernetes.io/empty-dir/dec862f9-f21f-4c0f-9bc6-48d2d017c57e-logs\") pod \"nova-metadata-0\" (UID: \"dec862f9-f21f-4c0f-9bc6-48d2d017c57e\") " pod="openstack/nova-metadata-0" Dec 10 11:19:25 crc kubenswrapper[4780]: I1210 11:19:25.318205 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/dec862f9-f21f-4c0f-9bc6-48d2d017c57e-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"dec862f9-f21f-4c0f-9bc6-48d2d017c57e\") " pod="openstack/nova-metadata-0" Dec 10 11:19:25 crc kubenswrapper[4780]: I1210 11:19:25.318644 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/dec862f9-f21f-4c0f-9bc6-48d2d017c57e-config-data\") pod \"nova-metadata-0\" (UID: \"dec862f9-f21f-4c0f-9bc6-48d2d017c57e\") " pod="openstack/nova-metadata-0" Dec 10 11:19:25 crc kubenswrapper[4780]: I1210 11:19:25.318799 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dbtwt\" (UniqueName: \"kubernetes.io/projected/dec862f9-f21f-4c0f-9bc6-48d2d017c57e-kube-api-access-dbtwt\") pod \"nova-metadata-0\" (UID: \"dec862f9-f21f-4c0f-9bc6-48d2d017c57e\") " pod="openstack/nova-metadata-0" Dec 10 11:19:25 crc kubenswrapper[4780]: I1210 11:19:25.324767 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dec862f9-f21f-4c0f-9bc6-48d2d017c57e-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"dec862f9-f21f-4c0f-9bc6-48d2d017c57e\") " pod="openstack/nova-metadata-0" Dec 10 11:19:25 crc kubenswrapper[4780]: I1210 11:19:25.324808 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/dec862f9-f21f-4c0f-9bc6-48d2d017c57e-config-data\") pod \"nova-metadata-0\" (UID: \"dec862f9-f21f-4c0f-9bc6-48d2d017c57e\") " pod="openstack/nova-metadata-0" Dec 10 11:19:25 crc kubenswrapper[4780]: I1210 11:19:25.326598 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/dec862f9-f21f-4c0f-9bc6-48d2d017c57e-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"dec862f9-f21f-4c0f-9bc6-48d2d017c57e\") " pod="openstack/nova-metadata-0" Dec 10 11:19:25 crc kubenswrapper[4780]: I1210 11:19:25.340387 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dbtwt\" (UniqueName: \"kubernetes.io/projected/dec862f9-f21f-4c0f-9bc6-48d2d017c57e-kube-api-access-dbtwt\") pod \"nova-metadata-0\" (UID: \"dec862f9-f21f-4c0f-9bc6-48d2d017c57e\") " pod="openstack/nova-metadata-0" Dec 10 11:19:25 crc kubenswrapper[4780]: I1210 11:19:25.469082 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Dec 10 11:19:25 crc kubenswrapper[4780]: I1210 11:19:25.666757 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Dec 10 11:19:25 crc kubenswrapper[4780]: I1210 11:19:25.667392 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Dec 10 11:19:25 crc kubenswrapper[4780]: E1210 11:19:25.735161 4780 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 1a9a9d3282a3180159d7ca7878ddf755e5f48acfe5299cb5086826436b5c3f56 is running failed: container process not found" containerID="1a9a9d3282a3180159d7ca7878ddf755e5f48acfe5299cb5086826436b5c3f56" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Dec 10 11:19:25 crc kubenswrapper[4780]: E1210 11:19:25.739092 4780 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 1a9a9d3282a3180159d7ca7878ddf755e5f48acfe5299cb5086826436b5c3f56 is running failed: container process not found" containerID="1a9a9d3282a3180159d7ca7878ddf755e5f48acfe5299cb5086826436b5c3f56" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Dec 10 11:19:25 crc kubenswrapper[4780]: I1210 11:19:25.739409 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"3d2551f7-35c2-4f3d-aa10-a4e87dc81310","Type":"ContainerStarted","Data":"ed1eeec70ef00d6a0db8e04f5edd13d6c5ec94f5c0488873473fc876376b54eb"} Dec 10 11:19:25 crc kubenswrapper[4780]: I1210 11:19:25.739459 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"3d2551f7-35c2-4f3d-aa10-a4e87dc81310","Type":"ContainerStarted","Data":"2b14719703c13fb137011454626a0bf908635df8a0cd43a2ff77d8528bf43ef4"} Dec 10 11:19:25 crc kubenswrapper[4780]: I1210 11:19:25.741379 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-conductor-0" Dec 10 11:19:25 crc kubenswrapper[4780]: E1210 11:19:25.749409 4780 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 1a9a9d3282a3180159d7ca7878ddf755e5f48acfe5299cb5086826436b5c3f56 is running failed: container process not found" containerID="1a9a9d3282a3180159d7ca7878ddf755e5f48acfe5299cb5086826436b5c3f56" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Dec 10 11:19:25 crc kubenswrapper[4780]: E1210 11:19:25.749526 4780 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 1a9a9d3282a3180159d7ca7878ddf755e5f48acfe5299cb5086826436b5c3f56 is running failed: container process not found" probeType="Readiness" pod="openstack/nova-scheduler-0" podUID="c38ca976-0284-49c1-836c-21f1a7d5354a" containerName="nova-scheduler-scheduler" Dec 10 11:19:25 crc kubenswrapper[4780]: I1210 11:19:25.757635 4780 generic.go:334] "Generic (PLEG): container finished" podID="c38ca976-0284-49c1-836c-21f1a7d5354a" containerID="1a9a9d3282a3180159d7ca7878ddf755e5f48acfe5299cb5086826436b5c3f56" exitCode=0 Dec 10 11:19:25 crc kubenswrapper[4780]: I1210 11:19:25.757728 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"c38ca976-0284-49c1-836c-21f1a7d5354a","Type":"ContainerDied","Data":"1a9a9d3282a3180159d7ca7878ddf755e5f48acfe5299cb5086826436b5c3f56"} 
Dec 10 11:19:25 crc kubenswrapper[4780]: I1210 11:19:25.813490 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-conductor-0" podStartSLOduration=2.813451911 podStartE2EDuration="2.813451911s" podCreationTimestamp="2025-12-10 11:19:23 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 11:19:25.77234387 +0000 UTC m=+2070.625737313" watchObservedRunningTime="2025-12-10 11:19:25.813451911 +0000 UTC m=+2070.666845354" Dec 10 11:19:25 crc kubenswrapper[4780]: I1210 11:19:25.818216 4780 generic.go:334] "Generic (PLEG): container finished" podID="a624d997-bd02-460a-9dd0-d636be0d70ef" containerID="2c05254e53813076e4d628f0768d3f4cda15091643bc5ceafb945d1d46236498" exitCode=0 Dec 10 11:19:25 crc kubenswrapper[4780]: I1210 11:19:25.818313 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-db-sync-xd5bf" event={"ID":"a624d997-bd02-460a-9dd0-d636be0d70ef","Type":"ContainerDied","Data":"2c05254e53813076e4d628f0768d3f4cda15091643bc5ceafb945d1d46236498"} Dec 10 11:19:26 crc kubenswrapper[4780]: I1210 11:19:26.028540 4780 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d0bcb764-59b2-46b2-825e-1902287e5e62" path="/var/lib/kubelet/pods/d0bcb764-59b2-46b2-825e-1902287e5e62/volumes" Dec 10 11:19:26 crc kubenswrapper[4780]: I1210 11:19:26.093376 4780 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Dec 10 11:19:26 crc kubenswrapper[4780]: I1210 11:19:26.214903 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c38ca976-0284-49c1-836c-21f1a7d5354a-combined-ca-bundle\") pod \"c38ca976-0284-49c1-836c-21f1a7d5354a\" (UID: \"c38ca976-0284-49c1-836c-21f1a7d5354a\") " Dec 10 11:19:26 crc kubenswrapper[4780]: I1210 11:19:26.215062 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-27md9\" (UniqueName: \"kubernetes.io/projected/c38ca976-0284-49c1-836c-21f1a7d5354a-kube-api-access-27md9\") pod \"c38ca976-0284-49c1-836c-21f1a7d5354a\" (UID: \"c38ca976-0284-49c1-836c-21f1a7d5354a\") " Dec 10 11:19:26 crc kubenswrapper[4780]: I1210 11:19:26.215095 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c38ca976-0284-49c1-836c-21f1a7d5354a-config-data\") pod \"c38ca976-0284-49c1-836c-21f1a7d5354a\" (UID: \"c38ca976-0284-49c1-836c-21f1a7d5354a\") " Dec 10 11:19:26 crc kubenswrapper[4780]: I1210 11:19:26.246509 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c38ca976-0284-49c1-836c-21f1a7d5354a-kube-api-access-27md9" (OuterVolumeSpecName: "kube-api-access-27md9") pod "c38ca976-0284-49c1-836c-21f1a7d5354a" (UID: "c38ca976-0284-49c1-836c-21f1a7d5354a"). InnerVolumeSpecName "kube-api-access-27md9". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:19:26 crc kubenswrapper[4780]: I1210 11:19:26.309232 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c38ca976-0284-49c1-836c-21f1a7d5354a-config-data" (OuterVolumeSpecName: "config-data") pod "c38ca976-0284-49c1-836c-21f1a7d5354a" (UID: "c38ca976-0284-49c1-836c-21f1a7d5354a"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:19:26 crc kubenswrapper[4780]: I1210 11:19:26.322581 4780 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-27md9\" (UniqueName: \"kubernetes.io/projected/c38ca976-0284-49c1-836c-21f1a7d5354a-kube-api-access-27md9\") on node \"crc\" DevicePath \"\"" Dec 10 11:19:26 crc kubenswrapper[4780]: I1210 11:19:26.323167 4780 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c38ca976-0284-49c1-836c-21f1a7d5354a-config-data\") on node \"crc\" DevicePath \"\"" Dec 10 11:19:26 crc kubenswrapper[4780]: I1210 11:19:26.344251 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c38ca976-0284-49c1-836c-21f1a7d5354a-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "c38ca976-0284-49c1-836c-21f1a7d5354a" (UID: "c38ca976-0284-49c1-836c-21f1a7d5354a"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:19:26 crc kubenswrapper[4780]: I1210 11:19:26.442655 4780 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c38ca976-0284-49c1-836c-21f1a7d5354a-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 10 11:19:26 crc kubenswrapper[4780]: I1210 11:19:26.650716 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Dec 10 11:19:26 crc kubenswrapper[4780]: I1210 11:19:26.809406 4780 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Dec 10 11:19:26 crc kubenswrapper[4780]: I1210 11:19:26.840262 4780 generic.go:334] "Generic (PLEG): container finished" podID="3264f745-f5c2-460e-b5e7-86eef3f26673" containerID="8a3f806fc030dd1c72e54cd8159e4a89e60e3081c90a7c108ef97ea84dd201c7" exitCode=0 Dec 10 11:19:26 crc kubenswrapper[4780]: I1210 11:19:26.840369 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"3264f745-f5c2-460e-b5e7-86eef3f26673","Type":"ContainerDied","Data":"8a3f806fc030dd1c72e54cd8159e4a89e60e3081c90a7c108ef97ea84dd201c7"} Dec 10 11:19:26 crc kubenswrapper[4780]: I1210 11:19:26.840414 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"3264f745-f5c2-460e-b5e7-86eef3f26673","Type":"ContainerDied","Data":"06f8d6856aaecedfb645bee58c2a5cb42e143124823d49bd72c8df56599924f5"} Dec 10 11:19:26 crc kubenswrapper[4780]: I1210 11:19:26.840443 4780 scope.go:117] "RemoveContainer" containerID="8a3f806fc030dd1c72e54cd8159e4a89e60e3081c90a7c108ef97ea84dd201c7" Dec 10 11:19:26 crc kubenswrapper[4780]: I1210 11:19:26.840605 4780 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Dec 10 11:19:26 crc kubenswrapper[4780]: I1210 11:19:26.861004 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"dec862f9-f21f-4c0f-9bc6-48d2d017c57e","Type":"ContainerStarted","Data":"cbad93438739ca170871df4d4d7a296ebbb8f54235d627ba49e4ae2fed58f7e8"} Dec 10 11:19:26 crc kubenswrapper[4780]: I1210 11:19:26.880111 4780 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Dec 10 11:19:26 crc kubenswrapper[4780]: I1210 11:19:26.880420 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"c38ca976-0284-49c1-836c-21f1a7d5354a","Type":"ContainerDied","Data":"de2f06f2a6a3a7c405a19de279223644d34061af007eed5283d0e09561e21f54"} Dec 10 11:19:26 crc kubenswrapper[4780]: I1210 11:19:26.949134 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Dec 10 11:19:26 crc kubenswrapper[4780]: I1210 11:19:26.953629 4780 scope.go:117] "RemoveContainer" containerID="1927a16f1987dd47e6c40891de4a223749087ac80390db6bfd2b721b35e62e24" Dec 10 11:19:26 crc kubenswrapper[4780]: I1210 11:19:26.967115 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wvrfb\" (UniqueName: \"kubernetes.io/projected/3264f745-f5c2-460e-b5e7-86eef3f26673-kube-api-access-wvrfb\") pod \"3264f745-f5c2-460e-b5e7-86eef3f26673\" (UID: \"3264f745-f5c2-460e-b5e7-86eef3f26673\") " Dec 10 11:19:26 crc kubenswrapper[4780]: I1210 11:19:26.969569 4780 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-scheduler-0"] Dec 10 11:19:26 crc kubenswrapper[4780]: I1210 11:19:26.970193 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3264f745-f5c2-460e-b5e7-86eef3f26673-combined-ca-bundle\") pod \"3264f745-f5c2-460e-b5e7-86eef3f26673\" (UID: \"3264f745-f5c2-460e-b5e7-86eef3f26673\") " Dec 10 11:19:26 crc kubenswrapper[4780]: I1210 11:19:26.970354 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3264f745-f5c2-460e-b5e7-86eef3f26673-logs\") pod \"3264f745-f5c2-460e-b5e7-86eef3f26673\" (UID: \"3264f745-f5c2-460e-b5e7-86eef3f26673\") " Dec 10 11:19:26 crc kubenswrapper[4780]: I1210 11:19:26.970434 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3264f745-f5c2-460e-b5e7-86eef3f26673-config-data\") pod \"3264f745-f5c2-460e-b5e7-86eef3f26673\" (UID: \"3264f745-f5c2-460e-b5e7-86eef3f26673\") " Dec 10 11:19:26 crc kubenswrapper[4780]: I1210 11:19:26.979172 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3264f745-f5c2-460e-b5e7-86eef3f26673-logs" (OuterVolumeSpecName: "logs") pod "3264f745-f5c2-460e-b5e7-86eef3f26673" (UID: "3264f745-f5c2-460e-b5e7-86eef3f26673"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 11:19:26 crc kubenswrapper[4780]: I1210 11:19:26.993775 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3264f745-f5c2-460e-b5e7-86eef3f26673-kube-api-access-wvrfb" (OuterVolumeSpecName: "kube-api-access-wvrfb") pod "3264f745-f5c2-460e-b5e7-86eef3f26673" (UID: "3264f745-f5c2-460e-b5e7-86eef3f26673"). InnerVolumeSpecName "kube-api-access-wvrfb". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:19:27 crc kubenswrapper[4780]: I1210 11:19:27.001599 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"] Dec 10 11:19:27 crc kubenswrapper[4780]: E1210 11:19:27.002672 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c38ca976-0284-49c1-836c-21f1a7d5354a" containerName="nova-scheduler-scheduler" Dec 10 11:19:27 crc kubenswrapper[4780]: I1210 11:19:27.002698 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="c38ca976-0284-49c1-836c-21f1a7d5354a" containerName="nova-scheduler-scheduler" Dec 10 11:19:27 crc kubenswrapper[4780]: E1210 11:19:27.002716 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3264f745-f5c2-460e-b5e7-86eef3f26673" containerName="nova-api-api" Dec 10 11:19:27 crc kubenswrapper[4780]: I1210 11:19:27.002727 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="3264f745-f5c2-460e-b5e7-86eef3f26673" containerName="nova-api-api" Dec 10 11:19:27 crc kubenswrapper[4780]: E1210 11:19:27.002760 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3264f745-f5c2-460e-b5e7-86eef3f26673" containerName="nova-api-log" Dec 10 11:19:27 crc kubenswrapper[4780]: I1210 11:19:27.002779 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="3264f745-f5c2-460e-b5e7-86eef3f26673" containerName="nova-api-log" Dec 10 11:19:27 crc kubenswrapper[4780]: I1210 11:19:27.003324 4780 memory_manager.go:354] "RemoveStaleState removing state" podUID="3264f745-f5c2-460e-b5e7-86eef3f26673" containerName="nova-api-log" Dec 10 11:19:27 crc kubenswrapper[4780]: I1210 11:19:27.003352 4780 memory_manager.go:354] "RemoveStaleState removing state" podUID="3264f745-f5c2-460e-b5e7-86eef3f26673" containerName="nova-api-api" Dec 10 11:19:27 crc kubenswrapper[4780]: I1210 11:19:27.003376 4780 memory_manager.go:354] "RemoveStaleState removing state" podUID="c38ca976-0284-49c1-836c-21f1a7d5354a" containerName="nova-scheduler-scheduler" Dec 10 11:19:27 crc kubenswrapper[4780]: I1210 11:19:27.006389 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Dec 10 11:19:27 crc kubenswrapper[4780]: I1210 11:19:27.011005 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data" Dec 10 11:19:27 crc kubenswrapper[4780]: I1210 11:19:27.026706 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Dec 10 11:19:27 crc kubenswrapper[4780]: I1210 11:19:27.069143 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3264f745-f5c2-460e-b5e7-86eef3f26673-config-data" (OuterVolumeSpecName: "config-data") pod "3264f745-f5c2-460e-b5e7-86eef3f26673" (UID: "3264f745-f5c2-460e-b5e7-86eef3f26673"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:19:27 crc kubenswrapper[4780]: I1210 11:19:27.078196 4780 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wvrfb\" (UniqueName: \"kubernetes.io/projected/3264f745-f5c2-460e-b5e7-86eef3f26673-kube-api-access-wvrfb\") on node \"crc\" DevicePath \"\"" Dec 10 11:19:27 crc kubenswrapper[4780]: I1210 11:19:27.078240 4780 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3264f745-f5c2-460e-b5e7-86eef3f26673-logs\") on node \"crc\" DevicePath \"\"" Dec 10 11:19:27 crc kubenswrapper[4780]: I1210 11:19:27.078252 4780 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3264f745-f5c2-460e-b5e7-86eef3f26673-config-data\") on node \"crc\" DevicePath \"\"" Dec 10 11:19:27 crc kubenswrapper[4780]: I1210 11:19:27.089600 4780 scope.go:117] "RemoveContainer" containerID="8a3f806fc030dd1c72e54cd8159e4a89e60e3081c90a7c108ef97ea84dd201c7" Dec 10 11:19:27 crc kubenswrapper[4780]: E1210 11:19:27.092399 4780 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8a3f806fc030dd1c72e54cd8159e4a89e60e3081c90a7c108ef97ea84dd201c7\": container with ID starting with 8a3f806fc030dd1c72e54cd8159e4a89e60e3081c90a7c108ef97ea84dd201c7 not found: ID does not exist" containerID="8a3f806fc030dd1c72e54cd8159e4a89e60e3081c90a7c108ef97ea84dd201c7" Dec 10 11:19:27 crc kubenswrapper[4780]: I1210 11:19:27.092573 4780 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8a3f806fc030dd1c72e54cd8159e4a89e60e3081c90a7c108ef97ea84dd201c7"} err="failed to get container status \"8a3f806fc030dd1c72e54cd8159e4a89e60e3081c90a7c108ef97ea84dd201c7\": rpc error: code = NotFound desc = could not find container \"8a3f806fc030dd1c72e54cd8159e4a89e60e3081c90a7c108ef97ea84dd201c7\": container with ID starting with 8a3f806fc030dd1c72e54cd8159e4a89e60e3081c90a7c108ef97ea84dd201c7 not found: ID does not exist" Dec 10 11:19:27 crc kubenswrapper[4780]: I1210 11:19:27.092703 4780 scope.go:117] "RemoveContainer" containerID="1927a16f1987dd47e6c40891de4a223749087ac80390db6bfd2b721b35e62e24" Dec 10 11:19:27 crc kubenswrapper[4780]: E1210 11:19:27.093106 4780 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1927a16f1987dd47e6c40891de4a223749087ac80390db6bfd2b721b35e62e24\": container with ID starting with 1927a16f1987dd47e6c40891de4a223749087ac80390db6bfd2b721b35e62e24 not found: ID does not exist" containerID="1927a16f1987dd47e6c40891de4a223749087ac80390db6bfd2b721b35e62e24" Dec 10 11:19:27 crc kubenswrapper[4780]: I1210 11:19:27.093274 4780 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1927a16f1987dd47e6c40891de4a223749087ac80390db6bfd2b721b35e62e24"} err="failed to get container status \"1927a16f1987dd47e6c40891de4a223749087ac80390db6bfd2b721b35e62e24\": rpc error: code = NotFound desc = could not find container \"1927a16f1987dd47e6c40891de4a223749087ac80390db6bfd2b721b35e62e24\": container with ID starting with 1927a16f1987dd47e6c40891de4a223749087ac80390db6bfd2b721b35e62e24 not found: ID does not exist" Dec 10 11:19:27 crc kubenswrapper[4780]: I1210 11:19:27.093392 4780 scope.go:117] "RemoveContainer" containerID="1a9a9d3282a3180159d7ca7878ddf755e5f48acfe5299cb5086826436b5c3f56" Dec 10 11:19:27 crc kubenswrapper[4780]: I1210 
11:19:27.101684 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3264f745-f5c2-460e-b5e7-86eef3f26673-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "3264f745-f5c2-460e-b5e7-86eef3f26673" (UID: "3264f745-f5c2-460e-b5e7-86eef3f26673"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:19:27 crc kubenswrapper[4780]: I1210 11:19:27.180807 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/462049e2-388d-465c-b954-3df68a7c0aea-config-data\") pod \"nova-scheduler-0\" (UID: \"462049e2-388d-465c-b954-3df68a7c0aea\") " pod="openstack/nova-scheduler-0" Dec 10 11:19:27 crc kubenswrapper[4780]: I1210 11:19:27.182467 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/462049e2-388d-465c-b954-3df68a7c0aea-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"462049e2-388d-465c-b954-3df68a7c0aea\") " pod="openstack/nova-scheduler-0" Dec 10 11:19:27 crc kubenswrapper[4780]: I1210 11:19:27.182636 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w8k6t\" (UniqueName: \"kubernetes.io/projected/462049e2-388d-465c-b954-3df68a7c0aea-kube-api-access-w8k6t\") pod \"nova-scheduler-0\" (UID: \"462049e2-388d-465c-b954-3df68a7c0aea\") " pod="openstack/nova-scheduler-0" Dec 10 11:19:27 crc kubenswrapper[4780]: I1210 11:19:27.183113 4780 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3264f745-f5c2-460e-b5e7-86eef3f26673-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 10 11:19:27 crc kubenswrapper[4780]: I1210 11:19:27.288394 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/462049e2-388d-465c-b954-3df68a7c0aea-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"462049e2-388d-465c-b954-3df68a7c0aea\") " pod="openstack/nova-scheduler-0" Dec 10 11:19:27 crc kubenswrapper[4780]: I1210 11:19:27.288552 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w8k6t\" (UniqueName: \"kubernetes.io/projected/462049e2-388d-465c-b954-3df68a7c0aea-kube-api-access-w8k6t\") pod \"nova-scheduler-0\" (UID: \"462049e2-388d-465c-b954-3df68a7c0aea\") " pod="openstack/nova-scheduler-0" Dec 10 11:19:27 crc kubenswrapper[4780]: I1210 11:19:27.289122 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/462049e2-388d-465c-b954-3df68a7c0aea-config-data\") pod \"nova-scheduler-0\" (UID: \"462049e2-388d-465c-b954-3df68a7c0aea\") " pod="openstack/nova-scheduler-0" Dec 10 11:19:27 crc kubenswrapper[4780]: I1210 11:19:27.304732 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/462049e2-388d-465c-b954-3df68a7c0aea-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"462049e2-388d-465c-b954-3df68a7c0aea\") " pod="openstack/nova-scheduler-0" Dec 10 11:19:27 crc kubenswrapper[4780]: I1210 11:19:27.309735 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/462049e2-388d-465c-b954-3df68a7c0aea-config-data\") pod 
\"nova-scheduler-0\" (UID: \"462049e2-388d-465c-b954-3df68a7c0aea\") " pod="openstack/nova-scheduler-0" Dec 10 11:19:27 crc kubenswrapper[4780]: I1210 11:19:27.323853 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-w8k6t\" (UniqueName: \"kubernetes.io/projected/462049e2-388d-465c-b954-3df68a7c0aea-kube-api-access-w8k6t\") pod \"nova-scheduler-0\" (UID: \"462049e2-388d-465c-b954-3df68a7c0aea\") " pod="openstack/nova-scheduler-0" Dec 10 11:19:27 crc kubenswrapper[4780]: I1210 11:19:27.350223 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Dec 10 11:19:27 crc kubenswrapper[4780]: I1210 11:19:27.571199 4780 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-db-sync-xd5bf" Dec 10 11:19:27 crc kubenswrapper[4780]: I1210 11:19:27.604380 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Dec 10 11:19:27 crc kubenswrapper[4780]: I1210 11:19:27.611755 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a624d997-bd02-460a-9dd0-d636be0d70ef-config-data\") pod \"a624d997-bd02-460a-9dd0-d636be0d70ef\" (UID: \"a624d997-bd02-460a-9dd0-d636be0d70ef\") " Dec 10 11:19:27 crc kubenswrapper[4780]: I1210 11:19:27.611996 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a624d997-bd02-460a-9dd0-d636be0d70ef-scripts\") pod \"a624d997-bd02-460a-9dd0-d636be0d70ef\" (UID: \"a624d997-bd02-460a-9dd0-d636be0d70ef\") " Dec 10 11:19:27 crc kubenswrapper[4780]: I1210 11:19:27.612053 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tcg5g\" (UniqueName: \"kubernetes.io/projected/a624d997-bd02-460a-9dd0-d636be0d70ef-kube-api-access-tcg5g\") pod \"a624d997-bd02-460a-9dd0-d636be0d70ef\" (UID: \"a624d997-bd02-460a-9dd0-d636be0d70ef\") " Dec 10 11:19:27 crc kubenswrapper[4780]: I1210 11:19:27.612126 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a624d997-bd02-460a-9dd0-d636be0d70ef-combined-ca-bundle\") pod \"a624d997-bd02-460a-9dd0-d636be0d70ef\" (UID: \"a624d997-bd02-460a-9dd0-d636be0d70ef\") " Dec 10 11:19:27 crc kubenswrapper[4780]: I1210 11:19:27.643349 4780 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"] Dec 10 11:19:27 crc kubenswrapper[4780]: I1210 11:19:27.646269 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a624d997-bd02-460a-9dd0-d636be0d70ef-kube-api-access-tcg5g" (OuterVolumeSpecName: "kube-api-access-tcg5g") pod "a624d997-bd02-460a-9dd0-d636be0d70ef" (UID: "a624d997-bd02-460a-9dd0-d636be0d70ef"). InnerVolumeSpecName "kube-api-access-tcg5g". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:19:27 crc kubenswrapper[4780]: I1210 11:19:27.686737 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a624d997-bd02-460a-9dd0-d636be0d70ef-scripts" (OuterVolumeSpecName: "scripts") pod "a624d997-bd02-460a-9dd0-d636be0d70ef" (UID: "a624d997-bd02-460a-9dd0-d636be0d70ef"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:19:27 crc kubenswrapper[4780]: I1210 11:19:27.710905 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a624d997-bd02-460a-9dd0-d636be0d70ef-config-data" (OuterVolumeSpecName: "config-data") pod "a624d997-bd02-460a-9dd0-d636be0d70ef" (UID: "a624d997-bd02-460a-9dd0-d636be0d70ef"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:19:27 crc kubenswrapper[4780]: I1210 11:19:27.737854 4780 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a624d997-bd02-460a-9dd0-d636be0d70ef-scripts\") on node \"crc\" DevicePath \"\"" Dec 10 11:19:27 crc kubenswrapper[4780]: I1210 11:19:27.737900 4780 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tcg5g\" (UniqueName: \"kubernetes.io/projected/a624d997-bd02-460a-9dd0-d636be0d70ef-kube-api-access-tcg5g\") on node \"crc\" DevicePath \"\"" Dec 10 11:19:27 crc kubenswrapper[4780]: I1210 11:19:27.737940 4780 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a624d997-bd02-460a-9dd0-d636be0d70ef-config-data\") on node \"crc\" DevicePath \"\"" Dec 10 11:19:27 crc kubenswrapper[4780]: I1210 11:19:27.769284 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a624d997-bd02-460a-9dd0-d636be0d70ef-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "a624d997-bd02-460a-9dd0-d636be0d70ef" (UID: "a624d997-bd02-460a-9dd0-d636be0d70ef"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:19:27 crc kubenswrapper[4780]: I1210 11:19:27.774364 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Dec 10 11:19:27 crc kubenswrapper[4780]: E1210 11:19:27.780250 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a624d997-bd02-460a-9dd0-d636be0d70ef" containerName="aodh-db-sync" Dec 10 11:19:27 crc kubenswrapper[4780]: I1210 11:19:27.780299 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="a624d997-bd02-460a-9dd0-d636be0d70ef" containerName="aodh-db-sync" Dec 10 11:19:27 crc kubenswrapper[4780]: I1210 11:19:27.781025 4780 memory_manager.go:354] "RemoveStaleState removing state" podUID="a624d997-bd02-460a-9dd0-d636be0d70ef" containerName="aodh-db-sync" Dec 10 11:19:27 crc kubenswrapper[4780]: I1210 11:19:27.791008 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Dec 10 11:19:27 crc kubenswrapper[4780]: I1210 11:19:27.801093 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Dec 10 11:19:27 crc kubenswrapper[4780]: I1210 11:19:27.836160 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Dec 10 11:19:27 crc kubenswrapper[4780]: I1210 11:19:27.847980 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0dda95c4-c42b-4f0e-b5c6-9d9e55f99156-logs\") pod \"nova-api-0\" (UID: \"0dda95c4-c42b-4f0e-b5c6-9d9e55f99156\") " pod="openstack/nova-api-0" Dec 10 11:19:27 crc kubenswrapper[4780]: I1210 11:19:27.848032 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g9g9t\" (UniqueName: \"kubernetes.io/projected/0dda95c4-c42b-4f0e-b5c6-9d9e55f99156-kube-api-access-g9g9t\") pod \"nova-api-0\" (UID: \"0dda95c4-c42b-4f0e-b5c6-9d9e55f99156\") " pod="openstack/nova-api-0" Dec 10 11:19:27 crc kubenswrapper[4780]: I1210 11:19:27.848101 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0dda95c4-c42b-4f0e-b5c6-9d9e55f99156-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"0dda95c4-c42b-4f0e-b5c6-9d9e55f99156\") " pod="openstack/nova-api-0" Dec 10 11:19:27 crc kubenswrapper[4780]: I1210 11:19:27.848169 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0dda95c4-c42b-4f0e-b5c6-9d9e55f99156-config-data\") pod \"nova-api-0\" (UID: \"0dda95c4-c42b-4f0e-b5c6-9d9e55f99156\") " pod="openstack/nova-api-0" Dec 10 11:19:27 crc kubenswrapper[4780]: I1210 11:19:27.849014 4780 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a624d997-bd02-460a-9dd0-d636be0d70ef-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 10 11:19:27 crc kubenswrapper[4780]: I1210 11:19:27.921562 4780 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/aodh-db-sync-xd5bf" Dec 10 11:19:27 crc kubenswrapper[4780]: I1210 11:19:27.924831 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-db-sync-xd5bf" event={"ID":"a624d997-bd02-460a-9dd0-d636be0d70ef","Type":"ContainerDied","Data":"afc46204355a5ce92947938ceefb23e4393d620d46dbeaefd98dbac78507ecae"} Dec 10 11:19:27 crc kubenswrapper[4780]: I1210 11:19:27.924940 4780 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="afc46204355a5ce92947938ceefb23e4393d620d46dbeaefd98dbac78507ecae" Dec 10 11:19:27 crc kubenswrapper[4780]: I1210 11:19:27.937000 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"dec862f9-f21f-4c0f-9bc6-48d2d017c57e","Type":"ContainerStarted","Data":"05c3215122612a7ac94100ba56fefb30af74ddd26a4f3fc45174dd4ea89e79bb"} Dec 10 11:19:27 crc kubenswrapper[4780]: I1210 11:19:27.937064 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"dec862f9-f21f-4c0f-9bc6-48d2d017c57e","Type":"ContainerStarted","Data":"eb71dba908410a6885d49742cc456a5bdbd38f243eb2b4f2d1365f9a657559ba"} Dec 10 11:19:27 crc kubenswrapper[4780]: I1210 11:19:27.953426 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0dda95c4-c42b-4f0e-b5c6-9d9e55f99156-logs\") pod \"nova-api-0\" (UID: \"0dda95c4-c42b-4f0e-b5c6-9d9e55f99156\") " pod="openstack/nova-api-0" Dec 10 11:19:27 crc kubenswrapper[4780]: I1210 11:19:27.953484 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g9g9t\" (UniqueName: \"kubernetes.io/projected/0dda95c4-c42b-4f0e-b5c6-9d9e55f99156-kube-api-access-g9g9t\") pod \"nova-api-0\" (UID: \"0dda95c4-c42b-4f0e-b5c6-9d9e55f99156\") " pod="openstack/nova-api-0" Dec 10 11:19:27 crc kubenswrapper[4780]: I1210 11:19:27.953592 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0dda95c4-c42b-4f0e-b5c6-9d9e55f99156-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"0dda95c4-c42b-4f0e-b5c6-9d9e55f99156\") " pod="openstack/nova-api-0" Dec 10 11:19:27 crc kubenswrapper[4780]: I1210 11:19:27.953676 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0dda95c4-c42b-4f0e-b5c6-9d9e55f99156-config-data\") pod \"nova-api-0\" (UID: \"0dda95c4-c42b-4f0e-b5c6-9d9e55f99156\") " pod="openstack/nova-api-0" Dec 10 11:19:27 crc kubenswrapper[4780]: I1210 11:19:27.956257 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0dda95c4-c42b-4f0e-b5c6-9d9e55f99156-logs\") pod \"nova-api-0\" (UID: \"0dda95c4-c42b-4f0e-b5c6-9d9e55f99156\") " pod="openstack/nova-api-0" Dec 10 11:19:27 crc kubenswrapper[4780]: I1210 11:19:27.961821 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0dda95c4-c42b-4f0e-b5c6-9d9e55f99156-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"0dda95c4-c42b-4f0e-b5c6-9d9e55f99156\") " pod="openstack/nova-api-0" Dec 10 11:19:27 crc kubenswrapper[4780]: I1210 11:19:27.976035 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0dda95c4-c42b-4f0e-b5c6-9d9e55f99156-config-data\") pod \"nova-api-0\" (UID: 
\"0dda95c4-c42b-4f0e-b5c6-9d9e55f99156\") " pod="openstack/nova-api-0" Dec 10 11:19:27 crc kubenswrapper[4780]: I1210 11:19:27.989136 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=2.989109577 podStartE2EDuration="2.989109577s" podCreationTimestamp="2025-12-10 11:19:25 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 11:19:27.97007247 +0000 UTC m=+2072.823465913" watchObservedRunningTime="2025-12-10 11:19:27.989109577 +0000 UTC m=+2072.842503010" Dec 10 11:19:27 crc kubenswrapper[4780]: I1210 11:19:27.994427 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g9g9t\" (UniqueName: \"kubernetes.io/projected/0dda95c4-c42b-4f0e-b5c6-9d9e55f99156-kube-api-access-g9g9t\") pod \"nova-api-0\" (UID: \"0dda95c4-c42b-4f0e-b5c6-9d9e55f99156\") " pod="openstack/nova-api-0" Dec 10 11:19:28 crc kubenswrapper[4780]: I1210 11:19:28.003540 4780 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3264f745-f5c2-460e-b5e7-86eef3f26673" path="/var/lib/kubelet/pods/3264f745-f5c2-460e-b5e7-86eef3f26673/volumes" Dec 10 11:19:28 crc kubenswrapper[4780]: I1210 11:19:28.005575 4780 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c38ca976-0284-49c1-836c-21f1a7d5354a" path="/var/lib/kubelet/pods/c38ca976-0284-49c1-836c-21f1a7d5354a/volumes" Dec 10 11:19:28 crc kubenswrapper[4780]: I1210 11:19:28.049118 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Dec 10 11:19:28 crc kubenswrapper[4780]: I1210 11:19:28.135980 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Dec 10 11:19:28 crc kubenswrapper[4780]: I1210 11:19:28.793813 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Dec 10 11:19:28 crc kubenswrapper[4780]: I1210 11:19:28.971356 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"462049e2-388d-465c-b954-3df68a7c0aea","Type":"ContainerStarted","Data":"a1a518432f9312ba0c4fd146c5002890d9676b95de8e2e786c1880cb6b14f211"} Dec 10 11:19:28 crc kubenswrapper[4780]: I1210 11:19:28.972395 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"462049e2-388d-465c-b954-3df68a7c0aea","Type":"ContainerStarted","Data":"bf0aa216b1b4865fb319f8f0f79d67caad8bdea66fb1fce6b6ff720e71056170"} Dec 10 11:19:28 crc kubenswrapper[4780]: I1210 11:19:28.975242 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"0dda95c4-c42b-4f0e-b5c6-9d9e55f99156","Type":"ContainerStarted","Data":"89d5a0e12818aa11ff91557dd1235f59aa25084907f31a3f764fba6ca11223ab"} Dec 10 11:19:28 crc kubenswrapper[4780]: I1210 11:19:28.996911 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-scheduler-0" podStartSLOduration=2.99688582 podStartE2EDuration="2.99688582s" podCreationTimestamp="2025-12-10 11:19:26 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 11:19:28.990974869 +0000 UTC m=+2073.844368322" watchObservedRunningTime="2025-12-10 11:19:28.99688582 +0000 UTC m=+2073.850279253" Dec 10 11:19:29 crc kubenswrapper[4780]: I1210 11:19:29.429118 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" 
pods=["openstack/kube-state-metrics-0"] Dec 10 11:19:29 crc kubenswrapper[4780]: I1210 11:19:29.429869 4780 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/kube-state-metrics-0" podUID="82752cbd-d657-4c6d-94f6-e9e75a4452c2" containerName="kube-state-metrics" containerID="cri-o://bef97abe36aa9940e552f320a20692d088e93eb61bfc56f928d12303d1749922" gracePeriod=30 Dec 10 11:19:29 crc kubenswrapper[4780]: I1210 11:19:29.618369 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/mysqld-exporter-0"] Dec 10 11:19:29 crc kubenswrapper[4780]: I1210 11:19:29.618663 4780 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/mysqld-exporter-0" podUID="97a2ba0e-4951-4b10-812a-41d11be1bcc5" containerName="mysqld-exporter" containerID="cri-o://6d4ec7439ea09ed125953fd4e5efb1bb5b2516e71c6428aa76a2a0600818a0ad" gracePeriod=30 Dec 10 11:19:30 crc kubenswrapper[4780]: I1210 11:19:30.005780 4780 generic.go:334] "Generic (PLEG): container finished" podID="82752cbd-d657-4c6d-94f6-e9e75a4452c2" containerID="bef97abe36aa9940e552f320a20692d088e93eb61bfc56f928d12303d1749922" exitCode=2 Dec 10 11:19:30 crc kubenswrapper[4780]: I1210 11:19:30.008982 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"82752cbd-d657-4c6d-94f6-e9e75a4452c2","Type":"ContainerDied","Data":"bef97abe36aa9940e552f320a20692d088e93eb61bfc56f928d12303d1749922"} Dec 10 11:19:30 crc kubenswrapper[4780]: I1210 11:19:30.021819 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"0dda95c4-c42b-4f0e-b5c6-9d9e55f99156","Type":"ContainerStarted","Data":"695dec36217ee2ffa30feaa495b93820a10b03452cbb95b902ac64304276af8d"} Dec 10 11:19:30 crc kubenswrapper[4780]: I1210 11:19:30.021901 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"0dda95c4-c42b-4f0e-b5c6-9d9e55f99156","Type":"ContainerStarted","Data":"04c6453086c9006225a89468ef8172eeb34cc49a69eb5b5d8d50eb37105d630a"} Dec 10 11:19:30 crc kubenswrapper[4780]: I1210 11:19:30.031653 4780 generic.go:334] "Generic (PLEG): container finished" podID="97a2ba0e-4951-4b10-812a-41d11be1bcc5" containerID="6d4ec7439ea09ed125953fd4e5efb1bb5b2516e71c6428aa76a2a0600818a0ad" exitCode=2 Dec 10 11:19:30 crc kubenswrapper[4780]: I1210 11:19:30.032162 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mysqld-exporter-0" event={"ID":"97a2ba0e-4951-4b10-812a-41d11be1bcc5","Type":"ContainerDied","Data":"6d4ec7439ea09ed125953fd4e5efb1bb5b2516e71c6428aa76a2a0600818a0ad"} Dec 10 11:19:30 crc kubenswrapper[4780]: I1210 11:19:30.063214 4780 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/kube-state-metrics-0" Dec 10 11:19:30 crc kubenswrapper[4780]: I1210 11:19:30.070408 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=3.070380782 podStartE2EDuration="3.070380782s" podCreationTimestamp="2025-12-10 11:19:27 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 11:19:30.044663985 +0000 UTC m=+2074.898057438" watchObservedRunningTime="2025-12-10 11:19:30.070380782 +0000 UTC m=+2074.923774215" Dec 10 11:19:30 crc kubenswrapper[4780]: I1210 11:19:30.237567 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cx4fg\" (UniqueName: \"kubernetes.io/projected/82752cbd-d657-4c6d-94f6-e9e75a4452c2-kube-api-access-cx4fg\") pod \"82752cbd-d657-4c6d-94f6-e9e75a4452c2\" (UID: \"82752cbd-d657-4c6d-94f6-e9e75a4452c2\") " Dec 10 11:19:30 crc kubenswrapper[4780]: I1210 11:19:30.250657 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/82752cbd-d657-4c6d-94f6-e9e75a4452c2-kube-api-access-cx4fg" (OuterVolumeSpecName: "kube-api-access-cx4fg") pod "82752cbd-d657-4c6d-94f6-e9e75a4452c2" (UID: "82752cbd-d657-4c6d-94f6-e9e75a4452c2"). InnerVolumeSpecName "kube-api-access-cx4fg". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:19:30 crc kubenswrapper[4780]: I1210 11:19:30.344363 4780 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cx4fg\" (UniqueName: \"kubernetes.io/projected/82752cbd-d657-4c6d-94f6-e9e75a4452c2-kube-api-access-cx4fg\") on node \"crc\" DevicePath \"\"" Dec 10 11:19:30 crc kubenswrapper[4780]: I1210 11:19:30.470144 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Dec 10 11:19:30 crc kubenswrapper[4780]: I1210 11:19:30.470222 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Dec 10 11:19:30 crc kubenswrapper[4780]: I1210 11:19:30.470946 4780 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/mysqld-exporter-0" Dec 10 11:19:30 crc kubenswrapper[4780]: I1210 11:19:30.651996 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/97a2ba0e-4951-4b10-812a-41d11be1bcc5-config-data\") pod \"97a2ba0e-4951-4b10-812a-41d11be1bcc5\" (UID: \"97a2ba0e-4951-4b10-812a-41d11be1bcc5\") " Dec 10 11:19:30 crc kubenswrapper[4780]: I1210 11:19:30.652301 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/97a2ba0e-4951-4b10-812a-41d11be1bcc5-combined-ca-bundle\") pod \"97a2ba0e-4951-4b10-812a-41d11be1bcc5\" (UID: \"97a2ba0e-4951-4b10-812a-41d11be1bcc5\") " Dec 10 11:19:30 crc kubenswrapper[4780]: I1210 11:19:30.652516 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-prgx7\" (UniqueName: \"kubernetes.io/projected/97a2ba0e-4951-4b10-812a-41d11be1bcc5-kube-api-access-prgx7\") pod \"97a2ba0e-4951-4b10-812a-41d11be1bcc5\" (UID: \"97a2ba0e-4951-4b10-812a-41d11be1bcc5\") " Dec 10 11:19:30 crc kubenswrapper[4780]: I1210 11:19:30.713829 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/97a2ba0e-4951-4b10-812a-41d11be1bcc5-kube-api-access-prgx7" (OuterVolumeSpecName: "kube-api-access-prgx7") pod "97a2ba0e-4951-4b10-812a-41d11be1bcc5" (UID: "97a2ba0e-4951-4b10-812a-41d11be1bcc5"). InnerVolumeSpecName "kube-api-access-prgx7". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:19:30 crc kubenswrapper[4780]: I1210 11:19:30.768630 4780 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-prgx7\" (UniqueName: \"kubernetes.io/projected/97a2ba0e-4951-4b10-812a-41d11be1bcc5-kube-api-access-prgx7\") on node \"crc\" DevicePath \"\"" Dec 10 11:19:30 crc kubenswrapper[4780]: I1210 11:19:30.778195 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/97a2ba0e-4951-4b10-812a-41d11be1bcc5-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "97a2ba0e-4951-4b10-812a-41d11be1bcc5" (UID: "97a2ba0e-4951-4b10-812a-41d11be1bcc5"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:19:30 crc kubenswrapper[4780]: I1210 11:19:30.838149 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/97a2ba0e-4951-4b10-812a-41d11be1bcc5-config-data" (OuterVolumeSpecName: "config-data") pod "97a2ba0e-4951-4b10-812a-41d11be1bcc5" (UID: "97a2ba0e-4951-4b10-812a-41d11be1bcc5"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:19:30 crc kubenswrapper[4780]: I1210 11:19:30.871643 4780 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/97a2ba0e-4951-4b10-812a-41d11be1bcc5-config-data\") on node \"crc\" DevicePath \"\"" Dec 10 11:19:30 crc kubenswrapper[4780]: I1210 11:19:30.871713 4780 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/97a2ba0e-4951-4b10-812a-41d11be1bcc5-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 10 11:19:31 crc kubenswrapper[4780]: I1210 11:19:31.054869 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mysqld-exporter-0" event={"ID":"97a2ba0e-4951-4b10-812a-41d11be1bcc5","Type":"ContainerDied","Data":"a45fd5be8adf2bbcba174c254104c4b620a487ada6aa950e25e7c30495a1bb7e"} Dec 10 11:19:31 crc kubenswrapper[4780]: I1210 11:19:31.055534 4780 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/mysqld-exporter-0" Dec 10 11:19:31 crc kubenswrapper[4780]: I1210 11:19:31.056392 4780 scope.go:117] "RemoveContainer" containerID="6d4ec7439ea09ed125953fd4e5efb1bb5b2516e71c6428aa76a2a0600818a0ad" Dec 10 11:19:31 crc kubenswrapper[4780]: I1210 11:19:31.061413 4780 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Dec 10 11:19:31 crc kubenswrapper[4780]: I1210 11:19:31.061603 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"82752cbd-d657-4c6d-94f6-e9e75a4452c2","Type":"ContainerDied","Data":"c7469e1ae2fccd02425e0d2fb3215f70fae34bf139489ae83244595a22003d2b"} Dec 10 11:19:31 crc kubenswrapper[4780]: I1210 11:19:31.267796 4780 scope.go:117] "RemoveContainer" containerID="bef97abe36aa9940e552f320a20692d088e93eb61bfc56f928d12303d1749922" Dec 10 11:19:31 crc kubenswrapper[4780]: I1210 11:19:31.272520 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/mysqld-exporter-0"] Dec 10 11:19:31 crc kubenswrapper[4780]: I1210 11:19:31.300127 4780 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/mysqld-exporter-0"] Dec 10 11:19:31 crc kubenswrapper[4780]: I1210 11:19:31.360944 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/mysqld-exporter-0"] Dec 10 11:19:31 crc kubenswrapper[4780]: E1210 11:19:31.362023 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="82752cbd-d657-4c6d-94f6-e9e75a4452c2" containerName="kube-state-metrics" Dec 10 11:19:31 crc kubenswrapper[4780]: I1210 11:19:31.362049 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="82752cbd-d657-4c6d-94f6-e9e75a4452c2" containerName="kube-state-metrics" Dec 10 11:19:31 crc kubenswrapper[4780]: E1210 11:19:31.362104 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="97a2ba0e-4951-4b10-812a-41d11be1bcc5" containerName="mysqld-exporter" Dec 10 11:19:31 crc kubenswrapper[4780]: I1210 11:19:31.362113 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="97a2ba0e-4951-4b10-812a-41d11be1bcc5" containerName="mysqld-exporter" Dec 10 11:19:31 crc kubenswrapper[4780]: I1210 11:19:31.362513 4780 memory_manager.go:354] "RemoveStaleState removing state" podUID="97a2ba0e-4951-4b10-812a-41d11be1bcc5" containerName="mysqld-exporter" Dec 10 11:19:31 crc kubenswrapper[4780]: I1210 11:19:31.362539 4780 memory_manager.go:354] "RemoveStaleState removing state" podUID="82752cbd-d657-4c6d-94f6-e9e75a4452c2" 
containerName="kube-state-metrics" Dec 10 11:19:31 crc kubenswrapper[4780]: I1210 11:19:31.374247 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/mysqld-exporter-0" Dec 10 11:19:31 crc kubenswrapper[4780]: I1210 11:19:31.378424 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"mysqld-exporter-config-data" Dec 10 11:19:31 crc kubenswrapper[4780]: I1210 11:19:31.379026 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-mysqld-exporter-svc" Dec 10 11:19:31 crc kubenswrapper[4780]: I1210 11:19:31.386656 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mysqld-exporter-tls-certs\" (UniqueName: \"kubernetes.io/secret/acedb1e6-5bdd-428b-8a8a-92b87a1ce4a4-mysqld-exporter-tls-certs\") pod \"mysqld-exporter-0\" (UID: \"acedb1e6-5bdd-428b-8a8a-92b87a1ce4a4\") " pod="openstack/mysqld-exporter-0" Dec 10 11:19:31 crc kubenswrapper[4780]: I1210 11:19:31.386710 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/acedb1e6-5bdd-428b-8a8a-92b87a1ce4a4-combined-ca-bundle\") pod \"mysqld-exporter-0\" (UID: \"acedb1e6-5bdd-428b-8a8a-92b87a1ce4a4\") " pod="openstack/mysqld-exporter-0" Dec 10 11:19:31 crc kubenswrapper[4780]: I1210 11:19:31.386770 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-h4sp4\" (UniqueName: \"kubernetes.io/projected/acedb1e6-5bdd-428b-8a8a-92b87a1ce4a4-kube-api-access-h4sp4\") pod \"mysqld-exporter-0\" (UID: \"acedb1e6-5bdd-428b-8a8a-92b87a1ce4a4\") " pod="openstack/mysqld-exporter-0" Dec 10 11:19:31 crc kubenswrapper[4780]: I1210 11:19:31.386929 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/acedb1e6-5bdd-428b-8a8a-92b87a1ce4a4-config-data\") pod \"mysqld-exporter-0\" (UID: \"acedb1e6-5bdd-428b-8a8a-92b87a1ce4a4\") " pod="openstack/mysqld-exporter-0" Dec 10 11:19:31 crc kubenswrapper[4780]: I1210 11:19:31.387634 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mysqld-exporter-0"] Dec 10 11:19:31 crc kubenswrapper[4780]: I1210 11:19:31.441686 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/kube-state-metrics-0"] Dec 10 11:19:31 crc kubenswrapper[4780]: I1210 11:19:31.466660 4780 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/kube-state-metrics-0"] Dec 10 11:19:31 crc kubenswrapper[4780]: I1210 11:19:31.490493 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/acedb1e6-5bdd-428b-8a8a-92b87a1ce4a4-config-data\") pod \"mysqld-exporter-0\" (UID: \"acedb1e6-5bdd-428b-8a8a-92b87a1ce4a4\") " pod="openstack/mysqld-exporter-0" Dec 10 11:19:31 crc kubenswrapper[4780]: I1210 11:19:31.490754 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/acedb1e6-5bdd-428b-8a8a-92b87a1ce4a4-combined-ca-bundle\") pod \"mysqld-exporter-0\" (UID: \"acedb1e6-5bdd-428b-8a8a-92b87a1ce4a4\") " pod="openstack/mysqld-exporter-0" Dec 10 11:19:31 crc kubenswrapper[4780]: I1210 11:19:31.490799 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"mysqld-exporter-tls-certs\" (UniqueName: 
\"kubernetes.io/secret/acedb1e6-5bdd-428b-8a8a-92b87a1ce4a4-mysqld-exporter-tls-certs\") pod \"mysqld-exporter-0\" (UID: \"acedb1e6-5bdd-428b-8a8a-92b87a1ce4a4\") " pod="openstack/mysqld-exporter-0" Dec 10 11:19:31 crc kubenswrapper[4780]: I1210 11:19:31.490911 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-h4sp4\" (UniqueName: \"kubernetes.io/projected/acedb1e6-5bdd-428b-8a8a-92b87a1ce4a4-kube-api-access-h4sp4\") pod \"mysqld-exporter-0\" (UID: \"acedb1e6-5bdd-428b-8a8a-92b87a1ce4a4\") " pod="openstack/mysqld-exporter-0" Dec 10 11:19:31 crc kubenswrapper[4780]: I1210 11:19:31.509181 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/kube-state-metrics-0"] Dec 10 11:19:31 crc kubenswrapper[4780]: I1210 11:19:31.513592 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/acedb1e6-5bdd-428b-8a8a-92b87a1ce4a4-combined-ca-bundle\") pod \"mysqld-exporter-0\" (UID: \"acedb1e6-5bdd-428b-8a8a-92b87a1ce4a4\") " pod="openstack/mysqld-exporter-0" Dec 10 11:19:31 crc kubenswrapper[4780]: I1210 11:19:31.517822 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Dec 10 11:19:31 crc kubenswrapper[4780]: I1210 11:19:31.523283 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/acedb1e6-5bdd-428b-8a8a-92b87a1ce4a4-config-data\") pod \"mysqld-exporter-0\" (UID: \"acedb1e6-5bdd-428b-8a8a-92b87a1ce4a4\") " pod="openstack/mysqld-exporter-0" Dec 10 11:19:31 crc kubenswrapper[4780]: I1210 11:19:31.523959 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"kube-state-metrics-tls-config" Dec 10 11:19:31 crc kubenswrapper[4780]: I1210 11:19:31.524406 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-kube-state-metrics-svc" Dec 10 11:19:31 crc kubenswrapper[4780]: I1210 11:19:31.530290 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-h4sp4\" (UniqueName: \"kubernetes.io/projected/acedb1e6-5bdd-428b-8a8a-92b87a1ce4a4-kube-api-access-h4sp4\") pod \"mysqld-exporter-0\" (UID: \"acedb1e6-5bdd-428b-8a8a-92b87a1ce4a4\") " pod="openstack/mysqld-exporter-0" Dec 10 11:19:31 crc kubenswrapper[4780]: I1210 11:19:31.548377 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"mysqld-exporter-tls-certs\" (UniqueName: \"kubernetes.io/secret/acedb1e6-5bdd-428b-8a8a-92b87a1ce4a4-mysqld-exporter-tls-certs\") pod \"mysqld-exporter-0\" (UID: \"acedb1e6-5bdd-428b-8a8a-92b87a1ce4a4\") " pod="openstack/mysqld-exporter-0" Dec 10 11:19:31 crc kubenswrapper[4780]: I1210 11:19:31.572610 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Dec 10 11:19:31 crc kubenswrapper[4780]: I1210 11:19:31.705885 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kcs28\" (UniqueName: \"kubernetes.io/projected/d8bd0c6d-1e03-435c-b09c-a4f7f4c942cd-kube-api-access-kcs28\") pod \"kube-state-metrics-0\" (UID: \"d8bd0c6d-1e03-435c-b09c-a4f7f4c942cd\") " pod="openstack/kube-state-metrics-0" Dec 10 11:19:31 crc kubenswrapper[4780]: I1210 11:19:31.705991 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-state-metrics-tls-config\" (UniqueName: 
\"kubernetes.io/secret/d8bd0c6d-1e03-435c-b09c-a4f7f4c942cd-kube-state-metrics-tls-config\") pod \"kube-state-metrics-0\" (UID: \"d8bd0c6d-1e03-435c-b09c-a4f7f4c942cd\") " pod="openstack/kube-state-metrics-0" Dec 10 11:19:31 crc kubenswrapper[4780]: I1210 11:19:31.706067 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/d8bd0c6d-1e03-435c-b09c-a4f7f4c942cd-kube-state-metrics-tls-certs\") pod \"kube-state-metrics-0\" (UID: \"d8bd0c6d-1e03-435c-b09c-a4f7f4c942cd\") " pod="openstack/kube-state-metrics-0" Dec 10 11:19:31 crc kubenswrapper[4780]: I1210 11:19:31.706371 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d8bd0c6d-1e03-435c-b09c-a4f7f4c942cd-combined-ca-bundle\") pod \"kube-state-metrics-0\" (UID: \"d8bd0c6d-1e03-435c-b09c-a4f7f4c942cd\") " pod="openstack/kube-state-metrics-0" Dec 10 11:19:31 crc kubenswrapper[4780]: I1210 11:19:31.730905 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/mysqld-exporter-0" Dec 10 11:19:31 crc kubenswrapper[4780]: I1210 11:19:31.809570 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d8bd0c6d-1e03-435c-b09c-a4f7f4c942cd-combined-ca-bundle\") pod \"kube-state-metrics-0\" (UID: \"d8bd0c6d-1e03-435c-b09c-a4f7f4c942cd\") " pod="openstack/kube-state-metrics-0" Dec 10 11:19:31 crc kubenswrapper[4780]: I1210 11:19:31.810200 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kcs28\" (UniqueName: \"kubernetes.io/projected/d8bd0c6d-1e03-435c-b09c-a4f7f4c942cd-kube-api-access-kcs28\") pod \"kube-state-metrics-0\" (UID: \"d8bd0c6d-1e03-435c-b09c-a4f7f4c942cd\") " pod="openstack/kube-state-metrics-0" Dec 10 11:19:31 crc kubenswrapper[4780]: I1210 11:19:31.810244 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/d8bd0c6d-1e03-435c-b09c-a4f7f4c942cd-kube-state-metrics-tls-config\") pod \"kube-state-metrics-0\" (UID: \"d8bd0c6d-1e03-435c-b09c-a4f7f4c942cd\") " pod="openstack/kube-state-metrics-0" Dec 10 11:19:31 crc kubenswrapper[4780]: I1210 11:19:31.810313 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/d8bd0c6d-1e03-435c-b09c-a4f7f4c942cd-kube-state-metrics-tls-certs\") pod \"kube-state-metrics-0\" (UID: \"d8bd0c6d-1e03-435c-b09c-a4f7f4c942cd\") " pod="openstack/kube-state-metrics-0" Dec 10 11:19:31 crc kubenswrapper[4780]: I1210 11:19:31.818687 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/d8bd0c6d-1e03-435c-b09c-a4f7f4c942cd-kube-state-metrics-tls-certs\") pod \"kube-state-metrics-0\" (UID: \"d8bd0c6d-1e03-435c-b09c-a4f7f4c942cd\") " pod="openstack/kube-state-metrics-0" Dec 10 11:19:31 crc kubenswrapper[4780]: I1210 11:19:31.819084 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d8bd0c6d-1e03-435c-b09c-a4f7f4c942cd-combined-ca-bundle\") pod \"kube-state-metrics-0\" (UID: \"d8bd0c6d-1e03-435c-b09c-a4f7f4c942cd\") " pod="openstack/kube-state-metrics-0" Dec 10 11:19:31 crc 
kubenswrapper[4780]: I1210 11:19:31.819324 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/d8bd0c6d-1e03-435c-b09c-a4f7f4c942cd-kube-state-metrics-tls-config\") pod \"kube-state-metrics-0\" (UID: \"d8bd0c6d-1e03-435c-b09c-a4f7f4c942cd\") " pod="openstack/kube-state-metrics-0" Dec 10 11:19:31 crc kubenswrapper[4780]: I1210 11:19:31.834881 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kcs28\" (UniqueName: \"kubernetes.io/projected/d8bd0c6d-1e03-435c-b09c-a4f7f4c942cd-kube-api-access-kcs28\") pod \"kube-state-metrics-0\" (UID: \"d8bd0c6d-1e03-435c-b09c-a4f7f4c942cd\") " pod="openstack/kube-state-metrics-0" Dec 10 11:19:31 crc kubenswrapper[4780]: I1210 11:19:31.983343 4780 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="82752cbd-d657-4c6d-94f6-e9e75a4452c2" path="/var/lib/kubelet/pods/82752cbd-d657-4c6d-94f6-e9e75a4452c2/volumes" Dec 10 11:19:31 crc kubenswrapper[4780]: I1210 11:19:31.984493 4780 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="97a2ba0e-4951-4b10-812a-41d11be1bcc5" path="/var/lib/kubelet/pods/97a2ba0e-4951-4b10-812a-41d11be1bcc5/volumes" Dec 10 11:19:32 crc kubenswrapper[4780]: I1210 11:19:32.019364 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Dec 10 11:19:32 crc kubenswrapper[4780]: I1210 11:19:32.351161 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0" Dec 10 11:19:32 crc kubenswrapper[4780]: I1210 11:19:32.381803 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mysqld-exporter-0"] Dec 10 11:19:32 crc kubenswrapper[4780]: W1210 11:19:32.382407 4780 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podacedb1e6_5bdd_428b_8a8a_92b87a1ce4a4.slice/crio-d769128651ddd88b0804779b6ca24a8930beebeb612113aaf74b195b072a2366 WatchSource:0}: Error finding container d769128651ddd88b0804779b6ca24a8930beebeb612113aaf74b195b072a2366: Status 404 returned error can't find the container with id d769128651ddd88b0804779b6ca24a8930beebeb612113aaf74b195b072a2366 Dec 10 11:19:32 crc kubenswrapper[4780]: I1210 11:19:32.651319 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Dec 10 11:19:32 crc kubenswrapper[4780]: I1210 11:19:32.802059 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/aodh-0"] Dec 10 11:19:32 crc kubenswrapper[4780]: I1210 11:19:32.812128 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/aodh-0" Dec 10 11:19:32 crc kubenswrapper[4780]: I1210 11:19:32.824367 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"telemetry-autoscaling-dockercfg-hflbp" Dec 10 11:19:32 crc kubenswrapper[4780]: I1210 11:19:32.825574 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"aodh-scripts" Dec 10 11:19:32 crc kubenswrapper[4780]: I1210 11:19:32.834688 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"aodh-config-data" Dec 10 11:19:32 crc kubenswrapper[4780]: I1210 11:19:32.870131 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-0"] Dec 10 11:19:32 crc kubenswrapper[4780]: I1210 11:19:32.952184 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-p4xs5\" (UniqueName: \"kubernetes.io/projected/8e71b1c2-9d57-4a03-b5c3-355c9f8ffb38-kube-api-access-p4xs5\") pod \"aodh-0\" (UID: \"8e71b1c2-9d57-4a03-b5c3-355c9f8ffb38\") " pod="openstack/aodh-0" Dec 10 11:19:32 crc kubenswrapper[4780]: I1210 11:19:32.952300 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8e71b1c2-9d57-4a03-b5c3-355c9f8ffb38-combined-ca-bundle\") pod \"aodh-0\" (UID: \"8e71b1c2-9d57-4a03-b5c3-355c9f8ffb38\") " pod="openstack/aodh-0" Dec 10 11:19:32 crc kubenswrapper[4780]: I1210 11:19:32.952354 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8e71b1c2-9d57-4a03-b5c3-355c9f8ffb38-config-data\") pod \"aodh-0\" (UID: \"8e71b1c2-9d57-4a03-b5c3-355c9f8ffb38\") " pod="openstack/aodh-0" Dec 10 11:19:32 crc kubenswrapper[4780]: I1210 11:19:32.952698 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8e71b1c2-9d57-4a03-b5c3-355c9f8ffb38-scripts\") pod \"aodh-0\" (UID: \"8e71b1c2-9d57-4a03-b5c3-355c9f8ffb38\") " pod="openstack/aodh-0" Dec 10 11:19:33 crc kubenswrapper[4780]: I1210 11:19:33.055755 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8e71b1c2-9d57-4a03-b5c3-355c9f8ffb38-scripts\") pod \"aodh-0\" (UID: \"8e71b1c2-9d57-4a03-b5c3-355c9f8ffb38\") " pod="openstack/aodh-0" Dec 10 11:19:33 crc kubenswrapper[4780]: I1210 11:19:33.055988 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-p4xs5\" (UniqueName: \"kubernetes.io/projected/8e71b1c2-9d57-4a03-b5c3-355c9f8ffb38-kube-api-access-p4xs5\") pod \"aodh-0\" (UID: \"8e71b1c2-9d57-4a03-b5c3-355c9f8ffb38\") " pod="openstack/aodh-0" Dec 10 11:19:33 crc kubenswrapper[4780]: I1210 11:19:33.056026 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8e71b1c2-9d57-4a03-b5c3-355c9f8ffb38-combined-ca-bundle\") pod \"aodh-0\" (UID: \"8e71b1c2-9d57-4a03-b5c3-355c9f8ffb38\") " pod="openstack/aodh-0" Dec 10 11:19:33 crc kubenswrapper[4780]: I1210 11:19:33.056077 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8e71b1c2-9d57-4a03-b5c3-355c9f8ffb38-config-data\") pod \"aodh-0\" (UID: \"8e71b1c2-9d57-4a03-b5c3-355c9f8ffb38\") " pod="openstack/aodh-0" Dec 10 11:19:33 crc kubenswrapper[4780]: 
I1210 11:19:33.065204 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8e71b1c2-9d57-4a03-b5c3-355c9f8ffb38-combined-ca-bundle\") pod \"aodh-0\" (UID: \"8e71b1c2-9d57-4a03-b5c3-355c9f8ffb38\") " pod="openstack/aodh-0" Dec 10 11:19:33 crc kubenswrapper[4780]: I1210 11:19:33.065489 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8e71b1c2-9d57-4a03-b5c3-355c9f8ffb38-config-data\") pod \"aodh-0\" (UID: \"8e71b1c2-9d57-4a03-b5c3-355c9f8ffb38\") " pod="openstack/aodh-0" Dec 10 11:19:33 crc kubenswrapper[4780]: I1210 11:19:33.080128 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8e71b1c2-9d57-4a03-b5c3-355c9f8ffb38-scripts\") pod \"aodh-0\" (UID: \"8e71b1c2-9d57-4a03-b5c3-355c9f8ffb38\") " pod="openstack/aodh-0" Dec 10 11:19:33 crc kubenswrapper[4780]: I1210 11:19:33.083791 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-p4xs5\" (UniqueName: \"kubernetes.io/projected/8e71b1c2-9d57-4a03-b5c3-355c9f8ffb38-kube-api-access-p4xs5\") pod \"aodh-0\" (UID: \"8e71b1c2-9d57-4a03-b5c3-355c9f8ffb38\") " pod="openstack/aodh-0" Dec 10 11:19:33 crc kubenswrapper[4780]: I1210 11:19:33.102939 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mysqld-exporter-0" event={"ID":"acedb1e6-5bdd-428b-8a8a-92b87a1ce4a4","Type":"ContainerStarted","Data":"d769128651ddd88b0804779b6ca24a8930beebeb612113aaf74b195b072a2366"} Dec 10 11:19:33 crc kubenswrapper[4780]: I1210 11:19:33.104719 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"d8bd0c6d-1e03-435c-b09c-a4f7f4c942cd","Type":"ContainerStarted","Data":"9ffda9883028e66d7b7cc778dcf2e4d007d86a8c9979a82ba9a7227becb02e78"} Dec 10 11:19:33 crc kubenswrapper[4780]: I1210 11:19:33.162815 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-0" Dec 10 11:19:34 crc kubenswrapper[4780]: I1210 11:19:34.205366 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mysqld-exporter-0" event={"ID":"acedb1e6-5bdd-428b-8a8a-92b87a1ce4a4","Type":"ContainerStarted","Data":"199b0a22351b8b91dfe47d4155a0133db7e7b86d4881dedfddce06d09785077e"} Dec 10 11:19:34 crc kubenswrapper[4780]: I1210 11:19:34.223317 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-0"] Dec 10 11:19:34 crc kubenswrapper[4780]: I1210 11:19:34.256725 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/mysqld-exporter-0" podStartSLOduration=2.3480051570000002 podStartE2EDuration="3.256692588s" podCreationTimestamp="2025-12-10 11:19:31 +0000 UTC" firstStartedPulling="2025-12-10 11:19:32.384882976 +0000 UTC m=+2077.238276419" lastFinishedPulling="2025-12-10 11:19:33.293570407 +0000 UTC m=+2078.146963850" observedRunningTime="2025-12-10 11:19:34.24895678 +0000 UTC m=+2079.102350223" watchObservedRunningTime="2025-12-10 11:19:34.256692588 +0000 UTC m=+2079.110086061" Dec 10 11:19:34 crc kubenswrapper[4780]: I1210 11:19:34.373384 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-97lpn"] Dec 10 11:19:34 crc kubenswrapper[4780]: I1210 11:19:34.378613 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-97lpn" Dec 10 11:19:34 crc kubenswrapper[4780]: I1210 11:19:34.395885 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-97lpn"] Dec 10 11:19:34 crc kubenswrapper[4780]: I1210 11:19:34.464553 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zxltm\" (UniqueName: \"kubernetes.io/projected/b3c9cd45-f8fa-4329-8ad0-0a07e2fd14ae-kube-api-access-zxltm\") pod \"redhat-operators-97lpn\" (UID: \"b3c9cd45-f8fa-4329-8ad0-0a07e2fd14ae\") " pod="openshift-marketplace/redhat-operators-97lpn" Dec 10 11:19:34 crc kubenswrapper[4780]: I1210 11:19:34.476074 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b3c9cd45-f8fa-4329-8ad0-0a07e2fd14ae-utilities\") pod \"redhat-operators-97lpn\" (UID: \"b3c9cd45-f8fa-4329-8ad0-0a07e2fd14ae\") " pod="openshift-marketplace/redhat-operators-97lpn" Dec 10 11:19:34 crc kubenswrapper[4780]: I1210 11:19:34.476811 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b3c9cd45-f8fa-4329-8ad0-0a07e2fd14ae-catalog-content\") pod \"redhat-operators-97lpn\" (UID: \"b3c9cd45-f8fa-4329-8ad0-0a07e2fd14ae\") " pod="openshift-marketplace/redhat-operators-97lpn" Dec 10 11:19:34 crc kubenswrapper[4780]: I1210 11:19:34.509306 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell1-conductor-0" Dec 10 11:19:34 crc kubenswrapper[4780]: I1210 11:19:34.580437 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b3c9cd45-f8fa-4329-8ad0-0a07e2fd14ae-utilities\") pod \"redhat-operators-97lpn\" (UID: \"b3c9cd45-f8fa-4329-8ad0-0a07e2fd14ae\") " pod="openshift-marketplace/redhat-operators-97lpn" Dec 10 11:19:34 crc kubenswrapper[4780]: I1210 11:19:34.581267 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b3c9cd45-f8fa-4329-8ad0-0a07e2fd14ae-utilities\") pod \"redhat-operators-97lpn\" (UID: \"b3c9cd45-f8fa-4329-8ad0-0a07e2fd14ae\") " pod="openshift-marketplace/redhat-operators-97lpn" Dec 10 11:19:34 crc kubenswrapper[4780]: I1210 11:19:34.581952 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b3c9cd45-f8fa-4329-8ad0-0a07e2fd14ae-catalog-content\") pod \"redhat-operators-97lpn\" (UID: \"b3c9cd45-f8fa-4329-8ad0-0a07e2fd14ae\") " pod="openshift-marketplace/redhat-operators-97lpn" Dec 10 11:19:34 crc kubenswrapper[4780]: I1210 11:19:34.582601 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b3c9cd45-f8fa-4329-8ad0-0a07e2fd14ae-catalog-content\") pod \"redhat-operators-97lpn\" (UID: \"b3c9cd45-f8fa-4329-8ad0-0a07e2fd14ae\") " pod="openshift-marketplace/redhat-operators-97lpn" Dec 10 11:19:34 crc kubenswrapper[4780]: I1210 11:19:34.582877 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zxltm\" (UniqueName: \"kubernetes.io/projected/b3c9cd45-f8fa-4329-8ad0-0a07e2fd14ae-kube-api-access-zxltm\") pod \"redhat-operators-97lpn\" (UID: \"b3c9cd45-f8fa-4329-8ad0-0a07e2fd14ae\") " 
pod="openshift-marketplace/redhat-operators-97lpn" Dec 10 11:19:34 crc kubenswrapper[4780]: I1210 11:19:34.642377 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zxltm\" (UniqueName: \"kubernetes.io/projected/b3c9cd45-f8fa-4329-8ad0-0a07e2fd14ae-kube-api-access-zxltm\") pod \"redhat-operators-97lpn\" (UID: \"b3c9cd45-f8fa-4329-8ad0-0a07e2fd14ae\") " pod="openshift-marketplace/redhat-operators-97lpn" Dec 10 11:19:34 crc kubenswrapper[4780]: I1210 11:19:34.763858 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Dec 10 11:19:34 crc kubenswrapper[4780]: I1210 11:19:34.764316 4780 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="c9ca683b-68dc-4bf8-8977-0c9a6cef383b" containerName="ceilometer-central-agent" containerID="cri-o://5a56e3544792de1bbaa784b65daf1709ada6580e213bda0061f7ab21c38923d6" gracePeriod=30 Dec 10 11:19:34 crc kubenswrapper[4780]: I1210 11:19:34.765172 4780 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="c9ca683b-68dc-4bf8-8977-0c9a6cef383b" containerName="proxy-httpd" containerID="cri-o://266d63aedd7989bfd3bdd0f9453266abceabbc26d6cc92099d513c70b12f28ac" gracePeriod=30 Dec 10 11:19:34 crc kubenswrapper[4780]: I1210 11:19:34.765268 4780 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="c9ca683b-68dc-4bf8-8977-0c9a6cef383b" containerName="sg-core" containerID="cri-o://ace8f789ed174ebe2dfbfeb76915cc7f6f3c1819e9a807093c858ad6c614dc7b" gracePeriod=30 Dec 10 11:19:34 crc kubenswrapper[4780]: I1210 11:19:34.765326 4780 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="c9ca683b-68dc-4bf8-8977-0c9a6cef383b" containerName="ceilometer-notification-agent" containerID="cri-o://293c85f4d792da1145749fc4363a71f5fd7a2163692dc028fb00c99011b2c3bf" gracePeriod=30 Dec 10 11:19:34 crc kubenswrapper[4780]: I1210 11:19:34.854059 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-97lpn" Dec 10 11:19:35 crc kubenswrapper[4780]: I1210 11:19:35.272966 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"d8bd0c6d-1e03-435c-b09c-a4f7f4c942cd","Type":"ContainerStarted","Data":"f896356e2d1283a05de772596f21121140480daaf8d628b3fa02c2f175ddd4f9"} Dec 10 11:19:35 crc kubenswrapper[4780]: I1210 11:19:35.274684 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/kube-state-metrics-0" Dec 10 11:19:35 crc kubenswrapper[4780]: I1210 11:19:35.289414 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"8e71b1c2-9d57-4a03-b5c3-355c9f8ffb38","Type":"ContainerStarted","Data":"52adfe63335fef0025caf6afe0a4cadea9dbe2082ff88f81238a80f861e84d3d"} Dec 10 11:19:35 crc kubenswrapper[4780]: I1210 11:19:35.313556 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/kube-state-metrics-0" podStartSLOduration=3.834223006 podStartE2EDuration="4.313521334s" podCreationTimestamp="2025-12-10 11:19:31 +0000 UTC" firstStartedPulling="2025-12-10 11:19:32.660047848 +0000 UTC m=+2077.513441291" lastFinishedPulling="2025-12-10 11:19:33.139346176 +0000 UTC m=+2077.992739619" observedRunningTime="2025-12-10 11:19:35.308150967 +0000 UTC m=+2080.161544420" watchObservedRunningTime="2025-12-10 11:19:35.313521334 +0000 UTC m=+2080.166914777" Dec 10 11:19:35 crc kubenswrapper[4780]: I1210 11:19:35.317602 4780 generic.go:334] "Generic (PLEG): container finished" podID="c9ca683b-68dc-4bf8-8977-0c9a6cef383b" containerID="266d63aedd7989bfd3bdd0f9453266abceabbc26d6cc92099d513c70b12f28ac" exitCode=0 Dec 10 11:19:35 crc kubenswrapper[4780]: I1210 11:19:35.317976 4780 generic.go:334] "Generic (PLEG): container finished" podID="c9ca683b-68dc-4bf8-8977-0c9a6cef383b" containerID="ace8f789ed174ebe2dfbfeb76915cc7f6f3c1819e9a807093c858ad6c614dc7b" exitCode=2 Dec 10 11:19:35 crc kubenswrapper[4780]: I1210 11:19:35.319697 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c9ca683b-68dc-4bf8-8977-0c9a6cef383b","Type":"ContainerDied","Data":"266d63aedd7989bfd3bdd0f9453266abceabbc26d6cc92099d513c70b12f28ac"} Dec 10 11:19:35 crc kubenswrapper[4780]: I1210 11:19:35.319948 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c9ca683b-68dc-4bf8-8977-0c9a6cef383b","Type":"ContainerDied","Data":"ace8f789ed174ebe2dfbfeb76915cc7f6f3c1819e9a807093c858ad6c614dc7b"} Dec 10 11:19:35 crc kubenswrapper[4780]: I1210 11:19:35.470508 4780 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Dec 10 11:19:35 crc kubenswrapper[4780]: I1210 11:19:35.470551 4780 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Dec 10 11:19:35 crc kubenswrapper[4780]: I1210 11:19:35.638561 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-97lpn"] Dec 10 11:19:36 crc kubenswrapper[4780]: I1210 11:19:36.361674 4780 generic.go:334] "Generic (PLEG): container finished" podID="b3c9cd45-f8fa-4329-8ad0-0a07e2fd14ae" containerID="29c763556be07dd84a7172a7c97b6961a862b245662c113f98dcb2aab5d6df80" exitCode=0 Dec 10 11:19:36 crc kubenswrapper[4780]: I1210 11:19:36.361890 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-97lpn" 
event={"ID":"b3c9cd45-f8fa-4329-8ad0-0a07e2fd14ae","Type":"ContainerDied","Data":"29c763556be07dd84a7172a7c97b6961a862b245662c113f98dcb2aab5d6df80"} Dec 10 11:19:36 crc kubenswrapper[4780]: I1210 11:19:36.362656 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-97lpn" event={"ID":"b3c9cd45-f8fa-4329-8ad0-0a07e2fd14ae","Type":"ContainerStarted","Data":"2ebc2f5bc2904854d418f2b0b26f9903ded5da0675c1bd3e10221f2966a67bd3"} Dec 10 11:19:36 crc kubenswrapper[4780]: I1210 11:19:36.375085 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"8e71b1c2-9d57-4a03-b5c3-355c9f8ffb38","Type":"ContainerStarted","Data":"8d01dafc17d1fbcd3e076e24bea29818b9359b2f327d963a78c13fa5998f177f"} Dec 10 11:19:36 crc kubenswrapper[4780]: I1210 11:19:36.390357 4780 generic.go:334] "Generic (PLEG): container finished" podID="c9ca683b-68dc-4bf8-8977-0c9a6cef383b" containerID="293c85f4d792da1145749fc4363a71f5fd7a2163692dc028fb00c99011b2c3bf" exitCode=0 Dec 10 11:19:36 crc kubenswrapper[4780]: I1210 11:19:36.390434 4780 generic.go:334] "Generic (PLEG): container finished" podID="c9ca683b-68dc-4bf8-8977-0c9a6cef383b" containerID="5a56e3544792de1bbaa784b65daf1709ada6580e213bda0061f7ab21c38923d6" exitCode=0 Dec 10 11:19:36 crc kubenswrapper[4780]: I1210 11:19:36.392171 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c9ca683b-68dc-4bf8-8977-0c9a6cef383b","Type":"ContainerDied","Data":"293c85f4d792da1145749fc4363a71f5fd7a2163692dc028fb00c99011b2c3bf"} Dec 10 11:19:36 crc kubenswrapper[4780]: I1210 11:19:36.392222 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c9ca683b-68dc-4bf8-8977-0c9a6cef383b","Type":"ContainerDied","Data":"5a56e3544792de1bbaa784b65daf1709ada6580e213bda0061f7ab21c38923d6"} Dec 10 11:19:36 crc kubenswrapper[4780]: I1210 11:19:36.512233 4780 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="dec862f9-f21f-4c0f-9bc6-48d2d017c57e" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"https://10.217.0.247:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Dec 10 11:19:36 crc kubenswrapper[4780]: I1210 11:19:36.512709 4780 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="dec862f9-f21f-4c0f-9bc6-48d2d017c57e" containerName="nova-metadata-log" probeResult="failure" output="Get \"https://10.217.0.247:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Dec 10 11:19:37 crc kubenswrapper[4780]: I1210 11:19:37.146279 4780 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Dec 10 11:19:37 crc kubenswrapper[4780]: I1210 11:19:37.263465 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c9ca683b-68dc-4bf8-8977-0c9a6cef383b-log-httpd\") pod \"c9ca683b-68dc-4bf8-8977-0c9a6cef383b\" (UID: \"c9ca683b-68dc-4bf8-8977-0c9a6cef383b\") " Dec 10 11:19:37 crc kubenswrapper[4780]: I1210 11:19:37.263881 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c9ca683b-68dc-4bf8-8977-0c9a6cef383b-config-data\") pod \"c9ca683b-68dc-4bf8-8977-0c9a6cef383b\" (UID: \"c9ca683b-68dc-4bf8-8977-0c9a6cef383b\") " Dec 10 11:19:37 crc kubenswrapper[4780]: I1210 11:19:37.264108 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-r4bdb\" (UniqueName: \"kubernetes.io/projected/c9ca683b-68dc-4bf8-8977-0c9a6cef383b-kube-api-access-r4bdb\") pod \"c9ca683b-68dc-4bf8-8977-0c9a6cef383b\" (UID: \"c9ca683b-68dc-4bf8-8977-0c9a6cef383b\") " Dec 10 11:19:37 crc kubenswrapper[4780]: I1210 11:19:37.264286 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c9ca683b-68dc-4bf8-8977-0c9a6cef383b-scripts\") pod \"c9ca683b-68dc-4bf8-8977-0c9a6cef383b\" (UID: \"c9ca683b-68dc-4bf8-8977-0c9a6cef383b\") " Dec 10 11:19:37 crc kubenswrapper[4780]: I1210 11:19:37.264364 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/c9ca683b-68dc-4bf8-8977-0c9a6cef383b-sg-core-conf-yaml\") pod \"c9ca683b-68dc-4bf8-8977-0c9a6cef383b\" (UID: \"c9ca683b-68dc-4bf8-8977-0c9a6cef383b\") " Dec 10 11:19:37 crc kubenswrapper[4780]: I1210 11:19:37.264460 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c9ca683b-68dc-4bf8-8977-0c9a6cef383b-run-httpd\") pod \"c9ca683b-68dc-4bf8-8977-0c9a6cef383b\" (UID: \"c9ca683b-68dc-4bf8-8977-0c9a6cef383b\") " Dec 10 11:19:37 crc kubenswrapper[4780]: I1210 11:19:37.264565 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c9ca683b-68dc-4bf8-8977-0c9a6cef383b-combined-ca-bundle\") pod \"c9ca683b-68dc-4bf8-8977-0c9a6cef383b\" (UID: \"c9ca683b-68dc-4bf8-8977-0c9a6cef383b\") " Dec 10 11:19:37 crc kubenswrapper[4780]: I1210 11:19:37.268674 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c9ca683b-68dc-4bf8-8977-0c9a6cef383b-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "c9ca683b-68dc-4bf8-8977-0c9a6cef383b" (UID: "c9ca683b-68dc-4bf8-8977-0c9a6cef383b"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 11:19:37 crc kubenswrapper[4780]: I1210 11:19:37.275158 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c9ca683b-68dc-4bf8-8977-0c9a6cef383b-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "c9ca683b-68dc-4bf8-8977-0c9a6cef383b" (UID: "c9ca683b-68dc-4bf8-8977-0c9a6cef383b"). InnerVolumeSpecName "run-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 11:19:37 crc kubenswrapper[4780]: I1210 11:19:37.315294 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c9ca683b-68dc-4bf8-8977-0c9a6cef383b-scripts" (OuterVolumeSpecName: "scripts") pod "c9ca683b-68dc-4bf8-8977-0c9a6cef383b" (UID: "c9ca683b-68dc-4bf8-8977-0c9a6cef383b"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:19:37 crc kubenswrapper[4780]: I1210 11:19:37.358351 4780 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0" Dec 10 11:19:37 crc kubenswrapper[4780]: I1210 11:19:37.410249 4780 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c9ca683b-68dc-4bf8-8977-0c9a6cef383b-log-httpd\") on node \"crc\" DevicePath \"\"" Dec 10 11:19:37 crc kubenswrapper[4780]: I1210 11:19:37.417265 4780 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c9ca683b-68dc-4bf8-8977-0c9a6cef383b-scripts\") on node \"crc\" DevicePath \"\"" Dec 10 11:19:37 crc kubenswrapper[4780]: I1210 11:19:37.421674 4780 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c9ca683b-68dc-4bf8-8977-0c9a6cef383b-run-httpd\") on node \"crc\" DevicePath \"\"" Dec 10 11:19:37 crc kubenswrapper[4780]: I1210 11:19:37.414339 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c9ca683b-68dc-4bf8-8977-0c9a6cef383b-kube-api-access-r4bdb" (OuterVolumeSpecName: "kube-api-access-r4bdb") pod "c9ca683b-68dc-4bf8-8977-0c9a6cef383b" (UID: "c9ca683b-68dc-4bf8-8977-0c9a6cef383b"). InnerVolumeSpecName "kube-api-access-r4bdb". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:19:37 crc kubenswrapper[4780]: I1210 11:19:37.536198 4780 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-r4bdb\" (UniqueName: \"kubernetes.io/projected/c9ca683b-68dc-4bf8-8977-0c9a6cef383b-kube-api-access-r4bdb\") on node \"crc\" DevicePath \"\"" Dec 10 11:19:37 crc kubenswrapper[4780]: I1210 11:19:37.592677 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c9ca683b-68dc-4bf8-8977-0c9a6cef383b-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "c9ca683b-68dc-4bf8-8977-0c9a6cef383b" (UID: "c9ca683b-68dc-4bf8-8977-0c9a6cef383b"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:19:37 crc kubenswrapper[4780]: I1210 11:19:37.638951 4780 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/c9ca683b-68dc-4bf8-8977-0c9a6cef383b-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Dec 10 11:19:37 crc kubenswrapper[4780]: I1210 11:19:37.650159 4780 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Dec 10 11:19:37 crc kubenswrapper[4780]: I1210 11:19:37.651079 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c9ca683b-68dc-4bf8-8977-0c9a6cef383b","Type":"ContainerDied","Data":"afbe7c8e397a335b45f9376293edaba33b95bb534ada30c8e33eb854ad43ba39"} Dec 10 11:19:37 crc kubenswrapper[4780]: I1210 11:19:37.651179 4780 scope.go:117] "RemoveContainer" containerID="266d63aedd7989bfd3bdd0f9453266abceabbc26d6cc92099d513c70b12f28ac" Dec 10 11:19:37 crc kubenswrapper[4780]: I1210 11:19:37.693009 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/aodh-0"] Dec 10 11:19:37 crc kubenswrapper[4780]: I1210 11:19:37.700819 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c9ca683b-68dc-4bf8-8977-0c9a6cef383b-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "c9ca683b-68dc-4bf8-8977-0c9a6cef383b" (UID: "c9ca683b-68dc-4bf8-8977-0c9a6cef383b"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:19:37 crc kubenswrapper[4780]: I1210 11:19:37.744117 4780 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c9ca683b-68dc-4bf8-8977-0c9a6cef383b-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 10 11:19:37 crc kubenswrapper[4780]: I1210 11:19:37.747748 4780 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-scheduler-0" Dec 10 11:19:37 crc kubenswrapper[4780]: I1210 11:19:37.869709 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c9ca683b-68dc-4bf8-8977-0c9a6cef383b-config-data" (OuterVolumeSpecName: "config-data") pod "c9ca683b-68dc-4bf8-8977-0c9a6cef383b" (UID: "c9ca683b-68dc-4bf8-8977-0c9a6cef383b"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:19:37 crc kubenswrapper[4780]: I1210 11:19:37.895177 4780 scope.go:117] "RemoveContainer" containerID="ace8f789ed174ebe2dfbfeb76915cc7f6f3c1819e9a807093c858ad6c614dc7b" Dec 10 11:19:37 crc kubenswrapper[4780]: I1210 11:19:37.899089 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-scheduler-0" Dec 10 11:19:38 crc kubenswrapper[4780]: I1210 11:19:37.955517 4780 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c9ca683b-68dc-4bf8-8977-0c9a6cef383b-config-data\") on node \"crc\" DevicePath \"\"" Dec 10 11:19:38 crc kubenswrapper[4780]: I1210 11:19:37.992104 4780 scope.go:117] "RemoveContainer" containerID="293c85f4d792da1145749fc4363a71f5fd7a2163692dc028fb00c99011b2c3bf" Dec 10 11:19:38 crc kubenswrapper[4780]: I1210 11:19:38.076406 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Dec 10 11:19:38 crc kubenswrapper[4780]: I1210 11:19:38.094211 4780 scope.go:117] "RemoveContainer" containerID="5a56e3544792de1bbaa784b65daf1709ada6580e213bda0061f7ab21c38923d6" Dec 10 11:19:38 crc kubenswrapper[4780]: I1210 11:19:38.095484 4780 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Dec 10 11:19:38 crc kubenswrapper[4780]: I1210 11:19:38.120502 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Dec 10 11:19:38 crc kubenswrapper[4780]: E1210 11:19:38.129646 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c9ca683b-68dc-4bf8-8977-0c9a6cef383b" containerName="ceilometer-notification-agent" Dec 10 11:19:38 crc kubenswrapper[4780]: I1210 11:19:38.129710 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="c9ca683b-68dc-4bf8-8977-0c9a6cef383b" containerName="ceilometer-notification-agent" Dec 10 11:19:38 crc kubenswrapper[4780]: E1210 11:19:38.129760 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c9ca683b-68dc-4bf8-8977-0c9a6cef383b" containerName="proxy-httpd" Dec 10 11:19:38 crc kubenswrapper[4780]: I1210 11:19:38.129767 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="c9ca683b-68dc-4bf8-8977-0c9a6cef383b" containerName="proxy-httpd" Dec 10 11:19:38 crc kubenswrapper[4780]: E1210 11:19:38.129789 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c9ca683b-68dc-4bf8-8977-0c9a6cef383b" containerName="sg-core" Dec 10 11:19:38 crc kubenswrapper[4780]: I1210 11:19:38.129796 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="c9ca683b-68dc-4bf8-8977-0c9a6cef383b" containerName="sg-core" Dec 10 11:19:38 crc kubenswrapper[4780]: E1210 11:19:38.129817 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c9ca683b-68dc-4bf8-8977-0c9a6cef383b" containerName="ceilometer-central-agent" Dec 10 11:19:38 crc kubenswrapper[4780]: I1210 11:19:38.129824 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="c9ca683b-68dc-4bf8-8977-0c9a6cef383b" containerName="ceilometer-central-agent" Dec 10 11:19:38 crc kubenswrapper[4780]: I1210 11:19:38.130259 4780 memory_manager.go:354] "RemoveStaleState removing state" podUID="c9ca683b-68dc-4bf8-8977-0c9a6cef383b" containerName="ceilometer-notification-agent" Dec 10 11:19:38 crc kubenswrapper[4780]: I1210 11:19:38.130297 4780 memory_manager.go:354] "RemoveStaleState removing state" podUID="c9ca683b-68dc-4bf8-8977-0c9a6cef383b" containerName="proxy-httpd" Dec 10 11:19:38 crc kubenswrapper[4780]: I1210 11:19:38.130313 4780 
memory_manager.go:354] "RemoveStaleState removing state" podUID="c9ca683b-68dc-4bf8-8977-0c9a6cef383b" containerName="ceilometer-central-agent" Dec 10 11:19:38 crc kubenswrapper[4780]: I1210 11:19:38.130329 4780 memory_manager.go:354] "RemoveStaleState removing state" podUID="c9ca683b-68dc-4bf8-8977-0c9a6cef383b" containerName="sg-core" Dec 10 11:19:38 crc kubenswrapper[4780]: I1210 11:19:38.151575 4780 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Dec 10 11:19:38 crc kubenswrapper[4780]: I1210 11:19:38.151731 4780 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Dec 10 11:19:38 crc kubenswrapper[4780]: I1210 11:19:38.151878 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Dec 10 11:19:38 crc kubenswrapper[4780]: I1210 11:19:38.159133 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Dec 10 11:19:38 crc kubenswrapper[4780]: I1210 11:19:38.159522 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Dec 10 11:19:38 crc kubenswrapper[4780]: I1210 11:19:38.159810 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ceilometer-internal-svc" Dec 10 11:19:38 crc kubenswrapper[4780]: I1210 11:19:38.161143 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Dec 10 11:19:38 crc kubenswrapper[4780]: I1210 11:19:38.277087 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/e4c56b09-63a1-4237-b86f-9aae4beeccca-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"e4c56b09-63a1-4237-b86f-9aae4beeccca\") " pod="openstack/ceilometer-0" Dec 10 11:19:38 crc kubenswrapper[4780]: I1210 11:19:38.277193 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lc2j6\" (UniqueName: \"kubernetes.io/projected/e4c56b09-63a1-4237-b86f-9aae4beeccca-kube-api-access-lc2j6\") pod \"ceilometer-0\" (UID: \"e4c56b09-63a1-4237-b86f-9aae4beeccca\") " pod="openstack/ceilometer-0" Dec 10 11:19:38 crc kubenswrapper[4780]: I1210 11:19:38.277256 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e4c56b09-63a1-4237-b86f-9aae4beeccca-run-httpd\") pod \"ceilometer-0\" (UID: \"e4c56b09-63a1-4237-b86f-9aae4beeccca\") " pod="openstack/ceilometer-0" Dec 10 11:19:38 crc kubenswrapper[4780]: I1210 11:19:38.277284 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e4c56b09-63a1-4237-b86f-9aae4beeccca-log-httpd\") pod \"ceilometer-0\" (UID: \"e4c56b09-63a1-4237-b86f-9aae4beeccca\") " pod="openstack/ceilometer-0" Dec 10 11:19:38 crc kubenswrapper[4780]: I1210 11:19:38.277342 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e4c56b09-63a1-4237-b86f-9aae4beeccca-config-data\") pod \"ceilometer-0\" (UID: \"e4c56b09-63a1-4237-b86f-9aae4beeccca\") " pod="openstack/ceilometer-0" Dec 10 11:19:38 crc kubenswrapper[4780]: I1210 11:19:38.277688 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" 
(UniqueName: \"kubernetes.io/secret/e4c56b09-63a1-4237-b86f-9aae4beeccca-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"e4c56b09-63a1-4237-b86f-9aae4beeccca\") " pod="openstack/ceilometer-0" Dec 10 11:19:38 crc kubenswrapper[4780]: I1210 11:19:38.277798 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/e4c56b09-63a1-4237-b86f-9aae4beeccca-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"e4c56b09-63a1-4237-b86f-9aae4beeccca\") " pod="openstack/ceilometer-0" Dec 10 11:19:38 crc kubenswrapper[4780]: I1210 11:19:38.277843 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e4c56b09-63a1-4237-b86f-9aae4beeccca-scripts\") pod \"ceilometer-0\" (UID: \"e4c56b09-63a1-4237-b86f-9aae4beeccca\") " pod="openstack/ceilometer-0" Dec 10 11:19:38 crc kubenswrapper[4780]: I1210 11:19:38.385125 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/e4c56b09-63a1-4237-b86f-9aae4beeccca-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"e4c56b09-63a1-4237-b86f-9aae4beeccca\") " pod="openstack/ceilometer-0" Dec 10 11:19:38 crc kubenswrapper[4780]: I1210 11:19:38.385710 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lc2j6\" (UniqueName: \"kubernetes.io/projected/e4c56b09-63a1-4237-b86f-9aae4beeccca-kube-api-access-lc2j6\") pod \"ceilometer-0\" (UID: \"e4c56b09-63a1-4237-b86f-9aae4beeccca\") " pod="openstack/ceilometer-0" Dec 10 11:19:38 crc kubenswrapper[4780]: I1210 11:19:38.385862 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e4c56b09-63a1-4237-b86f-9aae4beeccca-run-httpd\") pod \"ceilometer-0\" (UID: \"e4c56b09-63a1-4237-b86f-9aae4beeccca\") " pod="openstack/ceilometer-0" Dec 10 11:19:38 crc kubenswrapper[4780]: I1210 11:19:38.386032 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e4c56b09-63a1-4237-b86f-9aae4beeccca-log-httpd\") pod \"ceilometer-0\" (UID: \"e4c56b09-63a1-4237-b86f-9aae4beeccca\") " pod="openstack/ceilometer-0" Dec 10 11:19:38 crc kubenswrapper[4780]: I1210 11:19:38.386199 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e4c56b09-63a1-4237-b86f-9aae4beeccca-config-data\") pod \"ceilometer-0\" (UID: \"e4c56b09-63a1-4237-b86f-9aae4beeccca\") " pod="openstack/ceilometer-0" Dec 10 11:19:38 crc kubenswrapper[4780]: I1210 11:19:38.386359 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e4c56b09-63a1-4237-b86f-9aae4beeccca-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"e4c56b09-63a1-4237-b86f-9aae4beeccca\") " pod="openstack/ceilometer-0" Dec 10 11:19:38 crc kubenswrapper[4780]: I1210 11:19:38.386544 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/e4c56b09-63a1-4237-b86f-9aae4beeccca-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"e4c56b09-63a1-4237-b86f-9aae4beeccca\") " pod="openstack/ceilometer-0" Dec 10 11:19:38 crc kubenswrapper[4780]: I1210 11:19:38.386673 4780 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e4c56b09-63a1-4237-b86f-9aae4beeccca-scripts\") pod \"ceilometer-0\" (UID: \"e4c56b09-63a1-4237-b86f-9aae4beeccca\") " pod="openstack/ceilometer-0" Dec 10 11:19:38 crc kubenswrapper[4780]: I1210 11:19:38.391048 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e4c56b09-63a1-4237-b86f-9aae4beeccca-log-httpd\") pod \"ceilometer-0\" (UID: \"e4c56b09-63a1-4237-b86f-9aae4beeccca\") " pod="openstack/ceilometer-0" Dec 10 11:19:38 crc kubenswrapper[4780]: I1210 11:19:38.401186 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e4c56b09-63a1-4237-b86f-9aae4beeccca-run-httpd\") pod \"ceilometer-0\" (UID: \"e4c56b09-63a1-4237-b86f-9aae4beeccca\") " pod="openstack/ceilometer-0" Dec 10 11:19:38 crc kubenswrapper[4780]: I1210 11:19:38.414407 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e4c56b09-63a1-4237-b86f-9aae4beeccca-config-data\") pod \"ceilometer-0\" (UID: \"e4c56b09-63a1-4237-b86f-9aae4beeccca\") " pod="openstack/ceilometer-0" Dec 10 11:19:38 crc kubenswrapper[4780]: I1210 11:19:38.428656 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e4c56b09-63a1-4237-b86f-9aae4beeccca-scripts\") pod \"ceilometer-0\" (UID: \"e4c56b09-63a1-4237-b86f-9aae4beeccca\") " pod="openstack/ceilometer-0" Dec 10 11:19:38 crc kubenswrapper[4780]: I1210 11:19:38.435083 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/e4c56b09-63a1-4237-b86f-9aae4beeccca-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"e4c56b09-63a1-4237-b86f-9aae4beeccca\") " pod="openstack/ceilometer-0" Dec 10 11:19:38 crc kubenswrapper[4780]: I1210 11:19:38.439972 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lc2j6\" (UniqueName: \"kubernetes.io/projected/e4c56b09-63a1-4237-b86f-9aae4beeccca-kube-api-access-lc2j6\") pod \"ceilometer-0\" (UID: \"e4c56b09-63a1-4237-b86f-9aae4beeccca\") " pod="openstack/ceilometer-0" Dec 10 11:19:38 crc kubenswrapper[4780]: I1210 11:19:38.441059 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/e4c56b09-63a1-4237-b86f-9aae4beeccca-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"e4c56b09-63a1-4237-b86f-9aae4beeccca\") " pod="openstack/ceilometer-0" Dec 10 11:19:38 crc kubenswrapper[4780]: I1210 11:19:38.472429 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e4c56b09-63a1-4237-b86f-9aae4beeccca-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"e4c56b09-63a1-4237-b86f-9aae4beeccca\") " pod="openstack/ceilometer-0" Dec 10 11:19:38 crc kubenswrapper[4780]: I1210 11:19:38.501087 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Dec 10 11:19:38 crc kubenswrapper[4780]: I1210 11:19:38.695181 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-97lpn" event={"ID":"b3c9cd45-f8fa-4329-8ad0-0a07e2fd14ae","Type":"ContainerStarted","Data":"33083dfe45beac3e561188028b2d9577b9668002b401bfa1250c90a82b2ef9cc"} Dec 10 11:19:39 crc kubenswrapper[4780]: I1210 11:19:39.225267 4780 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="0dda95c4-c42b-4f0e-b5c6-9d9e55f99156" containerName="nova-api-api" probeResult="failure" output="Get \"http://10.217.0.249:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Dec 10 11:19:39 crc kubenswrapper[4780]: I1210 11:19:39.225305 4780 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="0dda95c4-c42b-4f0e-b5c6-9d9e55f99156" containerName="nova-api-log" probeResult="failure" output="Get \"http://10.217.0.249:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Dec 10 11:19:40 crc kubenswrapper[4780]: I1210 11:19:40.021311 4780 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c9ca683b-68dc-4bf8-8977-0c9a6cef383b" path="/var/lib/kubelet/pods/c9ca683b-68dc-4bf8-8977-0c9a6cef383b/volumes" Dec 10 11:19:40 crc kubenswrapper[4780]: I1210 11:19:40.485551 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Dec 10 11:19:40 crc kubenswrapper[4780]: I1210 11:19:40.754977 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Dec 10 11:19:41 crc kubenswrapper[4780]: I1210 11:19:41.774208 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"e4c56b09-63a1-4237-b86f-9aae4beeccca","Type":"ContainerStarted","Data":"f1cbcb0c2325f45451365c286fc38a5ce761e71d63be78e6ce618bbadf243604"} Dec 10 11:19:41 crc kubenswrapper[4780]: I1210 11:19:41.792754 4780 generic.go:334] "Generic (PLEG): container finished" podID="5e8191e7-63a9-42d8-9049-b315f1e86c2d" containerID="cb233546f0f0d370b2ae22b9f57643fe38785abd13a796b2e678f51556a5a1f2" exitCode=137 Dec 10 11:19:41 crc kubenswrapper[4780]: I1210 11:19:41.792954 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"5e8191e7-63a9-42d8-9049-b315f1e86c2d","Type":"ContainerDied","Data":"cb233546f0f0d370b2ae22b9f57643fe38785abd13a796b2e678f51556a5a1f2"} Dec 10 11:19:41 crc kubenswrapper[4780]: I1210 11:19:41.798975 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"8e71b1c2-9d57-4a03-b5c3-355c9f8ffb38","Type":"ContainerStarted","Data":"2a9bbab168b2be23e863946fb6dfa43f3bf09f6a94e5557863b9216b6fdc87aa"} Dec 10 11:19:42 crc kubenswrapper[4780]: I1210 11:19:42.033050 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/kube-state-metrics-0" Dec 10 11:19:42 crc kubenswrapper[4780]: I1210 11:19:42.069470 4780 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Dec 10 11:19:42 crc kubenswrapper[4780]: I1210 11:19:42.243031 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5e8191e7-63a9-42d8-9049-b315f1e86c2d-config-data\") pod \"5e8191e7-63a9-42d8-9049-b315f1e86c2d\" (UID: \"5e8191e7-63a9-42d8-9049-b315f1e86c2d\") " Dec 10 11:19:42 crc kubenswrapper[4780]: I1210 11:19:42.243445 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5e8191e7-63a9-42d8-9049-b315f1e86c2d-combined-ca-bundle\") pod \"5e8191e7-63a9-42d8-9049-b315f1e86c2d\" (UID: \"5e8191e7-63a9-42d8-9049-b315f1e86c2d\") " Dec 10 11:19:42 crc kubenswrapper[4780]: I1210 11:19:42.244065 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5lm8g\" (UniqueName: \"kubernetes.io/projected/5e8191e7-63a9-42d8-9049-b315f1e86c2d-kube-api-access-5lm8g\") pod \"5e8191e7-63a9-42d8-9049-b315f1e86c2d\" (UID: \"5e8191e7-63a9-42d8-9049-b315f1e86c2d\") " Dec 10 11:19:42 crc kubenswrapper[4780]: I1210 11:19:42.252772 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5e8191e7-63a9-42d8-9049-b315f1e86c2d-kube-api-access-5lm8g" (OuterVolumeSpecName: "kube-api-access-5lm8g") pod "5e8191e7-63a9-42d8-9049-b315f1e86c2d" (UID: "5e8191e7-63a9-42d8-9049-b315f1e86c2d"). InnerVolumeSpecName "kube-api-access-5lm8g". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:19:42 crc kubenswrapper[4780]: I1210 11:19:42.330103 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5e8191e7-63a9-42d8-9049-b315f1e86c2d-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "5e8191e7-63a9-42d8-9049-b315f1e86c2d" (UID: "5e8191e7-63a9-42d8-9049-b315f1e86c2d"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:19:42 crc kubenswrapper[4780]: I1210 11:19:42.330215 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5e8191e7-63a9-42d8-9049-b315f1e86c2d-config-data" (OuterVolumeSpecName: "config-data") pod "5e8191e7-63a9-42d8-9049-b315f1e86c2d" (UID: "5e8191e7-63a9-42d8-9049-b315f1e86c2d"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:19:42 crc kubenswrapper[4780]: I1210 11:19:42.346536 4780 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5e8191e7-63a9-42d8-9049-b315f1e86c2d-config-data\") on node \"crc\" DevicePath \"\"" Dec 10 11:19:42 crc kubenswrapper[4780]: I1210 11:19:42.346588 4780 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5e8191e7-63a9-42d8-9049-b315f1e86c2d-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 10 11:19:42 crc kubenswrapper[4780]: I1210 11:19:42.346602 4780 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5lm8g\" (UniqueName: \"kubernetes.io/projected/5e8191e7-63a9-42d8-9049-b315f1e86c2d-kube-api-access-5lm8g\") on node \"crc\" DevicePath \"\"" Dec 10 11:19:42 crc kubenswrapper[4780]: I1210 11:19:42.815148 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"5e8191e7-63a9-42d8-9049-b315f1e86c2d","Type":"ContainerDied","Data":"7bad8002e56743e69269b7a7d66ec3bc05288b1248944123f41b042decb3257a"} Dec 10 11:19:42 crc kubenswrapper[4780]: I1210 11:19:42.816785 4780 scope.go:117] "RemoveContainer" containerID="cb233546f0f0d370b2ae22b9f57643fe38785abd13a796b2e678f51556a5a1f2" Dec 10 11:19:42 crc kubenswrapper[4780]: I1210 11:19:42.815517 4780 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Dec 10 11:19:42 crc kubenswrapper[4780]: I1210 11:19:42.874139 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Dec 10 11:19:42 crc kubenswrapper[4780]: I1210 11:19:42.897661 4780 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Dec 10 11:19:42 crc kubenswrapper[4780]: I1210 11:19:42.919628 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Dec 10 11:19:42 crc kubenswrapper[4780]: E1210 11:19:42.920651 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5e8191e7-63a9-42d8-9049-b315f1e86c2d" containerName="nova-cell1-novncproxy-novncproxy" Dec 10 11:19:42 crc kubenswrapper[4780]: I1210 11:19:42.920678 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="5e8191e7-63a9-42d8-9049-b315f1e86c2d" containerName="nova-cell1-novncproxy-novncproxy" Dec 10 11:19:42 crc kubenswrapper[4780]: I1210 11:19:42.921140 4780 memory_manager.go:354] "RemoveStaleState removing state" podUID="5e8191e7-63a9-42d8-9049-b315f1e86c2d" containerName="nova-cell1-novncproxy-novncproxy" Dec 10 11:19:42 crc kubenswrapper[4780]: I1210 11:19:42.922464 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Dec 10 11:19:42 crc kubenswrapper[4780]: I1210 11:19:42.925117 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-novncproxy-cell1-public-svc" Dec 10 11:19:42 crc kubenswrapper[4780]: I1210 11:19:42.931163 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-novncproxy-config-data" Dec 10 11:19:42 crc kubenswrapper[4780]: I1210 11:19:42.931492 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-novncproxy-cell1-vencrypt" Dec 10 11:19:42 crc kubenswrapper[4780]: I1210 11:19:42.934717 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Dec 10 11:19:42 crc kubenswrapper[4780]: I1210 11:19:42.970619 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/e89fd3a5-00af-4d5e-bcd3-246cff6f2d68-nova-novncproxy-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"e89fd3a5-00af-4d5e-bcd3-246cff6f2d68\") " pod="openstack/nova-cell1-novncproxy-0" Dec 10 11:19:42 crc kubenswrapper[4780]: I1210 11:19:42.970957 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-swt6l\" (UniqueName: \"kubernetes.io/projected/e89fd3a5-00af-4d5e-bcd3-246cff6f2d68-kube-api-access-swt6l\") pod \"nova-cell1-novncproxy-0\" (UID: \"e89fd3a5-00af-4d5e-bcd3-246cff6f2d68\") " pod="openstack/nova-cell1-novncproxy-0" Dec 10 11:19:42 crc kubenswrapper[4780]: I1210 11:19:42.971090 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e89fd3a5-00af-4d5e-bcd3-246cff6f2d68-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"e89fd3a5-00af-4d5e-bcd3-246cff6f2d68\") " pod="openstack/nova-cell1-novncproxy-0" Dec 10 11:19:42 crc kubenswrapper[4780]: I1210 11:19:42.971149 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e89fd3a5-00af-4d5e-bcd3-246cff6f2d68-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"e89fd3a5-00af-4d5e-bcd3-246cff6f2d68\") " pod="openstack/nova-cell1-novncproxy-0" Dec 10 11:19:42 crc kubenswrapper[4780]: I1210 11:19:42.971358 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/e89fd3a5-00af-4d5e-bcd3-246cff6f2d68-vencrypt-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"e89fd3a5-00af-4d5e-bcd3-246cff6f2d68\") " pod="openstack/nova-cell1-novncproxy-0" Dec 10 11:19:43 crc kubenswrapper[4780]: I1210 11:19:43.074344 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-swt6l\" (UniqueName: \"kubernetes.io/projected/e89fd3a5-00af-4d5e-bcd3-246cff6f2d68-kube-api-access-swt6l\") pod \"nova-cell1-novncproxy-0\" (UID: \"e89fd3a5-00af-4d5e-bcd3-246cff6f2d68\") " pod="openstack/nova-cell1-novncproxy-0" Dec 10 11:19:43 crc kubenswrapper[4780]: I1210 11:19:43.074448 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e89fd3a5-00af-4d5e-bcd3-246cff6f2d68-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"e89fd3a5-00af-4d5e-bcd3-246cff6f2d68\") " pod="openstack/nova-cell1-novncproxy-0" Dec 
10 11:19:43 crc kubenswrapper[4780]: I1210 11:19:43.074485 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e89fd3a5-00af-4d5e-bcd3-246cff6f2d68-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"e89fd3a5-00af-4d5e-bcd3-246cff6f2d68\") " pod="openstack/nova-cell1-novncproxy-0" Dec 10 11:19:43 crc kubenswrapper[4780]: I1210 11:19:43.074567 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/e89fd3a5-00af-4d5e-bcd3-246cff6f2d68-vencrypt-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"e89fd3a5-00af-4d5e-bcd3-246cff6f2d68\") " pod="openstack/nova-cell1-novncproxy-0" Dec 10 11:19:43 crc kubenswrapper[4780]: I1210 11:19:43.074777 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/e89fd3a5-00af-4d5e-bcd3-246cff6f2d68-nova-novncproxy-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"e89fd3a5-00af-4d5e-bcd3-246cff6f2d68\") " pod="openstack/nova-cell1-novncproxy-0" Dec 10 11:19:43 crc kubenswrapper[4780]: I1210 11:19:43.083783 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e89fd3a5-00af-4d5e-bcd3-246cff6f2d68-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"e89fd3a5-00af-4d5e-bcd3-246cff6f2d68\") " pod="openstack/nova-cell1-novncproxy-0" Dec 10 11:19:43 crc kubenswrapper[4780]: I1210 11:19:43.086527 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e89fd3a5-00af-4d5e-bcd3-246cff6f2d68-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"e89fd3a5-00af-4d5e-bcd3-246cff6f2d68\") " pod="openstack/nova-cell1-novncproxy-0" Dec 10 11:19:43 crc kubenswrapper[4780]: I1210 11:19:43.087065 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/e89fd3a5-00af-4d5e-bcd3-246cff6f2d68-vencrypt-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"e89fd3a5-00af-4d5e-bcd3-246cff6f2d68\") " pod="openstack/nova-cell1-novncproxy-0" Dec 10 11:19:43 crc kubenswrapper[4780]: I1210 11:19:43.087491 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/e89fd3a5-00af-4d5e-bcd3-246cff6f2d68-nova-novncproxy-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"e89fd3a5-00af-4d5e-bcd3-246cff6f2d68\") " pod="openstack/nova-cell1-novncproxy-0" Dec 10 11:19:43 crc kubenswrapper[4780]: I1210 11:19:43.106236 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-swt6l\" (UniqueName: \"kubernetes.io/projected/e89fd3a5-00af-4d5e-bcd3-246cff6f2d68-kube-api-access-swt6l\") pod \"nova-cell1-novncproxy-0\" (UID: \"e89fd3a5-00af-4d5e-bcd3-246cff6f2d68\") " pod="openstack/nova-cell1-novncproxy-0" Dec 10 11:19:43 crc kubenswrapper[4780]: I1210 11:19:43.273347 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Dec 10 11:19:43 crc kubenswrapper[4780]: I1210 11:19:43.846772 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"e4c56b09-63a1-4237-b86f-9aae4beeccca","Type":"ContainerStarted","Data":"d5f720bbbb469159d3726868dfb59b46d5ebca0acc9e519baba7c4d86c6fc999"} Dec 10 11:19:43 crc kubenswrapper[4780]: I1210 11:19:43.987127 4780 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5e8191e7-63a9-42d8-9049-b315f1e86c2d" path="/var/lib/kubelet/pods/5e8191e7-63a9-42d8-9049-b315f1e86c2d/volumes" Dec 10 11:19:43 crc kubenswrapper[4780]: I1210 11:19:43.988582 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Dec 10 11:19:45 crc kubenswrapper[4780]: I1210 11:19:45.483779 4780 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Dec 10 11:19:45 crc kubenswrapper[4780]: I1210 11:19:45.494440 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0" Dec 10 11:19:45 crc kubenswrapper[4780]: I1210 11:19:45.503628 4780 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Dec 10 11:19:45 crc kubenswrapper[4780]: I1210 11:19:45.917985 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"8e71b1c2-9d57-4a03-b5c3-355c9f8ffb38","Type":"ContainerStarted","Data":"1f045483cea1cd90becb2977645aace0fc7bf4b1571d2a09dff84094d3d9c8b8"} Dec 10 11:19:45 crc kubenswrapper[4780]: I1210 11:19:45.922126 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"e4c56b09-63a1-4237-b86f-9aae4beeccca","Type":"ContainerStarted","Data":"886d8fd5bbff107ab1d905b8b1fbb38d523bfbb7616fb45b1258d576ef28bf95"} Dec 10 11:19:45 crc kubenswrapper[4780]: I1210 11:19:45.925904 4780 generic.go:334] "Generic (PLEG): container finished" podID="b3c9cd45-f8fa-4329-8ad0-0a07e2fd14ae" containerID="33083dfe45beac3e561188028b2d9577b9668002b401bfa1250c90a82b2ef9cc" exitCode=0 Dec 10 11:19:45 crc kubenswrapper[4780]: I1210 11:19:45.925967 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-97lpn" event={"ID":"b3c9cd45-f8fa-4329-8ad0-0a07e2fd14ae","Type":"ContainerDied","Data":"33083dfe45beac3e561188028b2d9577b9668002b401bfa1250c90a82b2ef9cc"} Dec 10 11:19:45 crc kubenswrapper[4780]: I1210 11:19:45.929943 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"e89fd3a5-00af-4d5e-bcd3-246cff6f2d68","Type":"ContainerStarted","Data":"0a74accaa32a7b561749d3bcd175b3ae1e8f499b27aed9a56117a2f8636dfb9c"} Dec 10 11:19:45 crc kubenswrapper[4780]: I1210 11:19:45.930030 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"e89fd3a5-00af-4d5e-bcd3-246cff6f2d68","Type":"ContainerStarted","Data":"a66ae919422815daa98a188a791eb14e0bb0e6b051d824ed56bdd5bb6829e75c"} Dec 10 11:19:45 crc kubenswrapper[4780]: I1210 11:19:45.977682 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0" Dec 10 11:19:45 crc kubenswrapper[4780]: I1210 11:19:45.997310 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-novncproxy-0" podStartSLOduration=3.997282076 podStartE2EDuration="3.997282076s" podCreationTimestamp="2025-12-10 11:19:42 +0000 UTC" 
firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 11:19:45.979942873 +0000 UTC m=+2090.833336316" watchObservedRunningTime="2025-12-10 11:19:45.997282076 +0000 UTC m=+2090.850675519" Dec 10 11:19:47 crc kubenswrapper[4780]: I1210 11:19:47.003750 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"e4c56b09-63a1-4237-b86f-9aae4beeccca","Type":"ContainerStarted","Data":"efe4c859e4036c7962129edb6481bd90df6cfef8813cd570a1f31635885ff74a"} Dec 10 11:19:48 crc kubenswrapper[4780]: I1210 11:19:48.053765 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-97lpn" event={"ID":"b3c9cd45-f8fa-4329-8ad0-0a07e2fd14ae","Type":"ContainerStarted","Data":"0ecdb588478460eb0e42039dbfef5d7e34267638abd3463121326834079632bb"} Dec 10 11:19:48 crc kubenswrapper[4780]: I1210 11:19:48.177482 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-97lpn" podStartSLOduration=3.6055519240000002 podStartE2EDuration="14.177440847s" podCreationTimestamp="2025-12-10 11:19:34 +0000 UTC" firstStartedPulling="2025-12-10 11:19:36.368179505 +0000 UTC m=+2081.221572948" lastFinishedPulling="2025-12-10 11:19:46.940068428 +0000 UTC m=+2091.793461871" observedRunningTime="2025-12-10 11:19:48.100146882 +0000 UTC m=+2092.953540345" watchObservedRunningTime="2025-12-10 11:19:48.177440847 +0000 UTC m=+2093.030834300" Dec 10 11:19:48 crc kubenswrapper[4780]: I1210 11:19:48.273638 4780 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Dec 10 11:19:48 crc kubenswrapper[4780]: I1210 11:19:48.275754 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Dec 10 11:19:48 crc kubenswrapper[4780]: I1210 11:19:48.275839 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-novncproxy-0" Dec 10 11:19:48 crc kubenswrapper[4780]: I1210 11:19:48.322709 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Dec 10 11:19:48 crc kubenswrapper[4780]: I1210 11:19:48.402268 4780 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Dec 10 11:19:49 crc kubenswrapper[4780]: I1210 11:19:49.076254 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Dec 10 11:19:49 crc kubenswrapper[4780]: I1210 11:19:49.086705 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Dec 10 11:19:49 crc kubenswrapper[4780]: I1210 11:19:49.620687 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-6b7bbf7cf9-xhxdw"] Dec 10 11:19:49 crc kubenswrapper[4780]: I1210 11:19:49.633168 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6b7bbf7cf9-xhxdw" Dec 10 11:19:49 crc kubenswrapper[4780]: I1210 11:19:49.713015 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6b7bbf7cf9-xhxdw"] Dec 10 11:19:49 crc kubenswrapper[4780]: I1210 11:19:49.830129 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zzrzs\" (UniqueName: \"kubernetes.io/projected/ea79dec7-12f6-4443-b129-233fad774365-kube-api-access-zzrzs\") pod \"dnsmasq-dns-6b7bbf7cf9-xhxdw\" (UID: \"ea79dec7-12f6-4443-b129-233fad774365\") " pod="openstack/dnsmasq-dns-6b7bbf7cf9-xhxdw" Dec 10 11:19:49 crc kubenswrapper[4780]: I1210 11:19:49.830231 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/ea79dec7-12f6-4443-b129-233fad774365-dns-swift-storage-0\") pod \"dnsmasq-dns-6b7bbf7cf9-xhxdw\" (UID: \"ea79dec7-12f6-4443-b129-233fad774365\") " pod="openstack/dnsmasq-dns-6b7bbf7cf9-xhxdw" Dec 10 11:19:49 crc kubenswrapper[4780]: I1210 11:19:49.830324 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/ea79dec7-12f6-4443-b129-233fad774365-ovsdbserver-sb\") pod \"dnsmasq-dns-6b7bbf7cf9-xhxdw\" (UID: \"ea79dec7-12f6-4443-b129-233fad774365\") " pod="openstack/dnsmasq-dns-6b7bbf7cf9-xhxdw" Dec 10 11:19:49 crc kubenswrapper[4780]: I1210 11:19:49.830431 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ea79dec7-12f6-4443-b129-233fad774365-dns-svc\") pod \"dnsmasq-dns-6b7bbf7cf9-xhxdw\" (UID: \"ea79dec7-12f6-4443-b129-233fad774365\") " pod="openstack/dnsmasq-dns-6b7bbf7cf9-xhxdw" Dec 10 11:19:49 crc kubenswrapper[4780]: I1210 11:19:49.830585 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/ea79dec7-12f6-4443-b129-233fad774365-ovsdbserver-nb\") pod \"dnsmasq-dns-6b7bbf7cf9-xhxdw\" (UID: \"ea79dec7-12f6-4443-b129-233fad774365\") " pod="openstack/dnsmasq-dns-6b7bbf7cf9-xhxdw" Dec 10 11:19:49 crc kubenswrapper[4780]: I1210 11:19:49.830656 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ea79dec7-12f6-4443-b129-233fad774365-config\") pod \"dnsmasq-dns-6b7bbf7cf9-xhxdw\" (UID: \"ea79dec7-12f6-4443-b129-233fad774365\") " pod="openstack/dnsmasq-dns-6b7bbf7cf9-xhxdw" Dec 10 11:19:49 crc kubenswrapper[4780]: I1210 11:19:49.933462 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/ea79dec7-12f6-4443-b129-233fad774365-ovsdbserver-nb\") pod \"dnsmasq-dns-6b7bbf7cf9-xhxdw\" (UID: \"ea79dec7-12f6-4443-b129-233fad774365\") " pod="openstack/dnsmasq-dns-6b7bbf7cf9-xhxdw" Dec 10 11:19:49 crc kubenswrapper[4780]: I1210 11:19:49.934076 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ea79dec7-12f6-4443-b129-233fad774365-config\") pod \"dnsmasq-dns-6b7bbf7cf9-xhxdw\" (UID: \"ea79dec7-12f6-4443-b129-233fad774365\") " pod="openstack/dnsmasq-dns-6b7bbf7cf9-xhxdw" Dec 10 11:19:49 crc kubenswrapper[4780]: I1210 11:19:49.934227 4780 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"kube-api-access-zzrzs\" (UniqueName: \"kubernetes.io/projected/ea79dec7-12f6-4443-b129-233fad774365-kube-api-access-zzrzs\") pod \"dnsmasq-dns-6b7bbf7cf9-xhxdw\" (UID: \"ea79dec7-12f6-4443-b129-233fad774365\") " pod="openstack/dnsmasq-dns-6b7bbf7cf9-xhxdw" Dec 10 11:19:49 crc kubenswrapper[4780]: I1210 11:19:49.934289 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/ea79dec7-12f6-4443-b129-233fad774365-dns-swift-storage-0\") pod \"dnsmasq-dns-6b7bbf7cf9-xhxdw\" (UID: \"ea79dec7-12f6-4443-b129-233fad774365\") " pod="openstack/dnsmasq-dns-6b7bbf7cf9-xhxdw" Dec 10 11:19:49 crc kubenswrapper[4780]: I1210 11:19:49.934371 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/ea79dec7-12f6-4443-b129-233fad774365-ovsdbserver-sb\") pod \"dnsmasq-dns-6b7bbf7cf9-xhxdw\" (UID: \"ea79dec7-12f6-4443-b129-233fad774365\") " pod="openstack/dnsmasq-dns-6b7bbf7cf9-xhxdw" Dec 10 11:19:49 crc kubenswrapper[4780]: I1210 11:19:49.934483 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ea79dec7-12f6-4443-b129-233fad774365-dns-svc\") pod \"dnsmasq-dns-6b7bbf7cf9-xhxdw\" (UID: \"ea79dec7-12f6-4443-b129-233fad774365\") " pod="openstack/dnsmasq-dns-6b7bbf7cf9-xhxdw" Dec 10 11:19:49 crc kubenswrapper[4780]: I1210 11:19:49.934848 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/ea79dec7-12f6-4443-b129-233fad774365-ovsdbserver-nb\") pod \"dnsmasq-dns-6b7bbf7cf9-xhxdw\" (UID: \"ea79dec7-12f6-4443-b129-233fad774365\") " pod="openstack/dnsmasq-dns-6b7bbf7cf9-xhxdw" Dec 10 11:19:49 crc kubenswrapper[4780]: I1210 11:19:49.935482 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ea79dec7-12f6-4443-b129-233fad774365-config\") pod \"dnsmasq-dns-6b7bbf7cf9-xhxdw\" (UID: \"ea79dec7-12f6-4443-b129-233fad774365\") " pod="openstack/dnsmasq-dns-6b7bbf7cf9-xhxdw" Dec 10 11:19:49 crc kubenswrapper[4780]: I1210 11:19:49.938811 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ea79dec7-12f6-4443-b129-233fad774365-dns-svc\") pod \"dnsmasq-dns-6b7bbf7cf9-xhxdw\" (UID: \"ea79dec7-12f6-4443-b129-233fad774365\") " pod="openstack/dnsmasq-dns-6b7bbf7cf9-xhxdw" Dec 10 11:19:49 crc kubenswrapper[4780]: I1210 11:19:49.939246 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/ea79dec7-12f6-4443-b129-233fad774365-dns-swift-storage-0\") pod \"dnsmasq-dns-6b7bbf7cf9-xhxdw\" (UID: \"ea79dec7-12f6-4443-b129-233fad774365\") " pod="openstack/dnsmasq-dns-6b7bbf7cf9-xhxdw" Dec 10 11:19:49 crc kubenswrapper[4780]: I1210 11:19:49.939246 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/ea79dec7-12f6-4443-b129-233fad774365-ovsdbserver-sb\") pod \"dnsmasq-dns-6b7bbf7cf9-xhxdw\" (UID: \"ea79dec7-12f6-4443-b129-233fad774365\") " pod="openstack/dnsmasq-dns-6b7bbf7cf9-xhxdw" Dec 10 11:19:50 crc kubenswrapper[4780]: I1210 11:19:50.057440 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zzrzs\" (UniqueName: 
\"kubernetes.io/projected/ea79dec7-12f6-4443-b129-233fad774365-kube-api-access-zzrzs\") pod \"dnsmasq-dns-6b7bbf7cf9-xhxdw\" (UID: \"ea79dec7-12f6-4443-b129-233fad774365\") " pod="openstack/dnsmasq-dns-6b7bbf7cf9-xhxdw" Dec 10 11:19:50 crc kubenswrapper[4780]: I1210 11:19:50.108586 4780 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/aodh-0" podUID="8e71b1c2-9d57-4a03-b5c3-355c9f8ffb38" containerName="aodh-api" containerID="cri-o://8d01dafc17d1fbcd3e076e24bea29818b9359b2f327d963a78c13fa5998f177f" gracePeriod=30 Dec 10 11:19:50 crc kubenswrapper[4780]: I1210 11:19:50.108699 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"8e71b1c2-9d57-4a03-b5c3-355c9f8ffb38","Type":"ContainerStarted","Data":"bceb163fcbcf587b9f40d7609abde1f15e4e433cbb9fda2b13bc1e3dbfd75a10"} Dec 10 11:19:50 crc kubenswrapper[4780]: I1210 11:19:50.108758 4780 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/aodh-0" podUID="8e71b1c2-9d57-4a03-b5c3-355c9f8ffb38" containerName="aodh-notifier" containerID="cri-o://1f045483cea1cd90becb2977645aace0fc7bf4b1571d2a09dff84094d3d9c8b8" gracePeriod=30 Dec 10 11:19:50 crc kubenswrapper[4780]: I1210 11:19:50.108821 4780 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/aodh-0" podUID="8e71b1c2-9d57-4a03-b5c3-355c9f8ffb38" containerName="aodh-listener" containerID="cri-o://bceb163fcbcf587b9f40d7609abde1f15e4e433cbb9fda2b13bc1e3dbfd75a10" gracePeriod=30 Dec 10 11:19:50 crc kubenswrapper[4780]: I1210 11:19:50.108876 4780 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/aodh-0" podUID="8e71b1c2-9d57-4a03-b5c3-355c9f8ffb38" containerName="aodh-evaluator" containerID="cri-o://2a9bbab168b2be23e863946fb6dfa43f3bf09f6a94e5557863b9216b6fdc87aa" gracePeriod=30 Dec 10 11:19:50 crc kubenswrapper[4780]: I1210 11:19:50.180745 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/aodh-0" podStartSLOduration=3.596155456 podStartE2EDuration="18.180716139s" podCreationTimestamp="2025-12-10 11:19:32 +0000 UTC" firstStartedPulling="2025-12-10 11:19:34.21961266 +0000 UTC m=+2079.073006103" lastFinishedPulling="2025-12-10 11:19:48.804173343 +0000 UTC m=+2093.657566786" observedRunningTime="2025-12-10 11:19:50.156056039 +0000 UTC m=+2095.009449482" watchObservedRunningTime="2025-12-10 11:19:50.180716139 +0000 UTC m=+2095.034109572" Dec 10 11:19:50 crc kubenswrapper[4780]: I1210 11:19:50.302327 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6b7bbf7cf9-xhxdw" Dec 10 11:19:51 crc kubenswrapper[4780]: I1210 11:19:51.258639 4780 generic.go:334] "Generic (PLEG): container finished" podID="8e71b1c2-9d57-4a03-b5c3-355c9f8ffb38" containerID="8d01dafc17d1fbcd3e076e24bea29818b9359b2f327d963a78c13fa5998f177f" exitCode=0 Dec 10 11:19:51 crc kubenswrapper[4780]: I1210 11:19:51.259280 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"8e71b1c2-9d57-4a03-b5c3-355c9f8ffb38","Type":"ContainerDied","Data":"8d01dafc17d1fbcd3e076e24bea29818b9359b2f327d963a78c13fa5998f177f"} Dec 10 11:19:51 crc kubenswrapper[4780]: I1210 11:19:51.338073 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"e4c56b09-63a1-4237-b86f-9aae4beeccca","Type":"ContainerStarted","Data":"106a8aa3d9595a27d3dfcc9e2d90f339e3f529be2113b542af7db294afe9c81a"} Dec 10 11:19:51 crc kubenswrapper[4780]: I1210 11:19:51.338621 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Dec 10 11:19:51 crc kubenswrapper[4780]: I1210 11:19:51.338492 4780 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="e4c56b09-63a1-4237-b86f-9aae4beeccca" containerName="proxy-httpd" containerID="cri-o://106a8aa3d9595a27d3dfcc9e2d90f339e3f529be2113b542af7db294afe9c81a" gracePeriod=30 Dec 10 11:19:51 crc kubenswrapper[4780]: I1210 11:19:51.338083 4780 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="e4c56b09-63a1-4237-b86f-9aae4beeccca" containerName="ceilometer-central-agent" containerID="cri-o://d5f720bbbb469159d3726868dfb59b46d5ebca0acc9e519baba7c4d86c6fc999" gracePeriod=30 Dec 10 11:19:51 crc kubenswrapper[4780]: I1210 11:19:51.338523 4780 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="e4c56b09-63a1-4237-b86f-9aae4beeccca" containerName="ceilometer-notification-agent" containerID="cri-o://886d8fd5bbff107ab1d905b8b1fbb38d523bfbb7616fb45b1258d576ef28bf95" gracePeriod=30 Dec 10 11:19:51 crc kubenswrapper[4780]: I1210 11:19:51.338511 4780 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="e4c56b09-63a1-4237-b86f-9aae4beeccca" containerName="sg-core" containerID="cri-o://efe4c859e4036c7962129edb6481bd90df6cfef8813cd570a1f31635885ff74a" gracePeriod=30 Dec 10 11:19:51 crc kubenswrapper[4780]: I1210 11:19:51.444822 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=5.491650668 podStartE2EDuration="13.444788331s" podCreationTimestamp="2025-12-10 11:19:38 +0000 UTC" firstStartedPulling="2025-12-10 11:19:40.848142356 +0000 UTC m=+2085.701535799" lastFinishedPulling="2025-12-10 11:19:48.801280019 +0000 UTC m=+2093.654673462" observedRunningTime="2025-12-10 11:19:51.390491184 +0000 UTC m=+2096.243884627" watchObservedRunningTime="2025-12-10 11:19:51.444788331 +0000 UTC m=+2096.298181774" Dec 10 11:19:51 crc kubenswrapper[4780]: I1210 11:19:51.647132 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6b7bbf7cf9-xhxdw"] Dec 10 11:19:52 crc kubenswrapper[4780]: E1210 11:19:52.278160 4780 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: 
[\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode4c56b09_63a1_4237_b86f_9aae4beeccca.slice/crio-efe4c859e4036c7962129edb6481bd90df6cfef8813cd570a1f31635885ff74a.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod8e71b1c2_9d57_4a03_b5c3_355c9f8ffb38.slice/crio-conmon-8d01dafc17d1fbcd3e076e24bea29818b9359b2f327d963a78c13fa5998f177f.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod8e71b1c2_9d57_4a03_b5c3_355c9f8ffb38.slice/crio-2a9bbab168b2be23e863946fb6dfa43f3bf09f6a94e5557863b9216b6fdc87aa.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode4c56b09_63a1_4237_b86f_9aae4beeccca.slice/crio-conmon-106a8aa3d9595a27d3dfcc9e2d90f339e3f529be2113b542af7db294afe9c81a.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode4c56b09_63a1_4237_b86f_9aae4beeccca.slice/crio-conmon-efe4c859e4036c7962129edb6481bd90df6cfef8813cd570a1f31635885ff74a.scope\": RecentStats: unable to find data in memory cache]" Dec 10 11:19:52 crc kubenswrapper[4780]: I1210 11:19:52.374699 4780 generic.go:334] "Generic (PLEG): container finished" podID="8e71b1c2-9d57-4a03-b5c3-355c9f8ffb38" containerID="1f045483cea1cd90becb2977645aace0fc7bf4b1571d2a09dff84094d3d9c8b8" exitCode=0 Dec 10 11:19:52 crc kubenswrapper[4780]: I1210 11:19:52.374992 4780 generic.go:334] "Generic (PLEG): container finished" podID="8e71b1c2-9d57-4a03-b5c3-355c9f8ffb38" containerID="2a9bbab168b2be23e863946fb6dfa43f3bf09f6a94e5557863b9216b6fdc87aa" exitCode=0 Dec 10 11:19:52 crc kubenswrapper[4780]: I1210 11:19:52.375009 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"8e71b1c2-9d57-4a03-b5c3-355c9f8ffb38","Type":"ContainerDied","Data":"1f045483cea1cd90becb2977645aace0fc7bf4b1571d2a09dff84094d3d9c8b8"} Dec 10 11:19:52 crc kubenswrapper[4780]: I1210 11:19:52.375120 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"8e71b1c2-9d57-4a03-b5c3-355c9f8ffb38","Type":"ContainerDied","Data":"2a9bbab168b2be23e863946fb6dfa43f3bf09f6a94e5557863b9216b6fdc87aa"} Dec 10 11:19:52 crc kubenswrapper[4780]: I1210 11:19:52.385937 4780 generic.go:334] "Generic (PLEG): container finished" podID="e4c56b09-63a1-4237-b86f-9aae4beeccca" containerID="106a8aa3d9595a27d3dfcc9e2d90f339e3f529be2113b542af7db294afe9c81a" exitCode=0 Dec 10 11:19:52 crc kubenswrapper[4780]: I1210 11:19:52.385988 4780 generic.go:334] "Generic (PLEG): container finished" podID="e4c56b09-63a1-4237-b86f-9aae4beeccca" containerID="efe4c859e4036c7962129edb6481bd90df6cfef8813cd570a1f31635885ff74a" exitCode=2 Dec 10 11:19:52 crc kubenswrapper[4780]: I1210 11:19:52.386004 4780 generic.go:334] "Generic (PLEG): container finished" podID="e4c56b09-63a1-4237-b86f-9aae4beeccca" containerID="886d8fd5bbff107ab1d905b8b1fbb38d523bfbb7616fb45b1258d576ef28bf95" exitCode=0 Dec 10 11:19:52 crc kubenswrapper[4780]: I1210 11:19:52.386110 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"e4c56b09-63a1-4237-b86f-9aae4beeccca","Type":"ContainerDied","Data":"106a8aa3d9595a27d3dfcc9e2d90f339e3f529be2113b542af7db294afe9c81a"} Dec 10 11:19:52 crc kubenswrapper[4780]: I1210 11:19:52.386160 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" 
event={"ID":"e4c56b09-63a1-4237-b86f-9aae4beeccca","Type":"ContainerDied","Data":"efe4c859e4036c7962129edb6481bd90df6cfef8813cd570a1f31635885ff74a"} Dec 10 11:19:52 crc kubenswrapper[4780]: I1210 11:19:52.386179 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"e4c56b09-63a1-4237-b86f-9aae4beeccca","Type":"ContainerDied","Data":"886d8fd5bbff107ab1d905b8b1fbb38d523bfbb7616fb45b1258d576ef28bf95"} Dec 10 11:19:52 crc kubenswrapper[4780]: I1210 11:19:52.391753 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6b7bbf7cf9-xhxdw" event={"ID":"ea79dec7-12f6-4443-b129-233fad774365","Type":"ContainerStarted","Data":"72c7c7ea41474d2e47a438870372e1faf2a2f1c93779dc9f5f99bd94a19d2702"} Dec 10 11:19:53 crc kubenswrapper[4780]: I1210 11:19:53.273981 4780 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-cell1-novncproxy-0" Dec 10 11:19:53 crc kubenswrapper[4780]: I1210 11:19:53.306727 4780 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-cell1-novncproxy-0" Dec 10 11:19:53 crc kubenswrapper[4780]: I1210 11:19:53.431820 4780 generic.go:334] "Generic (PLEG): container finished" podID="ea79dec7-12f6-4443-b129-233fad774365" containerID="b46304c13c5a74684f5610ec4bd7241e0821086de5aef6ae57a8dc81ad1d8397" exitCode=0 Dec 10 11:19:53 crc kubenswrapper[4780]: I1210 11:19:53.435167 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6b7bbf7cf9-xhxdw" event={"ID":"ea79dec7-12f6-4443-b129-233fad774365","Type":"ContainerDied","Data":"b46304c13c5a74684f5610ec4bd7241e0821086de5aef6ae57a8dc81ad1d8397"} Dec 10 11:19:53 crc kubenswrapper[4780]: I1210 11:19:53.500657 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell1-novncproxy-0" Dec 10 11:19:54 crc kubenswrapper[4780]: I1210 11:19:54.154056 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-cell-mapping-qkv7r"] Dec 10 11:19:54 crc kubenswrapper[4780]: I1210 11:19:54.156802 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-cell-mapping-qkv7r" Dec 10 11:19:54 crc kubenswrapper[4780]: I1210 11:19:54.166018 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-manage-scripts" Dec 10 11:19:54 crc kubenswrapper[4780]: I1210 11:19:54.166412 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-manage-config-data" Dec 10 11:19:54 crc kubenswrapper[4780]: I1210 11:19:54.227274 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d296ea05-7391-4cd2-a807-1168a1547cb6-combined-ca-bundle\") pod \"nova-cell1-cell-mapping-qkv7r\" (UID: \"d296ea05-7391-4cd2-a807-1168a1547cb6\") " pod="openstack/nova-cell1-cell-mapping-qkv7r" Dec 10 11:19:54 crc kubenswrapper[4780]: I1210 11:19:54.227464 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d296ea05-7391-4cd2-a807-1168a1547cb6-config-data\") pod \"nova-cell1-cell-mapping-qkv7r\" (UID: \"d296ea05-7391-4cd2-a807-1168a1547cb6\") " pod="openstack/nova-cell1-cell-mapping-qkv7r" Dec 10 11:19:54 crc kubenswrapper[4780]: I1210 11:19:54.227496 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d296ea05-7391-4cd2-a807-1168a1547cb6-scripts\") pod \"nova-cell1-cell-mapping-qkv7r\" (UID: \"d296ea05-7391-4cd2-a807-1168a1547cb6\") " pod="openstack/nova-cell1-cell-mapping-qkv7r" Dec 10 11:19:54 crc kubenswrapper[4780]: I1210 11:19:54.227674 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rl5rp\" (UniqueName: \"kubernetes.io/projected/d296ea05-7391-4cd2-a807-1168a1547cb6-kube-api-access-rl5rp\") pod \"nova-cell1-cell-mapping-qkv7r\" (UID: \"d296ea05-7391-4cd2-a807-1168a1547cb6\") " pod="openstack/nova-cell1-cell-mapping-qkv7r" Dec 10 11:19:54 crc kubenswrapper[4780]: I1210 11:19:54.264678 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-cell-mapping-qkv7r"] Dec 10 11:19:54 crc kubenswrapper[4780]: I1210 11:19:54.332550 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rl5rp\" (UniqueName: \"kubernetes.io/projected/d296ea05-7391-4cd2-a807-1168a1547cb6-kube-api-access-rl5rp\") pod \"nova-cell1-cell-mapping-qkv7r\" (UID: \"d296ea05-7391-4cd2-a807-1168a1547cb6\") " pod="openstack/nova-cell1-cell-mapping-qkv7r" Dec 10 11:19:54 crc kubenswrapper[4780]: I1210 11:19:54.332820 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d296ea05-7391-4cd2-a807-1168a1547cb6-combined-ca-bundle\") pod \"nova-cell1-cell-mapping-qkv7r\" (UID: \"d296ea05-7391-4cd2-a807-1168a1547cb6\") " pod="openstack/nova-cell1-cell-mapping-qkv7r" Dec 10 11:19:54 crc kubenswrapper[4780]: I1210 11:19:54.333017 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d296ea05-7391-4cd2-a807-1168a1547cb6-config-data\") pod \"nova-cell1-cell-mapping-qkv7r\" (UID: \"d296ea05-7391-4cd2-a807-1168a1547cb6\") " pod="openstack/nova-cell1-cell-mapping-qkv7r" Dec 10 11:19:54 crc kubenswrapper[4780]: I1210 11:19:54.333075 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" 
(UniqueName: \"kubernetes.io/secret/d296ea05-7391-4cd2-a807-1168a1547cb6-scripts\") pod \"nova-cell1-cell-mapping-qkv7r\" (UID: \"d296ea05-7391-4cd2-a807-1168a1547cb6\") " pod="openstack/nova-cell1-cell-mapping-qkv7r" Dec 10 11:19:54 crc kubenswrapper[4780]: I1210 11:19:54.345023 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d296ea05-7391-4cd2-a807-1168a1547cb6-config-data\") pod \"nova-cell1-cell-mapping-qkv7r\" (UID: \"d296ea05-7391-4cd2-a807-1168a1547cb6\") " pod="openstack/nova-cell1-cell-mapping-qkv7r" Dec 10 11:19:54 crc kubenswrapper[4780]: I1210 11:19:54.345058 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d296ea05-7391-4cd2-a807-1168a1547cb6-combined-ca-bundle\") pod \"nova-cell1-cell-mapping-qkv7r\" (UID: \"d296ea05-7391-4cd2-a807-1168a1547cb6\") " pod="openstack/nova-cell1-cell-mapping-qkv7r" Dec 10 11:19:54 crc kubenswrapper[4780]: I1210 11:19:54.353891 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d296ea05-7391-4cd2-a807-1168a1547cb6-scripts\") pod \"nova-cell1-cell-mapping-qkv7r\" (UID: \"d296ea05-7391-4cd2-a807-1168a1547cb6\") " pod="openstack/nova-cell1-cell-mapping-qkv7r" Dec 10 11:19:54 crc kubenswrapper[4780]: I1210 11:19:54.359224 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rl5rp\" (UniqueName: \"kubernetes.io/projected/d296ea05-7391-4cd2-a807-1168a1547cb6-kube-api-access-rl5rp\") pod \"nova-cell1-cell-mapping-qkv7r\" (UID: \"d296ea05-7391-4cd2-a807-1168a1547cb6\") " pod="openstack/nova-cell1-cell-mapping-qkv7r" Dec 10 11:19:54 crc kubenswrapper[4780]: I1210 11:19:54.458949 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6b7bbf7cf9-xhxdw" event={"ID":"ea79dec7-12f6-4443-b129-233fad774365","Type":"ContainerStarted","Data":"91373aa1f93d376da96852e2ff5f1f90415bb446df36cd4f8fc66e8e029eb6d8"} Dec 10 11:19:54 crc kubenswrapper[4780]: I1210 11:19:54.459053 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-6b7bbf7cf9-xhxdw" Dec 10 11:19:54 crc kubenswrapper[4780]: I1210 11:19:54.493628 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-6b7bbf7cf9-xhxdw" podStartSLOduration=5.49360293 podStartE2EDuration="5.49360293s" podCreationTimestamp="2025-12-10 11:19:49 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 11:19:54.486098809 +0000 UTC m=+2099.339492262" watchObservedRunningTime="2025-12-10 11:19:54.49360293 +0000 UTC m=+2099.346996373" Dec 10 11:19:54 crc kubenswrapper[4780]: I1210 11:19:54.594480 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-cell-mapping-qkv7r" Dec 10 11:19:54 crc kubenswrapper[4780]: I1210 11:19:54.854628 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-97lpn" Dec 10 11:19:54 crc kubenswrapper[4780]: I1210 11:19:54.862540 4780 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-97lpn" Dec 10 11:19:55 crc kubenswrapper[4780]: I1210 11:19:55.463016 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-cell-mapping-qkv7r"] Dec 10 11:19:55 crc kubenswrapper[4780]: I1210 11:19:55.806874 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Dec 10 11:19:55 crc kubenswrapper[4780]: I1210 11:19:55.808779 4780 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="0dda95c4-c42b-4f0e-b5c6-9d9e55f99156" containerName="nova-api-log" containerID="cri-o://04c6453086c9006225a89468ef8172eeb34cc49a69eb5b5d8d50eb37105d630a" gracePeriod=30 Dec 10 11:19:55 crc kubenswrapper[4780]: I1210 11:19:55.809406 4780 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="0dda95c4-c42b-4f0e-b5c6-9d9e55f99156" containerName="nova-api-api" containerID="cri-o://695dec36217ee2ffa30feaa495b93820a10b03452cbb95b902ac64304276af8d" gracePeriod=30 Dec 10 11:19:56 crc kubenswrapper[4780]: I1210 11:19:56.055538 4780 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-97lpn" podUID="b3c9cd45-f8fa-4329-8ad0-0a07e2fd14ae" containerName="registry-server" probeResult="failure" output=< Dec 10 11:19:56 crc kubenswrapper[4780]: timeout: failed to connect service ":50051" within 1s Dec 10 11:19:56 crc kubenswrapper[4780]: > Dec 10 11:19:56 crc kubenswrapper[4780]: I1210 11:19:56.580048 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-qkv7r" event={"ID":"d296ea05-7391-4cd2-a807-1168a1547cb6","Type":"ContainerStarted","Data":"58e831f01e8b2dac91b4455f4311f9c942493c697cffcabc861b705c151f9418"} Dec 10 11:19:56 crc kubenswrapper[4780]: I1210 11:19:56.580176 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-qkv7r" event={"ID":"d296ea05-7391-4cd2-a807-1168a1547cb6","Type":"ContainerStarted","Data":"9be787ecbd984f532d736abb786f8e80946d799871cf68d8600515985cc99182"} Dec 10 11:19:56 crc kubenswrapper[4780]: I1210 11:19:56.642042 4780 generic.go:334] "Generic (PLEG): container finished" podID="0dda95c4-c42b-4f0e-b5c6-9d9e55f99156" containerID="04c6453086c9006225a89468ef8172eeb34cc49a69eb5b5d8d50eb37105d630a" exitCode=143 Dec 10 11:19:56 crc kubenswrapper[4780]: I1210 11:19:56.643708 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"0dda95c4-c42b-4f0e-b5c6-9d9e55f99156","Type":"ContainerDied","Data":"04c6453086c9006225a89468ef8172eeb34cc49a69eb5b5d8d50eb37105d630a"} Dec 10 11:19:56 crc kubenswrapper[4780]: I1210 11:19:56.665190 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-cell-mapping-qkv7r" podStartSLOduration=2.665154551 podStartE2EDuration="2.665154551s" podCreationTimestamp="2025-12-10 11:19:54 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 11:19:56.631121592 +0000 UTC m=+2101.484515035" 
watchObservedRunningTime="2025-12-10 11:19:56.665154551 +0000 UTC m=+2101.518547994" Dec 10 11:19:59 crc kubenswrapper[4780]: I1210 11:19:59.366376 4780 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/nova-api-0" podUID="0dda95c4-c42b-4f0e-b5c6-9d9e55f99156" containerName="nova-api-log" probeResult="failure" output="Get \"http://10.217.0.249:8774/\": read tcp 10.217.0.2:57774->10.217.0.249:8774: read: connection reset by peer" Dec 10 11:19:59 crc kubenswrapper[4780]: I1210 11:19:59.366567 4780 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/nova-api-0" podUID="0dda95c4-c42b-4f0e-b5c6-9d9e55f99156" containerName="nova-api-api" probeResult="failure" output="Get \"http://10.217.0.249:8774/\": read tcp 10.217.0.2:57786->10.217.0.249:8774: read: connection reset by peer" Dec 10 11:19:59 crc kubenswrapper[4780]: I1210 11:19:59.689378 4780 generic.go:334] "Generic (PLEG): container finished" podID="0dda95c4-c42b-4f0e-b5c6-9d9e55f99156" containerID="695dec36217ee2ffa30feaa495b93820a10b03452cbb95b902ac64304276af8d" exitCode=0 Dec 10 11:19:59 crc kubenswrapper[4780]: I1210 11:19:59.689482 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"0dda95c4-c42b-4f0e-b5c6-9d9e55f99156","Type":"ContainerDied","Data":"695dec36217ee2ffa30feaa495b93820a10b03452cbb95b902ac64304276af8d"} Dec 10 11:20:00 crc kubenswrapper[4780]: I1210 11:20:00.306758 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-6b7bbf7cf9-xhxdw" Dec 10 11:20:00 crc kubenswrapper[4780]: I1210 11:20:00.458223 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-9b86998b5-l52cn"] Dec 10 11:20:00 crc kubenswrapper[4780]: I1210 11:20:00.458686 4780 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-9b86998b5-l52cn" podUID="b931e42a-a6d4-4b05-996c-f18454ddab28" containerName="dnsmasq-dns" containerID="cri-o://30530f8adbd8b2d90778923229046797e464c38a1a02084cc8dfcd0654b44c5a" gracePeriod=10 Dec 10 11:20:00 crc kubenswrapper[4780]: I1210 11:20:00.753715 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"0dda95c4-c42b-4f0e-b5c6-9d9e55f99156","Type":"ContainerDied","Data":"89d5a0e12818aa11ff91557dd1235f59aa25084907f31a3f764fba6ca11223ab"} Dec 10 11:20:00 crc kubenswrapper[4780]: I1210 11:20:00.753806 4780 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="89d5a0e12818aa11ff91557dd1235f59aa25084907f31a3f764fba6ca11223ab" Dec 10 11:20:00 crc kubenswrapper[4780]: I1210 11:20:00.772193 4780 generic.go:334] "Generic (PLEG): container finished" podID="b931e42a-a6d4-4b05-996c-f18454ddab28" containerID="30530f8adbd8b2d90778923229046797e464c38a1a02084cc8dfcd0654b44c5a" exitCode=0 Dec 10 11:20:00 crc kubenswrapper[4780]: I1210 11:20:00.772307 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-9b86998b5-l52cn" event={"ID":"b931e42a-a6d4-4b05-996c-f18454ddab28","Type":"ContainerDied","Data":"30530f8adbd8b2d90778923229046797e464c38a1a02084cc8dfcd0654b44c5a"} Dec 10 11:20:00 crc kubenswrapper[4780]: I1210 11:20:00.837379 4780 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Dec 10 11:20:00 crc kubenswrapper[4780]: I1210 11:20:00.931140 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0dda95c4-c42b-4f0e-b5c6-9d9e55f99156-logs\") pod \"0dda95c4-c42b-4f0e-b5c6-9d9e55f99156\" (UID: \"0dda95c4-c42b-4f0e-b5c6-9d9e55f99156\") " Dec 10 11:20:00 crc kubenswrapper[4780]: I1210 11:20:00.931860 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0dda95c4-c42b-4f0e-b5c6-9d9e55f99156-config-data\") pod \"0dda95c4-c42b-4f0e-b5c6-9d9e55f99156\" (UID: \"0dda95c4-c42b-4f0e-b5c6-9d9e55f99156\") " Dec 10 11:20:00 crc kubenswrapper[4780]: I1210 11:20:00.932479 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-g9g9t\" (UniqueName: \"kubernetes.io/projected/0dda95c4-c42b-4f0e-b5c6-9d9e55f99156-kube-api-access-g9g9t\") pod \"0dda95c4-c42b-4f0e-b5c6-9d9e55f99156\" (UID: \"0dda95c4-c42b-4f0e-b5c6-9d9e55f99156\") " Dec 10 11:20:00 crc kubenswrapper[4780]: I1210 11:20:00.932685 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0dda95c4-c42b-4f0e-b5c6-9d9e55f99156-combined-ca-bundle\") pod \"0dda95c4-c42b-4f0e-b5c6-9d9e55f99156\" (UID: \"0dda95c4-c42b-4f0e-b5c6-9d9e55f99156\") " Dec 10 11:20:00 crc kubenswrapper[4780]: I1210 11:20:00.932225 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0dda95c4-c42b-4f0e-b5c6-9d9e55f99156-logs" (OuterVolumeSpecName: "logs") pod "0dda95c4-c42b-4f0e-b5c6-9d9e55f99156" (UID: "0dda95c4-c42b-4f0e-b5c6-9d9e55f99156"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 11:20:00 crc kubenswrapper[4780]: I1210 11:20:00.937081 4780 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0dda95c4-c42b-4f0e-b5c6-9d9e55f99156-logs\") on node \"crc\" DevicePath \"\"" Dec 10 11:20:00 crc kubenswrapper[4780]: I1210 11:20:00.943260 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0dda95c4-c42b-4f0e-b5c6-9d9e55f99156-kube-api-access-g9g9t" (OuterVolumeSpecName: "kube-api-access-g9g9t") pod "0dda95c4-c42b-4f0e-b5c6-9d9e55f99156" (UID: "0dda95c4-c42b-4f0e-b5c6-9d9e55f99156"). InnerVolumeSpecName "kube-api-access-g9g9t". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:20:01 crc kubenswrapper[4780]: I1210 11:20:01.023150 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0dda95c4-c42b-4f0e-b5c6-9d9e55f99156-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "0dda95c4-c42b-4f0e-b5c6-9d9e55f99156" (UID: "0dda95c4-c42b-4f0e-b5c6-9d9e55f99156"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:20:01 crc kubenswrapper[4780]: I1210 11:20:01.077089 4780 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-g9g9t\" (UniqueName: \"kubernetes.io/projected/0dda95c4-c42b-4f0e-b5c6-9d9e55f99156-kube-api-access-g9g9t\") on node \"crc\" DevicePath \"\"" Dec 10 11:20:01 crc kubenswrapper[4780]: I1210 11:20:01.077157 4780 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0dda95c4-c42b-4f0e-b5c6-9d9e55f99156-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 10 11:20:01 crc kubenswrapper[4780]: I1210 11:20:01.096900 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0dda95c4-c42b-4f0e-b5c6-9d9e55f99156-config-data" (OuterVolumeSpecName: "config-data") pod "0dda95c4-c42b-4f0e-b5c6-9d9e55f99156" (UID: "0dda95c4-c42b-4f0e-b5c6-9d9e55f99156"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:20:01 crc kubenswrapper[4780]: I1210 11:20:01.187390 4780 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0dda95c4-c42b-4f0e-b5c6-9d9e55f99156-config-data\") on node \"crc\" DevicePath \"\"" Dec 10 11:20:01 crc kubenswrapper[4780]: I1210 11:20:01.206040 4780 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-9b86998b5-l52cn" Dec 10 11:20:01 crc kubenswrapper[4780]: I1210 11:20:01.288238 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lxglv\" (UniqueName: \"kubernetes.io/projected/b931e42a-a6d4-4b05-996c-f18454ddab28-kube-api-access-lxglv\") pod \"b931e42a-a6d4-4b05-996c-f18454ddab28\" (UID: \"b931e42a-a6d4-4b05-996c-f18454ddab28\") " Dec 10 11:20:01 crc kubenswrapper[4780]: I1210 11:20:01.288318 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/b931e42a-a6d4-4b05-996c-f18454ddab28-ovsdbserver-nb\") pod \"b931e42a-a6d4-4b05-996c-f18454ddab28\" (UID: \"b931e42a-a6d4-4b05-996c-f18454ddab28\") " Dec 10 11:20:01 crc kubenswrapper[4780]: I1210 11:20:01.288435 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/b931e42a-a6d4-4b05-996c-f18454ddab28-dns-svc\") pod \"b931e42a-a6d4-4b05-996c-f18454ddab28\" (UID: \"b931e42a-a6d4-4b05-996c-f18454ddab28\") " Dec 10 11:20:01 crc kubenswrapper[4780]: I1210 11:20:01.288483 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/b931e42a-a6d4-4b05-996c-f18454ddab28-ovsdbserver-sb\") pod \"b931e42a-a6d4-4b05-996c-f18454ddab28\" (UID: \"b931e42a-a6d4-4b05-996c-f18454ddab28\") " Dec 10 11:20:01 crc kubenswrapper[4780]: I1210 11:20:01.288573 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/b931e42a-a6d4-4b05-996c-f18454ddab28-dns-swift-storage-0\") pod \"b931e42a-a6d4-4b05-996c-f18454ddab28\" (UID: \"b931e42a-a6d4-4b05-996c-f18454ddab28\") " Dec 10 11:20:01 crc kubenswrapper[4780]: I1210 11:20:01.288636 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b931e42a-a6d4-4b05-996c-f18454ddab28-config\") pod \"b931e42a-a6d4-4b05-996c-f18454ddab28\" 
(UID: \"b931e42a-a6d4-4b05-996c-f18454ddab28\") " Dec 10 11:20:01 crc kubenswrapper[4780]: I1210 11:20:01.321244 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b931e42a-a6d4-4b05-996c-f18454ddab28-kube-api-access-lxglv" (OuterVolumeSpecName: "kube-api-access-lxglv") pod "b931e42a-a6d4-4b05-996c-f18454ddab28" (UID: "b931e42a-a6d4-4b05-996c-f18454ddab28"). InnerVolumeSpecName "kube-api-access-lxglv". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:20:01 crc kubenswrapper[4780]: I1210 11:20:01.409743 4780 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lxglv\" (UniqueName: \"kubernetes.io/projected/b931e42a-a6d4-4b05-996c-f18454ddab28-kube-api-access-lxglv\") on node \"crc\" DevicePath \"\"" Dec 10 11:20:01 crc kubenswrapper[4780]: I1210 11:20:01.433564 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b931e42a-a6d4-4b05-996c-f18454ddab28-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "b931e42a-a6d4-4b05-996c-f18454ddab28" (UID: "b931e42a-a6d4-4b05-996c-f18454ddab28"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 11:20:01 crc kubenswrapper[4780]: I1210 11:20:01.450801 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b931e42a-a6d4-4b05-996c-f18454ddab28-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "b931e42a-a6d4-4b05-996c-f18454ddab28" (UID: "b931e42a-a6d4-4b05-996c-f18454ddab28"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 11:20:01 crc kubenswrapper[4780]: I1210 11:20:01.483783 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b931e42a-a6d4-4b05-996c-f18454ddab28-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "b931e42a-a6d4-4b05-996c-f18454ddab28" (UID: "b931e42a-a6d4-4b05-996c-f18454ddab28"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 11:20:01 crc kubenswrapper[4780]: I1210 11:20:01.495054 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b931e42a-a6d4-4b05-996c-f18454ddab28-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "b931e42a-a6d4-4b05-996c-f18454ddab28" (UID: "b931e42a-a6d4-4b05-996c-f18454ddab28"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 11:20:01 crc kubenswrapper[4780]: I1210 11:20:01.496691 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b931e42a-a6d4-4b05-996c-f18454ddab28-config" (OuterVolumeSpecName: "config") pod "b931e42a-a6d4-4b05-996c-f18454ddab28" (UID: "b931e42a-a6d4-4b05-996c-f18454ddab28"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 11:20:01 crc kubenswrapper[4780]: I1210 11:20:01.532371 4780 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Dec 10 11:20:01 crc kubenswrapper[4780]: I1210 11:20:01.568938 4780 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/b931e42a-a6d4-4b05-996c-f18454ddab28-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Dec 10 11:20:01 crc kubenswrapper[4780]: I1210 11:20:01.568998 4780 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/b931e42a-a6d4-4b05-996c-f18454ddab28-dns-svc\") on node \"crc\" DevicePath \"\"" Dec 10 11:20:01 crc kubenswrapper[4780]: I1210 11:20:01.569013 4780 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/b931e42a-a6d4-4b05-996c-f18454ddab28-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Dec 10 11:20:01 crc kubenswrapper[4780]: I1210 11:20:01.569032 4780 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/b931e42a-a6d4-4b05-996c-f18454ddab28-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Dec 10 11:20:01 crc kubenswrapper[4780]: I1210 11:20:01.569048 4780 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b931e42a-a6d4-4b05-996c-f18454ddab28-config\") on node \"crc\" DevicePath \"\"" Dec 10 11:20:01 crc kubenswrapper[4780]: I1210 11:20:01.674897 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lc2j6\" (UniqueName: \"kubernetes.io/projected/e4c56b09-63a1-4237-b86f-9aae4beeccca-kube-api-access-lc2j6\") pod \"e4c56b09-63a1-4237-b86f-9aae4beeccca\" (UID: \"e4c56b09-63a1-4237-b86f-9aae4beeccca\") " Dec 10 11:20:01 crc kubenswrapper[4780]: I1210 11:20:01.675643 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e4c56b09-63a1-4237-b86f-9aae4beeccca-scripts\") pod \"e4c56b09-63a1-4237-b86f-9aae4beeccca\" (UID: \"e4c56b09-63a1-4237-b86f-9aae4beeccca\") " Dec 10 11:20:01 crc kubenswrapper[4780]: I1210 11:20:01.675879 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e4c56b09-63a1-4237-b86f-9aae4beeccca-combined-ca-bundle\") pod \"e4c56b09-63a1-4237-b86f-9aae4beeccca\" (UID: \"e4c56b09-63a1-4237-b86f-9aae4beeccca\") " Dec 10 11:20:01 crc kubenswrapper[4780]: I1210 11:20:01.676053 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/e4c56b09-63a1-4237-b86f-9aae4beeccca-ceilometer-tls-certs\") pod \"e4c56b09-63a1-4237-b86f-9aae4beeccca\" (UID: \"e4c56b09-63a1-4237-b86f-9aae4beeccca\") " Dec 10 11:20:01 crc kubenswrapper[4780]: I1210 11:20:01.676116 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e4c56b09-63a1-4237-b86f-9aae4beeccca-config-data\") pod \"e4c56b09-63a1-4237-b86f-9aae4beeccca\" (UID: \"e4c56b09-63a1-4237-b86f-9aae4beeccca\") " Dec 10 11:20:01 crc kubenswrapper[4780]: I1210 11:20:01.676143 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/e4c56b09-63a1-4237-b86f-9aae4beeccca-sg-core-conf-yaml\") pod \"e4c56b09-63a1-4237-b86f-9aae4beeccca\" (UID: \"e4c56b09-63a1-4237-b86f-9aae4beeccca\") " Dec 10 11:20:01 crc kubenswrapper[4780]: I1210 
11:20:01.676198 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e4c56b09-63a1-4237-b86f-9aae4beeccca-run-httpd\") pod \"e4c56b09-63a1-4237-b86f-9aae4beeccca\" (UID: \"e4c56b09-63a1-4237-b86f-9aae4beeccca\") " Dec 10 11:20:01 crc kubenswrapper[4780]: I1210 11:20:01.676226 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e4c56b09-63a1-4237-b86f-9aae4beeccca-log-httpd\") pod \"e4c56b09-63a1-4237-b86f-9aae4beeccca\" (UID: \"e4c56b09-63a1-4237-b86f-9aae4beeccca\") " Dec 10 11:20:01 crc kubenswrapper[4780]: I1210 11:20:01.679572 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e4c56b09-63a1-4237-b86f-9aae4beeccca-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "e4c56b09-63a1-4237-b86f-9aae4beeccca" (UID: "e4c56b09-63a1-4237-b86f-9aae4beeccca"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 11:20:01 crc kubenswrapper[4780]: I1210 11:20:01.680019 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e4c56b09-63a1-4237-b86f-9aae4beeccca-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "e4c56b09-63a1-4237-b86f-9aae4beeccca" (UID: "e4c56b09-63a1-4237-b86f-9aae4beeccca"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 11:20:01 crc kubenswrapper[4780]: I1210 11:20:01.701235 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e4c56b09-63a1-4237-b86f-9aae4beeccca-kube-api-access-lc2j6" (OuterVolumeSpecName: "kube-api-access-lc2j6") pod "e4c56b09-63a1-4237-b86f-9aae4beeccca" (UID: "e4c56b09-63a1-4237-b86f-9aae4beeccca"). InnerVolumeSpecName "kube-api-access-lc2j6". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:20:01 crc kubenswrapper[4780]: I1210 11:20:01.702798 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e4c56b09-63a1-4237-b86f-9aae4beeccca-scripts" (OuterVolumeSpecName: "scripts") pod "e4c56b09-63a1-4237-b86f-9aae4beeccca" (UID: "e4c56b09-63a1-4237-b86f-9aae4beeccca"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:20:01 crc kubenswrapper[4780]: I1210 11:20:01.741166 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e4c56b09-63a1-4237-b86f-9aae4beeccca-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "e4c56b09-63a1-4237-b86f-9aae4beeccca" (UID: "e4c56b09-63a1-4237-b86f-9aae4beeccca"). InnerVolumeSpecName "sg-core-conf-yaml". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:20:01 crc kubenswrapper[4780]: I1210 11:20:01.780535 4780 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/e4c56b09-63a1-4237-b86f-9aae4beeccca-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Dec 10 11:20:01 crc kubenswrapper[4780]: I1210 11:20:01.780595 4780 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e4c56b09-63a1-4237-b86f-9aae4beeccca-run-httpd\") on node \"crc\" DevicePath \"\"" Dec 10 11:20:01 crc kubenswrapper[4780]: I1210 11:20:01.780613 4780 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e4c56b09-63a1-4237-b86f-9aae4beeccca-log-httpd\") on node \"crc\" DevicePath \"\"" Dec 10 11:20:01 crc kubenswrapper[4780]: I1210 11:20:01.780628 4780 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lc2j6\" (UniqueName: \"kubernetes.io/projected/e4c56b09-63a1-4237-b86f-9aae4beeccca-kube-api-access-lc2j6\") on node \"crc\" DevicePath \"\"" Dec 10 11:20:01 crc kubenswrapper[4780]: I1210 11:20:01.780645 4780 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e4c56b09-63a1-4237-b86f-9aae4beeccca-scripts\") on node \"crc\" DevicePath \"\"" Dec 10 11:20:01 crc kubenswrapper[4780]: I1210 11:20:01.827081 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-9b86998b5-l52cn" event={"ID":"b931e42a-a6d4-4b05-996c-f18454ddab28","Type":"ContainerDied","Data":"eb6b00be16869751175660dc049b2cea81047828804d82e8a7bea742a1b731d8"} Dec 10 11:20:01 crc kubenswrapper[4780]: I1210 11:20:01.827199 4780 scope.go:117] "RemoveContainer" containerID="30530f8adbd8b2d90778923229046797e464c38a1a02084cc8dfcd0654b44c5a" Dec 10 11:20:01 crc kubenswrapper[4780]: I1210 11:20:01.827665 4780 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-9b86998b5-l52cn" Dec 10 11:20:01 crc kubenswrapper[4780]: I1210 11:20:01.848054 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e4c56b09-63a1-4237-b86f-9aae4beeccca-ceilometer-tls-certs" (OuterVolumeSpecName: "ceilometer-tls-certs") pod "e4c56b09-63a1-4237-b86f-9aae4beeccca" (UID: "e4c56b09-63a1-4237-b86f-9aae4beeccca"). InnerVolumeSpecName "ceilometer-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:20:01 crc kubenswrapper[4780]: I1210 11:20:01.849355 4780 generic.go:334] "Generic (PLEG): container finished" podID="e4c56b09-63a1-4237-b86f-9aae4beeccca" containerID="d5f720bbbb469159d3726868dfb59b46d5ebca0acc9e519baba7c4d86c6fc999" exitCode=0 Dec 10 11:20:01 crc kubenswrapper[4780]: I1210 11:20:01.849629 4780 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Dec 10 11:20:01 crc kubenswrapper[4780]: I1210 11:20:01.849689 4780 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Dec 10 11:20:01 crc kubenswrapper[4780]: I1210 11:20:01.849758 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"e4c56b09-63a1-4237-b86f-9aae4beeccca","Type":"ContainerDied","Data":"d5f720bbbb469159d3726868dfb59b46d5ebca0acc9e519baba7c4d86c6fc999"} Dec 10 11:20:01 crc kubenswrapper[4780]: I1210 11:20:01.849872 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"e4c56b09-63a1-4237-b86f-9aae4beeccca","Type":"ContainerDied","Data":"f1cbcb0c2325f45451365c286fc38a5ce761e71d63be78e6ce618bbadf243604"} Dec 10 11:20:01 crc kubenswrapper[4780]: I1210 11:20:01.886133 4780 reconciler_common.go:293] "Volume detached for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/e4c56b09-63a1-4237-b86f-9aae4beeccca-ceilometer-tls-certs\") on node \"crc\" DevicePath \"\"" Dec 10 11:20:01 crc kubenswrapper[4780]: I1210 11:20:01.972354 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e4c56b09-63a1-4237-b86f-9aae4beeccca-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "e4c56b09-63a1-4237-b86f-9aae4beeccca" (UID: "e4c56b09-63a1-4237-b86f-9aae4beeccca"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:20:01 crc kubenswrapper[4780]: I1210 11:20:01.990606 4780 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e4c56b09-63a1-4237-b86f-9aae4beeccca-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 10 11:20:02 crc kubenswrapper[4780]: I1210 11:20:02.012133 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e4c56b09-63a1-4237-b86f-9aae4beeccca-config-data" (OuterVolumeSpecName: "config-data") pod "e4c56b09-63a1-4237-b86f-9aae4beeccca" (UID: "e4c56b09-63a1-4237-b86f-9aae4beeccca"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:20:02 crc kubenswrapper[4780]: I1210 11:20:02.093936 4780 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e4c56b09-63a1-4237-b86f-9aae4beeccca-config-data\") on node \"crc\" DevicePath \"\"" Dec 10 11:20:02 crc kubenswrapper[4780]: I1210 11:20:02.111554 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Dec 10 11:20:02 crc kubenswrapper[4780]: I1210 11:20:02.121249 4780 scope.go:117] "RemoveContainer" containerID="e9d20fcc6ee073b52b9ece46837ae2ee81c6d77ff3c1c12a408c69f318c2a6e6" Dec 10 11:20:02 crc kubenswrapper[4780]: I1210 11:20:02.143452 4780 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"] Dec 10 11:20:02 crc kubenswrapper[4780]: I1210 11:20:02.159952 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-9b86998b5-l52cn"] Dec 10 11:20:02 crc kubenswrapper[4780]: I1210 11:20:02.167201 4780 scope.go:117] "RemoveContainer" containerID="106a8aa3d9595a27d3dfcc9e2d90f339e3f529be2113b542af7db294afe9c81a" Dec 10 11:20:02 crc kubenswrapper[4780]: I1210 11:20:02.181268 4780 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-9b86998b5-l52cn"] Dec 10 11:20:02 crc kubenswrapper[4780]: I1210 11:20:02.194895 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Dec 10 11:20:02 crc kubenswrapper[4780]: E1210 11:20:02.195949 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0dda95c4-c42b-4f0e-b5c6-9d9e55f99156" containerName="nova-api-log" Dec 10 11:20:02 crc kubenswrapper[4780]: I1210 11:20:02.195977 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="0dda95c4-c42b-4f0e-b5c6-9d9e55f99156" containerName="nova-api-log" Dec 10 11:20:02 crc kubenswrapper[4780]: E1210 11:20:02.196012 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e4c56b09-63a1-4237-b86f-9aae4beeccca" containerName="ceilometer-notification-agent" Dec 10 11:20:02 crc kubenswrapper[4780]: I1210 11:20:02.196029 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="e4c56b09-63a1-4237-b86f-9aae4beeccca" containerName="ceilometer-notification-agent" Dec 10 11:20:02 crc kubenswrapper[4780]: E1210 11:20:02.196041 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b931e42a-a6d4-4b05-996c-f18454ddab28" containerName="init" Dec 10 11:20:02 crc kubenswrapper[4780]: I1210 11:20:02.196049 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="b931e42a-a6d4-4b05-996c-f18454ddab28" containerName="init" Dec 10 11:20:02 crc kubenswrapper[4780]: E1210 11:20:02.196072 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e4c56b09-63a1-4237-b86f-9aae4beeccca" containerName="ceilometer-central-agent" Dec 10 11:20:02 crc kubenswrapper[4780]: I1210 11:20:02.196080 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="e4c56b09-63a1-4237-b86f-9aae4beeccca" containerName="ceilometer-central-agent" Dec 10 11:20:02 crc kubenswrapper[4780]: E1210 11:20:02.196099 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e4c56b09-63a1-4237-b86f-9aae4beeccca" containerName="proxy-httpd" Dec 10 11:20:02 crc kubenswrapper[4780]: I1210 11:20:02.196106 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="e4c56b09-63a1-4237-b86f-9aae4beeccca" containerName="proxy-httpd" Dec 10 11:20:02 crc kubenswrapper[4780]: E1210 11:20:02.196125 4780 cpu_manager.go:410] "RemoveStaleState: removing container" 
podUID="e4c56b09-63a1-4237-b86f-9aae4beeccca" containerName="sg-core" Dec 10 11:20:02 crc kubenswrapper[4780]: I1210 11:20:02.196132 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="e4c56b09-63a1-4237-b86f-9aae4beeccca" containerName="sg-core" Dec 10 11:20:02 crc kubenswrapper[4780]: E1210 11:20:02.196165 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0dda95c4-c42b-4f0e-b5c6-9d9e55f99156" containerName="nova-api-api" Dec 10 11:20:02 crc kubenswrapper[4780]: I1210 11:20:02.196173 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="0dda95c4-c42b-4f0e-b5c6-9d9e55f99156" containerName="nova-api-api" Dec 10 11:20:02 crc kubenswrapper[4780]: E1210 11:20:02.196192 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b931e42a-a6d4-4b05-996c-f18454ddab28" containerName="dnsmasq-dns" Dec 10 11:20:02 crc kubenswrapper[4780]: I1210 11:20:02.196199 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="b931e42a-a6d4-4b05-996c-f18454ddab28" containerName="dnsmasq-dns" Dec 10 11:20:02 crc kubenswrapper[4780]: I1210 11:20:02.196533 4780 memory_manager.go:354] "RemoveStaleState removing state" podUID="0dda95c4-c42b-4f0e-b5c6-9d9e55f99156" containerName="nova-api-api" Dec 10 11:20:02 crc kubenswrapper[4780]: I1210 11:20:02.196557 4780 memory_manager.go:354] "RemoveStaleState removing state" podUID="e4c56b09-63a1-4237-b86f-9aae4beeccca" containerName="sg-core" Dec 10 11:20:02 crc kubenswrapper[4780]: I1210 11:20:02.196576 4780 memory_manager.go:354] "RemoveStaleState removing state" podUID="b931e42a-a6d4-4b05-996c-f18454ddab28" containerName="dnsmasq-dns" Dec 10 11:20:02 crc kubenswrapper[4780]: I1210 11:20:02.196599 4780 memory_manager.go:354] "RemoveStaleState removing state" podUID="e4c56b09-63a1-4237-b86f-9aae4beeccca" containerName="proxy-httpd" Dec 10 11:20:02 crc kubenswrapper[4780]: I1210 11:20:02.196613 4780 memory_manager.go:354] "RemoveStaleState removing state" podUID="0dda95c4-c42b-4f0e-b5c6-9d9e55f99156" containerName="nova-api-log" Dec 10 11:20:02 crc kubenswrapper[4780]: I1210 11:20:02.196634 4780 memory_manager.go:354] "RemoveStaleState removing state" podUID="e4c56b09-63a1-4237-b86f-9aae4beeccca" containerName="ceilometer-notification-agent" Dec 10 11:20:02 crc kubenswrapper[4780]: I1210 11:20:02.196645 4780 memory_manager.go:354] "RemoveStaleState removing state" podUID="e4c56b09-63a1-4237-b86f-9aae4beeccca" containerName="ceilometer-central-agent" Dec 10 11:20:02 crc kubenswrapper[4780]: I1210 11:20:02.198733 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Dec 10 11:20:02 crc kubenswrapper[4780]: I1210 11:20:02.206203 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Dec 10 11:20:02 crc kubenswrapper[4780]: I1210 11:20:02.218955 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-internal-svc" Dec 10 11:20:02 crc kubenswrapper[4780]: I1210 11:20:02.219272 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-public-svc" Dec 10 11:20:02 crc kubenswrapper[4780]: I1210 11:20:02.219609 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Dec 10 11:20:02 crc kubenswrapper[4780]: I1210 11:20:02.223676 4780 scope.go:117] "RemoveContainer" containerID="efe4c859e4036c7962129edb6481bd90df6cfef8813cd570a1f31635885ff74a" Dec 10 11:20:02 crc kubenswrapper[4780]: I1210 11:20:02.232188 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Dec 10 11:20:02 crc kubenswrapper[4780]: I1210 11:20:02.246336 4780 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Dec 10 11:20:02 crc kubenswrapper[4780]: I1210 11:20:02.276246 4780 scope.go:117] "RemoveContainer" containerID="886d8fd5bbff107ab1d905b8b1fbb38d523bfbb7616fb45b1258d576ef28bf95" Dec 10 11:20:02 crc kubenswrapper[4780]: I1210 11:20:02.294290 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Dec 10 11:20:02 crc kubenswrapper[4780]: I1210 11:20:02.298496 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Dec 10 11:20:02 crc kubenswrapper[4780]: I1210 11:20:02.304482 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Dec 10 11:20:02 crc kubenswrapper[4780]: I1210 11:20:02.311432 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Dec 10 11:20:02 crc kubenswrapper[4780]: I1210 11:20:02.311794 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ceilometer-internal-svc" Dec 10 11:20:02 crc kubenswrapper[4780]: I1210 11:20:02.316261 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/38d1b2e8-babc-4999-a9ff-9737e801058a-internal-tls-certs\") pod \"nova-api-0\" (UID: \"38d1b2e8-babc-4999-a9ff-9737e801058a\") " pod="openstack/nova-api-0" Dec 10 11:20:02 crc kubenswrapper[4780]: I1210 11:20:02.316346 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/38d1b2e8-babc-4999-a9ff-9737e801058a-logs\") pod \"nova-api-0\" (UID: \"38d1b2e8-babc-4999-a9ff-9737e801058a\") " pod="openstack/nova-api-0" Dec 10 11:20:02 crc kubenswrapper[4780]: I1210 11:20:02.316832 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/38d1b2e8-babc-4999-a9ff-9737e801058a-public-tls-certs\") pod \"nova-api-0\" (UID: \"38d1b2e8-babc-4999-a9ff-9737e801058a\") " pod="openstack/nova-api-0" Dec 10 11:20:02 crc kubenswrapper[4780]: I1210 11:20:02.317622 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cqn4z\" (UniqueName: 
\"kubernetes.io/projected/38d1b2e8-babc-4999-a9ff-9737e801058a-kube-api-access-cqn4z\") pod \"nova-api-0\" (UID: \"38d1b2e8-babc-4999-a9ff-9737e801058a\") " pod="openstack/nova-api-0" Dec 10 11:20:02 crc kubenswrapper[4780]: I1210 11:20:02.318271 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/38d1b2e8-babc-4999-a9ff-9737e801058a-config-data\") pod \"nova-api-0\" (UID: \"38d1b2e8-babc-4999-a9ff-9737e801058a\") " pod="openstack/nova-api-0" Dec 10 11:20:02 crc kubenswrapper[4780]: I1210 11:20:02.319461 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/38d1b2e8-babc-4999-a9ff-9737e801058a-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"38d1b2e8-babc-4999-a9ff-9737e801058a\") " pod="openstack/nova-api-0" Dec 10 11:20:02 crc kubenswrapper[4780]: I1210 11:20:02.325064 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Dec 10 11:20:02 crc kubenswrapper[4780]: I1210 11:20:02.345894 4780 scope.go:117] "RemoveContainer" containerID="d5f720bbbb469159d3726868dfb59b46d5ebca0acc9e519baba7c4d86c6fc999" Dec 10 11:20:02 crc kubenswrapper[4780]: I1210 11:20:02.374979 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Dec 10 11:20:02 crc kubenswrapper[4780]: E1210 11:20:02.377337 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="unmounted volumes=[ceilometer-tls-certs combined-ca-bundle config-data kube-api-access-mkcpb log-httpd run-httpd scripts sg-core-conf-yaml], unattached volumes=[], failed to process volumes=[]: context canceled" pod="openstack/ceilometer-0" podUID="954046b3-5d7a-40da-b29b-0f491c535025" Dec 10 11:20:02 crc kubenswrapper[4780]: I1210 11:20:02.398457 4780 scope.go:117] "RemoveContainer" containerID="106a8aa3d9595a27d3dfcc9e2d90f339e3f529be2113b542af7db294afe9c81a" Dec 10 11:20:02 crc kubenswrapper[4780]: E1210 11:20:02.399857 4780 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"106a8aa3d9595a27d3dfcc9e2d90f339e3f529be2113b542af7db294afe9c81a\": container with ID starting with 106a8aa3d9595a27d3dfcc9e2d90f339e3f529be2113b542af7db294afe9c81a not found: ID does not exist" containerID="106a8aa3d9595a27d3dfcc9e2d90f339e3f529be2113b542af7db294afe9c81a" Dec 10 11:20:02 crc kubenswrapper[4780]: I1210 11:20:02.399902 4780 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"106a8aa3d9595a27d3dfcc9e2d90f339e3f529be2113b542af7db294afe9c81a"} err="failed to get container status \"106a8aa3d9595a27d3dfcc9e2d90f339e3f529be2113b542af7db294afe9c81a\": rpc error: code = NotFound desc = could not find container \"106a8aa3d9595a27d3dfcc9e2d90f339e3f529be2113b542af7db294afe9c81a\": container with ID starting with 106a8aa3d9595a27d3dfcc9e2d90f339e3f529be2113b542af7db294afe9c81a not found: ID does not exist" Dec 10 11:20:02 crc kubenswrapper[4780]: I1210 11:20:02.399960 4780 scope.go:117] "RemoveContainer" containerID="efe4c859e4036c7962129edb6481bd90df6cfef8813cd570a1f31635885ff74a" Dec 10 11:20:02 crc kubenswrapper[4780]: E1210 11:20:02.400392 4780 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"efe4c859e4036c7962129edb6481bd90df6cfef8813cd570a1f31635885ff74a\": container with ID starting with 
efe4c859e4036c7962129edb6481bd90df6cfef8813cd570a1f31635885ff74a not found: ID does not exist" containerID="efe4c859e4036c7962129edb6481bd90df6cfef8813cd570a1f31635885ff74a" Dec 10 11:20:02 crc kubenswrapper[4780]: I1210 11:20:02.400415 4780 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"efe4c859e4036c7962129edb6481bd90df6cfef8813cd570a1f31635885ff74a"} err="failed to get container status \"efe4c859e4036c7962129edb6481bd90df6cfef8813cd570a1f31635885ff74a\": rpc error: code = NotFound desc = could not find container \"efe4c859e4036c7962129edb6481bd90df6cfef8813cd570a1f31635885ff74a\": container with ID starting with efe4c859e4036c7962129edb6481bd90df6cfef8813cd570a1f31635885ff74a not found: ID does not exist" Dec 10 11:20:02 crc kubenswrapper[4780]: I1210 11:20:02.400435 4780 scope.go:117] "RemoveContainer" containerID="886d8fd5bbff107ab1d905b8b1fbb38d523bfbb7616fb45b1258d576ef28bf95" Dec 10 11:20:02 crc kubenswrapper[4780]: E1210 11:20:02.400720 4780 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"886d8fd5bbff107ab1d905b8b1fbb38d523bfbb7616fb45b1258d576ef28bf95\": container with ID starting with 886d8fd5bbff107ab1d905b8b1fbb38d523bfbb7616fb45b1258d576ef28bf95 not found: ID does not exist" containerID="886d8fd5bbff107ab1d905b8b1fbb38d523bfbb7616fb45b1258d576ef28bf95" Dec 10 11:20:02 crc kubenswrapper[4780]: I1210 11:20:02.400741 4780 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"886d8fd5bbff107ab1d905b8b1fbb38d523bfbb7616fb45b1258d576ef28bf95"} err="failed to get container status \"886d8fd5bbff107ab1d905b8b1fbb38d523bfbb7616fb45b1258d576ef28bf95\": rpc error: code = NotFound desc = could not find container \"886d8fd5bbff107ab1d905b8b1fbb38d523bfbb7616fb45b1258d576ef28bf95\": container with ID starting with 886d8fd5bbff107ab1d905b8b1fbb38d523bfbb7616fb45b1258d576ef28bf95 not found: ID does not exist" Dec 10 11:20:02 crc kubenswrapper[4780]: I1210 11:20:02.400773 4780 scope.go:117] "RemoveContainer" containerID="d5f720bbbb469159d3726868dfb59b46d5ebca0acc9e519baba7c4d86c6fc999" Dec 10 11:20:02 crc kubenswrapper[4780]: E1210 11:20:02.401354 4780 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d5f720bbbb469159d3726868dfb59b46d5ebca0acc9e519baba7c4d86c6fc999\": container with ID starting with d5f720bbbb469159d3726868dfb59b46d5ebca0acc9e519baba7c4d86c6fc999 not found: ID does not exist" containerID="d5f720bbbb469159d3726868dfb59b46d5ebca0acc9e519baba7c4d86c6fc999" Dec 10 11:20:02 crc kubenswrapper[4780]: I1210 11:20:02.401425 4780 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d5f720bbbb469159d3726868dfb59b46d5ebca0acc9e519baba7c4d86c6fc999"} err="failed to get container status \"d5f720bbbb469159d3726868dfb59b46d5ebca0acc9e519baba7c4d86c6fc999\": rpc error: code = NotFound desc = could not find container \"d5f720bbbb469159d3726868dfb59b46d5ebca0acc9e519baba7c4d86c6fc999\": container with ID starting with d5f720bbbb469159d3726868dfb59b46d5ebca0acc9e519baba7c4d86c6fc999 not found: ID does not exist" Dec 10 11:20:02 crc kubenswrapper[4780]: I1210 11:20:02.424252 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/38d1b2e8-babc-4999-a9ff-9737e801058a-internal-tls-certs\") pod \"nova-api-0\" (UID: 
\"38d1b2e8-babc-4999-a9ff-9737e801058a\") " pod="openstack/nova-api-0" Dec 10 11:20:02 crc kubenswrapper[4780]: I1210 11:20:02.424332 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/954046b3-5d7a-40da-b29b-0f491c535025-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"954046b3-5d7a-40da-b29b-0f491c535025\") " pod="openstack/ceilometer-0" Dec 10 11:20:02 crc kubenswrapper[4780]: I1210 11:20:02.424358 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/954046b3-5d7a-40da-b29b-0f491c535025-log-httpd\") pod \"ceilometer-0\" (UID: \"954046b3-5d7a-40da-b29b-0f491c535025\") " pod="openstack/ceilometer-0" Dec 10 11:20:02 crc kubenswrapper[4780]: I1210 11:20:02.424385 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/954046b3-5d7a-40da-b29b-0f491c535025-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"954046b3-5d7a-40da-b29b-0f491c535025\") " pod="openstack/ceilometer-0" Dec 10 11:20:02 crc kubenswrapper[4780]: I1210 11:20:02.424428 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/38d1b2e8-babc-4999-a9ff-9737e801058a-logs\") pod \"nova-api-0\" (UID: \"38d1b2e8-babc-4999-a9ff-9737e801058a\") " pod="openstack/nova-api-0" Dec 10 11:20:02 crc kubenswrapper[4780]: I1210 11:20:02.424542 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/38d1b2e8-babc-4999-a9ff-9737e801058a-public-tls-certs\") pod \"nova-api-0\" (UID: \"38d1b2e8-babc-4999-a9ff-9737e801058a\") " pod="openstack/nova-api-0" Dec 10 11:20:02 crc kubenswrapper[4780]: I1210 11:20:02.424592 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/954046b3-5d7a-40da-b29b-0f491c535025-run-httpd\") pod \"ceilometer-0\" (UID: \"954046b3-5d7a-40da-b29b-0f491c535025\") " pod="openstack/ceilometer-0" Dec 10 11:20:02 crc kubenswrapper[4780]: I1210 11:20:02.424611 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/954046b3-5d7a-40da-b29b-0f491c535025-config-data\") pod \"ceilometer-0\" (UID: \"954046b3-5d7a-40da-b29b-0f491c535025\") " pod="openstack/ceilometer-0" Dec 10 11:20:02 crc kubenswrapper[4780]: I1210 11:20:02.424657 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqn4z\" (UniqueName: \"kubernetes.io/projected/38d1b2e8-babc-4999-a9ff-9737e801058a-kube-api-access-cqn4z\") pod \"nova-api-0\" (UID: \"38d1b2e8-babc-4999-a9ff-9737e801058a\") " pod="openstack/nova-api-0" Dec 10 11:20:02 crc kubenswrapper[4780]: I1210 11:20:02.424693 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/954046b3-5d7a-40da-b29b-0f491c535025-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"954046b3-5d7a-40da-b29b-0f491c535025\") " pod="openstack/ceilometer-0" Dec 10 11:20:02 crc kubenswrapper[4780]: I1210 11:20:02.424801 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: 
\"kubernetes.io/secret/38d1b2e8-babc-4999-a9ff-9737e801058a-config-data\") pod \"nova-api-0\" (UID: \"38d1b2e8-babc-4999-a9ff-9737e801058a\") " pod="openstack/nova-api-0" Dec 10 11:20:02 crc kubenswrapper[4780]: I1210 11:20:02.424849 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/954046b3-5d7a-40da-b29b-0f491c535025-scripts\") pod \"ceilometer-0\" (UID: \"954046b3-5d7a-40da-b29b-0f491c535025\") " pod="openstack/ceilometer-0" Dec 10 11:20:02 crc kubenswrapper[4780]: I1210 11:20:02.425026 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mkcpb\" (UniqueName: \"kubernetes.io/projected/954046b3-5d7a-40da-b29b-0f491c535025-kube-api-access-mkcpb\") pod \"ceilometer-0\" (UID: \"954046b3-5d7a-40da-b29b-0f491c535025\") " pod="openstack/ceilometer-0" Dec 10 11:20:02 crc kubenswrapper[4780]: I1210 11:20:02.425095 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/38d1b2e8-babc-4999-a9ff-9737e801058a-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"38d1b2e8-babc-4999-a9ff-9737e801058a\") " pod="openstack/nova-api-0" Dec 10 11:20:02 crc kubenswrapper[4780]: I1210 11:20:02.428169 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/38d1b2e8-babc-4999-a9ff-9737e801058a-logs\") pod \"nova-api-0\" (UID: \"38d1b2e8-babc-4999-a9ff-9737e801058a\") " pod="openstack/nova-api-0" Dec 10 11:20:02 crc kubenswrapper[4780]: I1210 11:20:02.430522 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/38d1b2e8-babc-4999-a9ff-9737e801058a-internal-tls-certs\") pod \"nova-api-0\" (UID: \"38d1b2e8-babc-4999-a9ff-9737e801058a\") " pod="openstack/nova-api-0" Dec 10 11:20:02 crc kubenswrapper[4780]: I1210 11:20:02.431813 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/38d1b2e8-babc-4999-a9ff-9737e801058a-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"38d1b2e8-babc-4999-a9ff-9737e801058a\") " pod="openstack/nova-api-0" Dec 10 11:20:02 crc kubenswrapper[4780]: I1210 11:20:02.432749 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/38d1b2e8-babc-4999-a9ff-9737e801058a-config-data\") pod \"nova-api-0\" (UID: \"38d1b2e8-babc-4999-a9ff-9737e801058a\") " pod="openstack/nova-api-0" Dec 10 11:20:02 crc kubenswrapper[4780]: I1210 11:20:02.435991 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/38d1b2e8-babc-4999-a9ff-9737e801058a-public-tls-certs\") pod \"nova-api-0\" (UID: \"38d1b2e8-babc-4999-a9ff-9737e801058a\") " pod="openstack/nova-api-0" Dec 10 11:20:02 crc kubenswrapper[4780]: I1210 11:20:02.461755 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cqn4z\" (UniqueName: \"kubernetes.io/projected/38d1b2e8-babc-4999-a9ff-9737e801058a-kube-api-access-cqn4z\") pod \"nova-api-0\" (UID: \"38d1b2e8-babc-4999-a9ff-9737e801058a\") " pod="openstack/nova-api-0" Dec 10 11:20:02 crc kubenswrapper[4780]: I1210 11:20:02.529935 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: 
\"kubernetes.io/secret/954046b3-5d7a-40da-b29b-0f491c535025-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"954046b3-5d7a-40da-b29b-0f491c535025\") " pod="openstack/ceilometer-0" Dec 10 11:20:02 crc kubenswrapper[4780]: I1210 11:20:02.530011 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/954046b3-5d7a-40da-b29b-0f491c535025-log-httpd\") pod \"ceilometer-0\" (UID: \"954046b3-5d7a-40da-b29b-0f491c535025\") " pod="openstack/ceilometer-0" Dec 10 11:20:02 crc kubenswrapper[4780]: I1210 11:20:02.530044 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/954046b3-5d7a-40da-b29b-0f491c535025-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"954046b3-5d7a-40da-b29b-0f491c535025\") " pod="openstack/ceilometer-0" Dec 10 11:20:02 crc kubenswrapper[4780]: I1210 11:20:02.530178 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/954046b3-5d7a-40da-b29b-0f491c535025-run-httpd\") pod \"ceilometer-0\" (UID: \"954046b3-5d7a-40da-b29b-0f491c535025\") " pod="openstack/ceilometer-0" Dec 10 11:20:02 crc kubenswrapper[4780]: I1210 11:20:02.530212 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/954046b3-5d7a-40da-b29b-0f491c535025-config-data\") pod \"ceilometer-0\" (UID: \"954046b3-5d7a-40da-b29b-0f491c535025\") " pod="openstack/ceilometer-0" Dec 10 11:20:02 crc kubenswrapper[4780]: I1210 11:20:02.530272 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/954046b3-5d7a-40da-b29b-0f491c535025-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"954046b3-5d7a-40da-b29b-0f491c535025\") " pod="openstack/ceilometer-0" Dec 10 11:20:02 crc kubenswrapper[4780]: I1210 11:20:02.530391 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/954046b3-5d7a-40da-b29b-0f491c535025-scripts\") pod \"ceilometer-0\" (UID: \"954046b3-5d7a-40da-b29b-0f491c535025\") " pod="openstack/ceilometer-0" Dec 10 11:20:02 crc kubenswrapper[4780]: I1210 11:20:02.530423 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mkcpb\" (UniqueName: \"kubernetes.io/projected/954046b3-5d7a-40da-b29b-0f491c535025-kube-api-access-mkcpb\") pod \"ceilometer-0\" (UID: \"954046b3-5d7a-40da-b29b-0f491c535025\") " pod="openstack/ceilometer-0" Dec 10 11:20:02 crc kubenswrapper[4780]: I1210 11:20:02.530600 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/954046b3-5d7a-40da-b29b-0f491c535025-log-httpd\") pod \"ceilometer-0\" (UID: \"954046b3-5d7a-40da-b29b-0f491c535025\") " pod="openstack/ceilometer-0" Dec 10 11:20:02 crc kubenswrapper[4780]: I1210 11:20:02.532089 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/954046b3-5d7a-40da-b29b-0f491c535025-run-httpd\") pod \"ceilometer-0\" (UID: \"954046b3-5d7a-40da-b29b-0f491c535025\") " pod="openstack/ceilometer-0" Dec 10 11:20:02 crc kubenswrapper[4780]: I1210 11:20:02.535123 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: 
\"kubernetes.io/secret/954046b3-5d7a-40da-b29b-0f491c535025-config-data\") pod \"ceilometer-0\" (UID: \"954046b3-5d7a-40da-b29b-0f491c535025\") " pod="openstack/ceilometer-0" Dec 10 11:20:02 crc kubenswrapper[4780]: I1210 11:20:02.535995 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/954046b3-5d7a-40da-b29b-0f491c535025-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"954046b3-5d7a-40da-b29b-0f491c535025\") " pod="openstack/ceilometer-0" Dec 10 11:20:02 crc kubenswrapper[4780]: I1210 11:20:02.536161 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/954046b3-5d7a-40da-b29b-0f491c535025-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"954046b3-5d7a-40da-b29b-0f491c535025\") " pod="openstack/ceilometer-0" Dec 10 11:20:02 crc kubenswrapper[4780]: I1210 11:20:02.537465 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/954046b3-5d7a-40da-b29b-0f491c535025-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"954046b3-5d7a-40da-b29b-0f491c535025\") " pod="openstack/ceilometer-0" Dec 10 11:20:02 crc kubenswrapper[4780]: I1210 11:20:02.542456 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/954046b3-5d7a-40da-b29b-0f491c535025-scripts\") pod \"ceilometer-0\" (UID: \"954046b3-5d7a-40da-b29b-0f491c535025\") " pod="openstack/ceilometer-0" Dec 10 11:20:02 crc kubenswrapper[4780]: I1210 11:20:02.557672 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mkcpb\" (UniqueName: \"kubernetes.io/projected/954046b3-5d7a-40da-b29b-0f491c535025-kube-api-access-mkcpb\") pod \"ceilometer-0\" (UID: \"954046b3-5d7a-40da-b29b-0f491c535025\") " pod="openstack/ceilometer-0" Dec 10 11:20:02 crc kubenswrapper[4780]: I1210 11:20:02.574214 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Dec 10 11:20:02 crc kubenswrapper[4780]: I1210 11:20:02.934787 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Dec 10 11:20:02 crc kubenswrapper[4780]: I1210 11:20:02.974171 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Dec 10 11:20:03 crc kubenswrapper[4780]: I1210 11:20:03.104496 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/954046b3-5d7a-40da-b29b-0f491c535025-scripts\") pod \"954046b3-5d7a-40da-b29b-0f491c535025\" (UID: \"954046b3-5d7a-40da-b29b-0f491c535025\") " Dec 10 11:20:03 crc kubenswrapper[4780]: I1210 11:20:03.106692 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/954046b3-5d7a-40da-b29b-0f491c535025-sg-core-conf-yaml\") pod \"954046b3-5d7a-40da-b29b-0f491c535025\" (UID: \"954046b3-5d7a-40da-b29b-0f491c535025\") " Dec 10 11:20:03 crc kubenswrapper[4780]: I1210 11:20:03.106868 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/954046b3-5d7a-40da-b29b-0f491c535025-log-httpd\") pod \"954046b3-5d7a-40da-b29b-0f491c535025\" (UID: \"954046b3-5d7a-40da-b29b-0f491c535025\") " Dec 10 11:20:03 crc kubenswrapper[4780]: I1210 11:20:03.115331 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mkcpb\" (UniqueName: \"kubernetes.io/projected/954046b3-5d7a-40da-b29b-0f491c535025-kube-api-access-mkcpb\") pod \"954046b3-5d7a-40da-b29b-0f491c535025\" (UID: \"954046b3-5d7a-40da-b29b-0f491c535025\") " Dec 10 11:20:03 crc kubenswrapper[4780]: I1210 11:20:03.115537 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/954046b3-5d7a-40da-b29b-0f491c535025-config-data\") pod \"954046b3-5d7a-40da-b29b-0f491c535025\" (UID: \"954046b3-5d7a-40da-b29b-0f491c535025\") " Dec 10 11:20:03 crc kubenswrapper[4780]: I1210 11:20:03.115711 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/954046b3-5d7a-40da-b29b-0f491c535025-combined-ca-bundle\") pod \"954046b3-5d7a-40da-b29b-0f491c535025\" (UID: \"954046b3-5d7a-40da-b29b-0f491c535025\") " Dec 10 11:20:03 crc kubenswrapper[4780]: I1210 11:20:03.115937 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/954046b3-5d7a-40da-b29b-0f491c535025-run-httpd\") pod \"954046b3-5d7a-40da-b29b-0f491c535025\" (UID: \"954046b3-5d7a-40da-b29b-0f491c535025\") " Dec 10 11:20:03 crc kubenswrapper[4780]: I1210 11:20:03.116060 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/954046b3-5d7a-40da-b29b-0f491c535025-ceilometer-tls-certs\") pod \"954046b3-5d7a-40da-b29b-0f491c535025\" (UID: \"954046b3-5d7a-40da-b29b-0f491c535025\") " Dec 10 11:20:03 crc kubenswrapper[4780]: I1210 11:20:03.119656 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/954046b3-5d7a-40da-b29b-0f491c535025-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "954046b3-5d7a-40da-b29b-0f491c535025" (UID: "954046b3-5d7a-40da-b29b-0f491c535025"). InnerVolumeSpecName "log-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 11:20:03 crc kubenswrapper[4780]: I1210 11:20:03.120160 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/954046b3-5d7a-40da-b29b-0f491c535025-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "954046b3-5d7a-40da-b29b-0f491c535025" (UID: "954046b3-5d7a-40da-b29b-0f491c535025"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 11:20:03 crc kubenswrapper[4780]: I1210 11:20:03.130857 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/954046b3-5d7a-40da-b29b-0f491c535025-scripts" (OuterVolumeSpecName: "scripts") pod "954046b3-5d7a-40da-b29b-0f491c535025" (UID: "954046b3-5d7a-40da-b29b-0f491c535025"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:20:03 crc kubenswrapper[4780]: I1210 11:20:03.137294 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/954046b3-5d7a-40da-b29b-0f491c535025-ceilometer-tls-certs" (OuterVolumeSpecName: "ceilometer-tls-certs") pod "954046b3-5d7a-40da-b29b-0f491c535025" (UID: "954046b3-5d7a-40da-b29b-0f491c535025"). InnerVolumeSpecName "ceilometer-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:20:03 crc kubenswrapper[4780]: I1210 11:20:03.152274 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/954046b3-5d7a-40da-b29b-0f491c535025-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "954046b3-5d7a-40da-b29b-0f491c535025" (UID: "954046b3-5d7a-40da-b29b-0f491c535025"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:20:03 crc kubenswrapper[4780]: I1210 11:20:03.170209 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/954046b3-5d7a-40da-b29b-0f491c535025-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "954046b3-5d7a-40da-b29b-0f491c535025" (UID: "954046b3-5d7a-40da-b29b-0f491c535025"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:20:03 crc kubenswrapper[4780]: I1210 11:20:03.170482 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/954046b3-5d7a-40da-b29b-0f491c535025-kube-api-access-mkcpb" (OuterVolumeSpecName: "kube-api-access-mkcpb") pod "954046b3-5d7a-40da-b29b-0f491c535025" (UID: "954046b3-5d7a-40da-b29b-0f491c535025"). InnerVolumeSpecName "kube-api-access-mkcpb". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:20:03 crc kubenswrapper[4780]: I1210 11:20:03.184783 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/954046b3-5d7a-40da-b29b-0f491c535025-config-data" (OuterVolumeSpecName: "config-data") pod "954046b3-5d7a-40da-b29b-0f491c535025" (UID: "954046b3-5d7a-40da-b29b-0f491c535025"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:20:03 crc kubenswrapper[4780]: I1210 11:20:03.220065 4780 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/954046b3-5d7a-40da-b29b-0f491c535025-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Dec 10 11:20:03 crc kubenswrapper[4780]: I1210 11:20:03.220464 4780 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/954046b3-5d7a-40da-b29b-0f491c535025-log-httpd\") on node \"crc\" DevicePath \"\"" Dec 10 11:20:03 crc kubenswrapper[4780]: I1210 11:20:03.220566 4780 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mkcpb\" (UniqueName: \"kubernetes.io/projected/954046b3-5d7a-40da-b29b-0f491c535025-kube-api-access-mkcpb\") on node \"crc\" DevicePath \"\"" Dec 10 11:20:03 crc kubenswrapper[4780]: I1210 11:20:03.220631 4780 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/954046b3-5d7a-40da-b29b-0f491c535025-config-data\") on node \"crc\" DevicePath \"\"" Dec 10 11:20:03 crc kubenswrapper[4780]: I1210 11:20:03.220693 4780 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/954046b3-5d7a-40da-b29b-0f491c535025-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 10 11:20:03 crc kubenswrapper[4780]: I1210 11:20:03.220750 4780 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/954046b3-5d7a-40da-b29b-0f491c535025-run-httpd\") on node \"crc\" DevicePath \"\"" Dec 10 11:20:03 crc kubenswrapper[4780]: I1210 11:20:03.220814 4780 reconciler_common.go:293] "Volume detached for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/954046b3-5d7a-40da-b29b-0f491c535025-ceilometer-tls-certs\") on node \"crc\" DevicePath \"\"" Dec 10 11:20:03 crc kubenswrapper[4780]: I1210 11:20:03.220881 4780 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/954046b3-5d7a-40da-b29b-0f491c535025-scripts\") on node \"crc\" DevicePath \"\"" Dec 10 11:20:03 crc kubenswrapper[4780]: I1210 11:20:03.389597 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Dec 10 11:20:03 crc kubenswrapper[4780]: I1210 11:20:03.965743 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Dec 10 11:20:03 crc kubenswrapper[4780]: I1210 11:20:03.978770 4780 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0dda95c4-c42b-4f0e-b5c6-9d9e55f99156" path="/var/lib/kubelet/pods/0dda95c4-c42b-4f0e-b5c6-9d9e55f99156/volumes" Dec 10 11:20:03 crc kubenswrapper[4780]: I1210 11:20:03.979597 4780 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b931e42a-a6d4-4b05-996c-f18454ddab28" path="/var/lib/kubelet/pods/b931e42a-a6d4-4b05-996c-f18454ddab28/volumes" Dec 10 11:20:03 crc kubenswrapper[4780]: I1210 11:20:03.980621 4780 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e4c56b09-63a1-4237-b86f-9aae4beeccca" path="/var/lib/kubelet/pods/e4c56b09-63a1-4237-b86f-9aae4beeccca/volumes" Dec 10 11:20:03 crc kubenswrapper[4780]: I1210 11:20:03.982723 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"38d1b2e8-babc-4999-a9ff-9737e801058a","Type":"ContainerStarted","Data":"923fe5b17462bc697d8d975430e1b101178f8f0a637c1e1eedb426cd3c3254b8"} Dec 10 11:20:03 crc kubenswrapper[4780]: I1210 11:20:03.982759 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"38d1b2e8-babc-4999-a9ff-9737e801058a","Type":"ContainerStarted","Data":"0ba9e1cbac4b77db11334aec3205785ad402ec481d787478e14c5c393a445afc"} Dec 10 11:20:04 crc kubenswrapper[4780]: I1210 11:20:04.161310 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Dec 10 11:20:04 crc kubenswrapper[4780]: I1210 11:20:04.200615 4780 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Dec 10 11:20:04 crc kubenswrapper[4780]: I1210 11:20:04.241371 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Dec 10 11:20:04 crc kubenswrapper[4780]: I1210 11:20:04.247643 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Dec 10 11:20:04 crc kubenswrapper[4780]: I1210 11:20:04.253193 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ceilometer-internal-svc" Dec 10 11:20:04 crc kubenswrapper[4780]: I1210 11:20:04.253591 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Dec 10 11:20:04 crc kubenswrapper[4780]: I1210 11:20:04.253762 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Dec 10 11:20:04 crc kubenswrapper[4780]: I1210 11:20:04.261574 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Dec 10 11:20:04 crc kubenswrapper[4780]: I1210 11:20:04.375374 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cba3d639-6379-42fd-bdb7-072dcf87f78f-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"cba3d639-6379-42fd-bdb7-072dcf87f78f\") " pod="openstack/ceilometer-0" Dec 10 11:20:04 crc kubenswrapper[4780]: I1210 11:20:04.376038 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/cba3d639-6379-42fd-bdb7-072dcf87f78f-run-httpd\") pod \"ceilometer-0\" (UID: \"cba3d639-6379-42fd-bdb7-072dcf87f78f\") " pod="openstack/ceilometer-0" Dec 10 11:20:04 crc kubenswrapper[4780]: I1210 11:20:04.376290 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/cba3d639-6379-42fd-bdb7-072dcf87f78f-log-httpd\") pod \"ceilometer-0\" (UID: \"cba3d639-6379-42fd-bdb7-072dcf87f78f\") " pod="openstack/ceilometer-0" Dec 10 11:20:04 crc kubenswrapper[4780]: I1210 11:20:04.376549 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cba3d639-6379-42fd-bdb7-072dcf87f78f-config-data\") pod \"ceilometer-0\" (UID: \"cba3d639-6379-42fd-bdb7-072dcf87f78f\") " pod="openstack/ceilometer-0" Dec 10 11:20:04 crc kubenswrapper[4780]: I1210 11:20:04.376588 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/cba3d639-6379-42fd-bdb7-072dcf87f78f-scripts\") pod \"ceilometer-0\" (UID: \"cba3d639-6379-42fd-bdb7-072dcf87f78f\") " pod="openstack/ceilometer-0" Dec 10 11:20:04 crc kubenswrapper[4780]: I1210 11:20:04.376727 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/cba3d639-6379-42fd-bdb7-072dcf87f78f-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"cba3d639-6379-42fd-bdb7-072dcf87f78f\") " pod="openstack/ceilometer-0" Dec 10 11:20:04 crc kubenswrapper[4780]: I1210 11:20:04.376793 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-768kb\" (UniqueName: \"kubernetes.io/projected/cba3d639-6379-42fd-bdb7-072dcf87f78f-kube-api-access-768kb\") pod \"ceilometer-0\" (UID: \"cba3d639-6379-42fd-bdb7-072dcf87f78f\") " pod="openstack/ceilometer-0" Dec 10 11:20:04 crc kubenswrapper[4780]: I1210 11:20:04.378203 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: 
\"kubernetes.io/secret/cba3d639-6379-42fd-bdb7-072dcf87f78f-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"cba3d639-6379-42fd-bdb7-072dcf87f78f\") " pod="openstack/ceilometer-0" Dec 10 11:20:04 crc kubenswrapper[4780]: I1210 11:20:04.480543 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cba3d639-6379-42fd-bdb7-072dcf87f78f-config-data\") pod \"ceilometer-0\" (UID: \"cba3d639-6379-42fd-bdb7-072dcf87f78f\") " pod="openstack/ceilometer-0" Dec 10 11:20:04 crc kubenswrapper[4780]: I1210 11:20:04.480642 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/cba3d639-6379-42fd-bdb7-072dcf87f78f-scripts\") pod \"ceilometer-0\" (UID: \"cba3d639-6379-42fd-bdb7-072dcf87f78f\") " pod="openstack/ceilometer-0" Dec 10 11:20:04 crc kubenswrapper[4780]: I1210 11:20:04.480713 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/cba3d639-6379-42fd-bdb7-072dcf87f78f-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"cba3d639-6379-42fd-bdb7-072dcf87f78f\") " pod="openstack/ceilometer-0" Dec 10 11:20:04 crc kubenswrapper[4780]: I1210 11:20:04.480758 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-768kb\" (UniqueName: \"kubernetes.io/projected/cba3d639-6379-42fd-bdb7-072dcf87f78f-kube-api-access-768kb\") pod \"ceilometer-0\" (UID: \"cba3d639-6379-42fd-bdb7-072dcf87f78f\") " pod="openstack/ceilometer-0" Dec 10 11:20:04 crc kubenswrapper[4780]: I1210 11:20:04.480905 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/cba3d639-6379-42fd-bdb7-072dcf87f78f-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"cba3d639-6379-42fd-bdb7-072dcf87f78f\") " pod="openstack/ceilometer-0" Dec 10 11:20:04 crc kubenswrapper[4780]: I1210 11:20:04.480974 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cba3d639-6379-42fd-bdb7-072dcf87f78f-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"cba3d639-6379-42fd-bdb7-072dcf87f78f\") " pod="openstack/ceilometer-0" Dec 10 11:20:04 crc kubenswrapper[4780]: I1210 11:20:04.481025 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/cba3d639-6379-42fd-bdb7-072dcf87f78f-run-httpd\") pod \"ceilometer-0\" (UID: \"cba3d639-6379-42fd-bdb7-072dcf87f78f\") " pod="openstack/ceilometer-0" Dec 10 11:20:04 crc kubenswrapper[4780]: I1210 11:20:04.481060 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/cba3d639-6379-42fd-bdb7-072dcf87f78f-log-httpd\") pod \"ceilometer-0\" (UID: \"cba3d639-6379-42fd-bdb7-072dcf87f78f\") " pod="openstack/ceilometer-0" Dec 10 11:20:04 crc kubenswrapper[4780]: I1210 11:20:04.481787 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/cba3d639-6379-42fd-bdb7-072dcf87f78f-log-httpd\") pod \"ceilometer-0\" (UID: \"cba3d639-6379-42fd-bdb7-072dcf87f78f\") " pod="openstack/ceilometer-0" Dec 10 11:20:04 crc kubenswrapper[4780]: I1210 11:20:04.482554 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: 
\"kubernetes.io/empty-dir/cba3d639-6379-42fd-bdb7-072dcf87f78f-run-httpd\") pod \"ceilometer-0\" (UID: \"cba3d639-6379-42fd-bdb7-072dcf87f78f\") " pod="openstack/ceilometer-0" Dec 10 11:20:04 crc kubenswrapper[4780]: I1210 11:20:04.487091 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/cba3d639-6379-42fd-bdb7-072dcf87f78f-scripts\") pod \"ceilometer-0\" (UID: \"cba3d639-6379-42fd-bdb7-072dcf87f78f\") " pod="openstack/ceilometer-0" Dec 10 11:20:04 crc kubenswrapper[4780]: I1210 11:20:04.490618 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/cba3d639-6379-42fd-bdb7-072dcf87f78f-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"cba3d639-6379-42fd-bdb7-072dcf87f78f\") " pod="openstack/ceilometer-0" Dec 10 11:20:04 crc kubenswrapper[4780]: I1210 11:20:04.492807 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cba3d639-6379-42fd-bdb7-072dcf87f78f-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"cba3d639-6379-42fd-bdb7-072dcf87f78f\") " pod="openstack/ceilometer-0" Dec 10 11:20:04 crc kubenswrapper[4780]: I1210 11:20:04.494205 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/cba3d639-6379-42fd-bdb7-072dcf87f78f-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"cba3d639-6379-42fd-bdb7-072dcf87f78f\") " pod="openstack/ceilometer-0" Dec 10 11:20:04 crc kubenswrapper[4780]: I1210 11:20:04.496469 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cba3d639-6379-42fd-bdb7-072dcf87f78f-config-data\") pod \"ceilometer-0\" (UID: \"cba3d639-6379-42fd-bdb7-072dcf87f78f\") " pod="openstack/ceilometer-0" Dec 10 11:20:04 crc kubenswrapper[4780]: I1210 11:20:04.503627 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-768kb\" (UniqueName: \"kubernetes.io/projected/cba3d639-6379-42fd-bdb7-072dcf87f78f-kube-api-access-768kb\") pod \"ceilometer-0\" (UID: \"cba3d639-6379-42fd-bdb7-072dcf87f78f\") " pod="openstack/ceilometer-0" Dec 10 11:20:04 crc kubenswrapper[4780]: I1210 11:20:04.581681 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Dec 10 11:20:05 crc kubenswrapper[4780]: I1210 11:20:05.306945 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"38d1b2e8-babc-4999-a9ff-9737e801058a","Type":"ContainerStarted","Data":"7c29bf646fe2a0a144febff357251e1518947775aef5ecc54c88b97f1496cd7f"} Dec 10 11:20:05 crc kubenswrapper[4780]: I1210 11:20:05.355182 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=3.35331451 podStartE2EDuration="3.35331451s" podCreationTimestamp="2025-12-10 11:20:02 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 11:20:05.346142096 +0000 UTC m=+2110.199535539" watchObservedRunningTime="2025-12-10 11:20:05.35331451 +0000 UTC m=+2110.206707953" Dec 10 11:20:05 crc kubenswrapper[4780]: I1210 11:20:05.423896 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Dec 10 11:20:05 crc kubenswrapper[4780]: I1210 11:20:05.981409 4780 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="954046b3-5d7a-40da-b29b-0f491c535025" path="/var/lib/kubelet/pods/954046b3-5d7a-40da-b29b-0f491c535025/volumes" Dec 10 11:20:06 crc kubenswrapper[4780]: I1210 11:20:06.232055 4780 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-97lpn" podUID="b3c9cd45-f8fa-4329-8ad0-0a07e2fd14ae" containerName="registry-server" probeResult="failure" output=< Dec 10 11:20:06 crc kubenswrapper[4780]: timeout: failed to connect service ":50051" within 1s Dec 10 11:20:06 crc kubenswrapper[4780]: > Dec 10 11:20:06 crc kubenswrapper[4780]: I1210 11:20:06.340346 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"cba3d639-6379-42fd-bdb7-072dcf87f78f","Type":"ContainerStarted","Data":"e437f6730268f7f2ad69a06408eead4fa0089bac665f00f6679622c4f1c5b30c"} Dec 10 11:20:06 crc kubenswrapper[4780]: I1210 11:20:06.344043 4780 generic.go:334] "Generic (PLEG): container finished" podID="d296ea05-7391-4cd2-a807-1168a1547cb6" containerID="58e831f01e8b2dac91b4455f4311f9c942493c697cffcabc861b705c151f9418" exitCode=0 Dec 10 11:20:06 crc kubenswrapper[4780]: I1210 11:20:06.345648 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-qkv7r" event={"ID":"d296ea05-7391-4cd2-a807-1168a1547cb6","Type":"ContainerDied","Data":"58e831f01e8b2dac91b4455f4311f9c942493c697cffcabc861b705c151f9418"} Dec 10 11:20:07 crc kubenswrapper[4780]: I1210 11:20:07.397321 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"cba3d639-6379-42fd-bdb7-072dcf87f78f","Type":"ContainerStarted","Data":"25f0df2fa9b782063ef3d7c20587bb703538055926bde0fe32614c8c7aef7812"} Dec 10 11:20:08 crc kubenswrapper[4780]: I1210 11:20:08.461379 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"cba3d639-6379-42fd-bdb7-072dcf87f78f","Type":"ContainerStarted","Data":"7464de1995d228f9845884516bd18c1e32598a04ab32fd93b3925a534f3a5771"} Dec 10 11:20:08 crc kubenswrapper[4780]: I1210 11:20:08.469294 4780 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-cell-mapping-qkv7r" Dec 10 11:20:08 crc kubenswrapper[4780]: I1210 11:20:08.469825 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-qkv7r" event={"ID":"d296ea05-7391-4cd2-a807-1168a1547cb6","Type":"ContainerDied","Data":"9be787ecbd984f532d736abb786f8e80946d799871cf68d8600515985cc99182"} Dec 10 11:20:08 crc kubenswrapper[4780]: I1210 11:20:08.469997 4780 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="9be787ecbd984f532d736abb786f8e80946d799871cf68d8600515985cc99182" Dec 10 11:20:08 crc kubenswrapper[4780]: I1210 11:20:08.595380 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d296ea05-7391-4cd2-a807-1168a1547cb6-config-data\") pod \"d296ea05-7391-4cd2-a807-1168a1547cb6\" (UID: \"d296ea05-7391-4cd2-a807-1168a1547cb6\") " Dec 10 11:20:08 crc kubenswrapper[4780]: I1210 11:20:08.596050 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d296ea05-7391-4cd2-a807-1168a1547cb6-combined-ca-bundle\") pod \"d296ea05-7391-4cd2-a807-1168a1547cb6\" (UID: \"d296ea05-7391-4cd2-a807-1168a1547cb6\") " Dec 10 11:20:08 crc kubenswrapper[4780]: I1210 11:20:08.596996 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rl5rp\" (UniqueName: \"kubernetes.io/projected/d296ea05-7391-4cd2-a807-1168a1547cb6-kube-api-access-rl5rp\") pod \"d296ea05-7391-4cd2-a807-1168a1547cb6\" (UID: \"d296ea05-7391-4cd2-a807-1168a1547cb6\") " Dec 10 11:20:08 crc kubenswrapper[4780]: I1210 11:20:08.597146 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d296ea05-7391-4cd2-a807-1168a1547cb6-scripts\") pod \"d296ea05-7391-4cd2-a807-1168a1547cb6\" (UID: \"d296ea05-7391-4cd2-a807-1168a1547cb6\") " Dec 10 11:20:08 crc kubenswrapper[4780]: I1210 11:20:08.609092 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d296ea05-7391-4cd2-a807-1168a1547cb6-scripts" (OuterVolumeSpecName: "scripts") pod "d296ea05-7391-4cd2-a807-1168a1547cb6" (UID: "d296ea05-7391-4cd2-a807-1168a1547cb6"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:20:08 crc kubenswrapper[4780]: I1210 11:20:08.619673 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d296ea05-7391-4cd2-a807-1168a1547cb6-kube-api-access-rl5rp" (OuterVolumeSpecName: "kube-api-access-rl5rp") pod "d296ea05-7391-4cd2-a807-1168a1547cb6" (UID: "d296ea05-7391-4cd2-a807-1168a1547cb6"). InnerVolumeSpecName "kube-api-access-rl5rp". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:20:08 crc kubenswrapper[4780]: I1210 11:20:08.663143 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d296ea05-7391-4cd2-a807-1168a1547cb6-config-data" (OuterVolumeSpecName: "config-data") pod "d296ea05-7391-4cd2-a807-1168a1547cb6" (UID: "d296ea05-7391-4cd2-a807-1168a1547cb6"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:20:08 crc kubenswrapper[4780]: I1210 11:20:08.679894 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d296ea05-7391-4cd2-a807-1168a1547cb6-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "d296ea05-7391-4cd2-a807-1168a1547cb6" (UID: "d296ea05-7391-4cd2-a807-1168a1547cb6"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:20:08 crc kubenswrapper[4780]: I1210 11:20:08.710773 4780 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d296ea05-7391-4cd2-a807-1168a1547cb6-scripts\") on node \"crc\" DevicePath \"\"" Dec 10 11:20:08 crc kubenswrapper[4780]: I1210 11:20:08.710848 4780 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d296ea05-7391-4cd2-a807-1168a1547cb6-config-data\") on node \"crc\" DevicePath \"\"" Dec 10 11:20:08 crc kubenswrapper[4780]: I1210 11:20:08.710866 4780 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d296ea05-7391-4cd2-a807-1168a1547cb6-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 10 11:20:08 crc kubenswrapper[4780]: I1210 11:20:08.710885 4780 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rl5rp\" (UniqueName: \"kubernetes.io/projected/d296ea05-7391-4cd2-a807-1168a1547cb6-kube-api-access-rl5rp\") on node \"crc\" DevicePath \"\"" Dec 10 11:20:09 crc kubenswrapper[4780]: I1210 11:20:09.489021 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"cba3d639-6379-42fd-bdb7-072dcf87f78f","Type":"ContainerStarted","Data":"ea52ddb186d02a6a57a149772c450dfda5d185d102ab3134eca37301437fe980"} Dec 10 11:20:09 crc kubenswrapper[4780]: I1210 11:20:09.489054 4780 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-cell-mapping-qkv7r" Dec 10 11:20:09 crc kubenswrapper[4780]: I1210 11:20:09.752257 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Dec 10 11:20:09 crc kubenswrapper[4780]: I1210 11:20:09.753121 4780 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="38d1b2e8-babc-4999-a9ff-9737e801058a" containerName="nova-api-log" containerID="cri-o://923fe5b17462bc697d8d975430e1b101178f8f0a637c1e1eedb426cd3c3254b8" gracePeriod=30 Dec 10 11:20:09 crc kubenswrapper[4780]: I1210 11:20:09.753269 4780 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="38d1b2e8-babc-4999-a9ff-9737e801058a" containerName="nova-api-api" containerID="cri-o://7c29bf646fe2a0a144febff357251e1518947775aef5ecc54c88b97f1496cd7f" gracePeriod=30 Dec 10 11:20:09 crc kubenswrapper[4780]: I1210 11:20:09.780225 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Dec 10 11:20:09 crc kubenswrapper[4780]: I1210 11:20:09.780569 4780 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-scheduler-0" podUID="462049e2-388d-465c-b954-3df68a7c0aea" containerName="nova-scheduler-scheduler" containerID="cri-o://a1a518432f9312ba0c4fd146c5002890d9676b95de8e2e786c1880cb6b14f211" gracePeriod=30 Dec 10 11:20:09 crc kubenswrapper[4780]: I1210 11:20:09.811817 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Dec 10 11:20:09 crc kubenswrapper[4780]: I1210 11:20:09.812219 4780 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="dec862f9-f21f-4c0f-9bc6-48d2d017c57e" containerName="nova-metadata-log" containerID="cri-o://eb71dba908410a6885d49742cc456a5bdbd38f243eb2b4f2d1365f9a657559ba" gracePeriod=30 Dec 10 11:20:09 crc kubenswrapper[4780]: I1210 11:20:09.812308 4780 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="dec862f9-f21f-4c0f-9bc6-48d2d017c57e" containerName="nova-metadata-metadata" containerID="cri-o://05c3215122612a7ac94100ba56fefb30af74ddd26a4f3fc45174dd4ea89e79bb" gracePeriod=30 Dec 10 11:20:10 crc kubenswrapper[4780]: I1210 11:20:10.516653 4780 generic.go:334] "Generic (PLEG): container finished" podID="dec862f9-f21f-4c0f-9bc6-48d2d017c57e" containerID="eb71dba908410a6885d49742cc456a5bdbd38f243eb2b4f2d1365f9a657559ba" exitCode=143 Dec 10 11:20:10 crc kubenswrapper[4780]: I1210 11:20:10.516764 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"dec862f9-f21f-4c0f-9bc6-48d2d017c57e","Type":"ContainerDied","Data":"eb71dba908410a6885d49742cc456a5bdbd38f243eb2b4f2d1365f9a657559ba"} Dec 10 11:20:10 crc kubenswrapper[4780]: I1210 11:20:10.525799 4780 generic.go:334] "Generic (PLEG): container finished" podID="38d1b2e8-babc-4999-a9ff-9737e801058a" containerID="7c29bf646fe2a0a144febff357251e1518947775aef5ecc54c88b97f1496cd7f" exitCode=0 Dec 10 11:20:10 crc kubenswrapper[4780]: I1210 11:20:10.526319 4780 generic.go:334] "Generic (PLEG): container finished" podID="38d1b2e8-babc-4999-a9ff-9737e801058a" containerID="923fe5b17462bc697d8d975430e1b101178f8f0a637c1e1eedb426cd3c3254b8" exitCode=143 Dec 10 11:20:10 crc kubenswrapper[4780]: I1210 11:20:10.525911 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" 
event={"ID":"38d1b2e8-babc-4999-a9ff-9737e801058a","Type":"ContainerDied","Data":"7c29bf646fe2a0a144febff357251e1518947775aef5ecc54c88b97f1496cd7f"} Dec 10 11:20:10 crc kubenswrapper[4780]: I1210 11:20:10.526606 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"38d1b2e8-babc-4999-a9ff-9737e801058a","Type":"ContainerDied","Data":"923fe5b17462bc697d8d975430e1b101178f8f0a637c1e1eedb426cd3c3254b8"} Dec 10 11:20:10 crc kubenswrapper[4780]: I1210 11:20:10.994548 4780 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Dec 10 11:20:11 crc kubenswrapper[4780]: I1210 11:20:11.100847 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/38d1b2e8-babc-4999-a9ff-9737e801058a-combined-ca-bundle\") pod \"38d1b2e8-babc-4999-a9ff-9737e801058a\" (UID: \"38d1b2e8-babc-4999-a9ff-9737e801058a\") " Dec 10 11:20:11 crc kubenswrapper[4780]: I1210 11:20:11.101045 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/38d1b2e8-babc-4999-a9ff-9737e801058a-logs\") pod \"38d1b2e8-babc-4999-a9ff-9737e801058a\" (UID: \"38d1b2e8-babc-4999-a9ff-9737e801058a\") " Dec 10 11:20:11 crc kubenswrapper[4780]: I1210 11:20:11.101243 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/38d1b2e8-babc-4999-a9ff-9737e801058a-public-tls-certs\") pod \"38d1b2e8-babc-4999-a9ff-9737e801058a\" (UID: \"38d1b2e8-babc-4999-a9ff-9737e801058a\") " Dec 10 11:20:11 crc kubenswrapper[4780]: I1210 11:20:11.101486 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cqn4z\" (UniqueName: \"kubernetes.io/projected/38d1b2e8-babc-4999-a9ff-9737e801058a-kube-api-access-cqn4z\") pod \"38d1b2e8-babc-4999-a9ff-9737e801058a\" (UID: \"38d1b2e8-babc-4999-a9ff-9737e801058a\") " Dec 10 11:20:11 crc kubenswrapper[4780]: I1210 11:20:11.101595 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/38d1b2e8-babc-4999-a9ff-9737e801058a-internal-tls-certs\") pod \"38d1b2e8-babc-4999-a9ff-9737e801058a\" (UID: \"38d1b2e8-babc-4999-a9ff-9737e801058a\") " Dec 10 11:20:11 crc kubenswrapper[4780]: I1210 11:20:11.101637 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/38d1b2e8-babc-4999-a9ff-9737e801058a-config-data\") pod \"38d1b2e8-babc-4999-a9ff-9737e801058a\" (UID: \"38d1b2e8-babc-4999-a9ff-9737e801058a\") " Dec 10 11:20:11 crc kubenswrapper[4780]: I1210 11:20:11.105681 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/38d1b2e8-babc-4999-a9ff-9737e801058a-logs" (OuterVolumeSpecName: "logs") pod "38d1b2e8-babc-4999-a9ff-9737e801058a" (UID: "38d1b2e8-babc-4999-a9ff-9737e801058a"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 11:20:11 crc kubenswrapper[4780]: I1210 11:20:11.145345 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/38d1b2e8-babc-4999-a9ff-9737e801058a-kube-api-access-cqn4z" (OuterVolumeSpecName: "kube-api-access-cqn4z") pod "38d1b2e8-babc-4999-a9ff-9737e801058a" (UID: "38d1b2e8-babc-4999-a9ff-9737e801058a"). InnerVolumeSpecName "kube-api-access-cqn4z". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:20:11 crc kubenswrapper[4780]: I1210 11:20:11.207565 4780 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cqn4z\" (UniqueName: \"kubernetes.io/projected/38d1b2e8-babc-4999-a9ff-9737e801058a-kube-api-access-cqn4z\") on node \"crc\" DevicePath \"\"" Dec 10 11:20:11 crc kubenswrapper[4780]: I1210 11:20:11.207612 4780 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/38d1b2e8-babc-4999-a9ff-9737e801058a-logs\") on node \"crc\" DevicePath \"\"" Dec 10 11:20:11 crc kubenswrapper[4780]: I1210 11:20:11.288162 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/38d1b2e8-babc-4999-a9ff-9737e801058a-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "38d1b2e8-babc-4999-a9ff-9737e801058a" (UID: "38d1b2e8-babc-4999-a9ff-9737e801058a"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:20:11 crc kubenswrapper[4780]: I1210 11:20:11.310556 4780 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/38d1b2e8-babc-4999-a9ff-9737e801058a-public-tls-certs\") on node \"crc\" DevicePath \"\"" Dec 10 11:20:11 crc kubenswrapper[4780]: I1210 11:20:11.800886 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/38d1b2e8-babc-4999-a9ff-9737e801058a-config-data" (OuterVolumeSpecName: "config-data") pod "38d1b2e8-babc-4999-a9ff-9737e801058a" (UID: "38d1b2e8-babc-4999-a9ff-9737e801058a"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:20:11 crc kubenswrapper[4780]: I1210 11:20:11.820446 4780 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/38d1b2e8-babc-4999-a9ff-9737e801058a-config-data\") on node \"crc\" DevicePath \"\"" Dec 10 11:20:11 crc kubenswrapper[4780]: I1210 11:20:11.842708 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/38d1b2e8-babc-4999-a9ff-9737e801058a-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "38d1b2e8-babc-4999-a9ff-9737e801058a" (UID: "38d1b2e8-babc-4999-a9ff-9737e801058a"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:20:11 crc kubenswrapper[4780]: I1210 11:20:11.898503 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/38d1b2e8-babc-4999-a9ff-9737e801058a-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "38d1b2e8-babc-4999-a9ff-9737e801058a" (UID: "38d1b2e8-babc-4999-a9ff-9737e801058a"). InnerVolumeSpecName "internal-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:20:11 crc kubenswrapper[4780]: I1210 11:20:11.931181 4780 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/38d1b2e8-babc-4999-a9ff-9737e801058a-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 10 11:20:11 crc kubenswrapper[4780]: I1210 11:20:11.931234 4780 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/38d1b2e8-babc-4999-a9ff-9737e801058a-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Dec 10 11:20:12 crc kubenswrapper[4780]: I1210 11:20:12.189773 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"38d1b2e8-babc-4999-a9ff-9737e801058a","Type":"ContainerDied","Data":"0ba9e1cbac4b77db11334aec3205785ad402ec481d787478e14c5c393a445afc"} Dec 10 11:20:12 crc kubenswrapper[4780]: I1210 11:20:12.190355 4780 scope.go:117] "RemoveContainer" containerID="7c29bf646fe2a0a144febff357251e1518947775aef5ecc54c88b97f1496cd7f" Dec 10 11:20:12 crc kubenswrapper[4780]: I1210 11:20:12.190717 4780 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Dec 10 11:20:12 crc kubenswrapper[4780]: I1210 11:20:12.214772 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"cba3d639-6379-42fd-bdb7-072dcf87f78f","Type":"ContainerStarted","Data":"b372237d665234eacceab5899c5c9f861810efb0e9e5252a73ff3d910a9af801"} Dec 10 11:20:12 crc kubenswrapper[4780]: I1210 11:20:12.217349 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Dec 10 11:20:12 crc kubenswrapper[4780]: I1210 11:20:12.279093 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=3.612312866 podStartE2EDuration="8.279059209s" podCreationTimestamp="2025-12-10 11:20:04 +0000 UTC" firstStartedPulling="2025-12-10 11:20:05.432606406 +0000 UTC m=+2110.285999849" lastFinishedPulling="2025-12-10 11:20:10.099352749 +0000 UTC m=+2114.952746192" observedRunningTime="2025-12-10 11:20:12.251788522 +0000 UTC m=+2117.105181965" watchObservedRunningTime="2025-12-10 11:20:12.279059209 +0000 UTC m=+2117.132452662" Dec 10 11:20:12 crc kubenswrapper[4780]: I1210 11:20:12.322182 4780 scope.go:117] "RemoveContainer" containerID="923fe5b17462bc697d8d975430e1b101178f8f0a637c1e1eedb426cd3c3254b8" Dec 10 11:20:12 crc kubenswrapper[4780]: I1210 11:20:12.324390 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Dec 10 11:20:12 crc kubenswrapper[4780]: I1210 11:20:12.341735 4780 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"] Dec 10 11:20:12 crc kubenswrapper[4780]: E1210 11:20:12.354285 4780 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="a1a518432f9312ba0c4fd146c5002890d9676b95de8e2e786c1880cb6b14f211" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Dec 10 11:20:12 crc kubenswrapper[4780]: I1210 11:20:12.357870 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Dec 10 11:20:12 crc kubenswrapper[4780]: E1210 11:20:12.358707 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="38d1b2e8-babc-4999-a9ff-9737e801058a" containerName="nova-api-api" Dec 10 11:20:12 crc 
kubenswrapper[4780]: I1210 11:20:12.358737 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="38d1b2e8-babc-4999-a9ff-9737e801058a" containerName="nova-api-api" Dec 10 11:20:12 crc kubenswrapper[4780]: E1210 11:20:12.358786 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d296ea05-7391-4cd2-a807-1168a1547cb6" containerName="nova-manage" Dec 10 11:20:12 crc kubenswrapper[4780]: I1210 11:20:12.358796 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="d296ea05-7391-4cd2-a807-1168a1547cb6" containerName="nova-manage" Dec 10 11:20:12 crc kubenswrapper[4780]: E1210 11:20:12.358850 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="38d1b2e8-babc-4999-a9ff-9737e801058a" containerName="nova-api-log" Dec 10 11:20:12 crc kubenswrapper[4780]: I1210 11:20:12.358856 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="38d1b2e8-babc-4999-a9ff-9737e801058a" containerName="nova-api-log" Dec 10 11:20:12 crc kubenswrapper[4780]: I1210 11:20:12.359228 4780 memory_manager.go:354] "RemoveStaleState removing state" podUID="d296ea05-7391-4cd2-a807-1168a1547cb6" containerName="nova-manage" Dec 10 11:20:12 crc kubenswrapper[4780]: I1210 11:20:12.359261 4780 memory_manager.go:354] "RemoveStaleState removing state" podUID="38d1b2e8-babc-4999-a9ff-9737e801058a" containerName="nova-api-api" Dec 10 11:20:12 crc kubenswrapper[4780]: I1210 11:20:12.359283 4780 memory_manager.go:354] "RemoveStaleState removing state" podUID="38d1b2e8-babc-4999-a9ff-9737e801058a" containerName="nova-api-log" Dec 10 11:20:12 crc kubenswrapper[4780]: E1210 11:20:12.361413 4780 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="a1a518432f9312ba0c4fd146c5002890d9676b95de8e2e786c1880cb6b14f211" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Dec 10 11:20:12 crc kubenswrapper[4780]: I1210 11:20:12.368051 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Dec 10 11:20:12 crc kubenswrapper[4780]: I1210 11:20:12.371557 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Dec 10 11:20:12 crc kubenswrapper[4780]: I1210 11:20:12.372037 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-internal-svc" Dec 10 11:20:12 crc kubenswrapper[4780]: I1210 11:20:12.372334 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-public-svc" Dec 10 11:20:12 crc kubenswrapper[4780]: E1210 11:20:12.378948 4780 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="a1a518432f9312ba0c4fd146c5002890d9676b95de8e2e786c1880cb6b14f211" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Dec 10 11:20:12 crc kubenswrapper[4780]: E1210 11:20:12.379555 4780 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/nova-scheduler-0" podUID="462049e2-388d-465c-b954-3df68a7c0aea" containerName="nova-scheduler-scheduler" Dec 10 11:20:12 crc kubenswrapper[4780]: I1210 11:20:12.394001 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Dec 10 11:20:12 crc kubenswrapper[4780]: I1210 11:20:12.453796 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/de3b2cd2-6566-45ee-b000-47ebd28169a9-logs\") pod \"nova-api-0\" (UID: \"de3b2cd2-6566-45ee-b000-47ebd28169a9\") " pod="openstack/nova-api-0" Dec 10 11:20:12 crc kubenswrapper[4780]: I1210 11:20:12.453897 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2z4s7\" (UniqueName: \"kubernetes.io/projected/de3b2cd2-6566-45ee-b000-47ebd28169a9-kube-api-access-2z4s7\") pod \"nova-api-0\" (UID: \"de3b2cd2-6566-45ee-b000-47ebd28169a9\") " pod="openstack/nova-api-0" Dec 10 11:20:12 crc kubenswrapper[4780]: I1210 11:20:12.453969 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/de3b2cd2-6566-45ee-b000-47ebd28169a9-internal-tls-certs\") pod \"nova-api-0\" (UID: \"de3b2cd2-6566-45ee-b000-47ebd28169a9\") " pod="openstack/nova-api-0" Dec 10 11:20:12 crc kubenswrapper[4780]: I1210 11:20:12.454024 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/de3b2cd2-6566-45ee-b000-47ebd28169a9-config-data\") pod \"nova-api-0\" (UID: \"de3b2cd2-6566-45ee-b000-47ebd28169a9\") " pod="openstack/nova-api-0" Dec 10 11:20:12 crc kubenswrapper[4780]: I1210 11:20:12.454092 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/de3b2cd2-6566-45ee-b000-47ebd28169a9-public-tls-certs\") pod \"nova-api-0\" (UID: \"de3b2cd2-6566-45ee-b000-47ebd28169a9\") " pod="openstack/nova-api-0" Dec 10 11:20:12 crc kubenswrapper[4780]: I1210 11:20:12.454186 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/de3b2cd2-6566-45ee-b000-47ebd28169a9-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"de3b2cd2-6566-45ee-b000-47ebd28169a9\") " pod="openstack/nova-api-0" Dec 10 11:20:12 crc kubenswrapper[4780]: I1210 11:20:12.557123 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/de3b2cd2-6566-45ee-b000-47ebd28169a9-logs\") pod \"nova-api-0\" (UID: \"de3b2cd2-6566-45ee-b000-47ebd28169a9\") " pod="openstack/nova-api-0" Dec 10 11:20:12 crc kubenswrapper[4780]: I1210 11:20:12.557214 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2z4s7\" (UniqueName: \"kubernetes.io/projected/de3b2cd2-6566-45ee-b000-47ebd28169a9-kube-api-access-2z4s7\") pod \"nova-api-0\" (UID: \"de3b2cd2-6566-45ee-b000-47ebd28169a9\") " pod="openstack/nova-api-0" Dec 10 11:20:12 crc kubenswrapper[4780]: I1210 11:20:12.557255 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/de3b2cd2-6566-45ee-b000-47ebd28169a9-internal-tls-certs\") pod \"nova-api-0\" (UID: \"de3b2cd2-6566-45ee-b000-47ebd28169a9\") " pod="openstack/nova-api-0" Dec 10 11:20:12 crc kubenswrapper[4780]: I1210 11:20:12.557290 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/de3b2cd2-6566-45ee-b000-47ebd28169a9-config-data\") pod \"nova-api-0\" (UID: \"de3b2cd2-6566-45ee-b000-47ebd28169a9\") " pod="openstack/nova-api-0" Dec 10 11:20:12 crc kubenswrapper[4780]: I1210 11:20:12.557319 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/de3b2cd2-6566-45ee-b000-47ebd28169a9-public-tls-certs\") pod \"nova-api-0\" (UID: \"de3b2cd2-6566-45ee-b000-47ebd28169a9\") " pod="openstack/nova-api-0" Dec 10 11:20:12 crc kubenswrapper[4780]: I1210 11:20:12.557404 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/de3b2cd2-6566-45ee-b000-47ebd28169a9-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"de3b2cd2-6566-45ee-b000-47ebd28169a9\") " pod="openstack/nova-api-0" Dec 10 11:20:12 crc kubenswrapper[4780]: I1210 11:20:12.558021 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/de3b2cd2-6566-45ee-b000-47ebd28169a9-logs\") pod \"nova-api-0\" (UID: \"de3b2cd2-6566-45ee-b000-47ebd28169a9\") " pod="openstack/nova-api-0" Dec 10 11:20:12 crc kubenswrapper[4780]: I1210 11:20:12.569971 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/de3b2cd2-6566-45ee-b000-47ebd28169a9-internal-tls-certs\") pod \"nova-api-0\" (UID: \"de3b2cd2-6566-45ee-b000-47ebd28169a9\") " pod="openstack/nova-api-0" Dec 10 11:20:12 crc kubenswrapper[4780]: I1210 11:20:12.570105 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/de3b2cd2-6566-45ee-b000-47ebd28169a9-config-data\") pod \"nova-api-0\" (UID: \"de3b2cd2-6566-45ee-b000-47ebd28169a9\") " pod="openstack/nova-api-0" Dec 10 11:20:12 crc kubenswrapper[4780]: I1210 11:20:12.570139 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/de3b2cd2-6566-45ee-b000-47ebd28169a9-public-tls-certs\") 
pod \"nova-api-0\" (UID: \"de3b2cd2-6566-45ee-b000-47ebd28169a9\") " pod="openstack/nova-api-0" Dec 10 11:20:12 crc kubenswrapper[4780]: I1210 11:20:12.578192 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/de3b2cd2-6566-45ee-b000-47ebd28169a9-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"de3b2cd2-6566-45ee-b000-47ebd28169a9\") " pod="openstack/nova-api-0" Dec 10 11:20:12 crc kubenswrapper[4780]: I1210 11:20:12.580569 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2z4s7\" (UniqueName: \"kubernetes.io/projected/de3b2cd2-6566-45ee-b000-47ebd28169a9-kube-api-access-2z4s7\") pod \"nova-api-0\" (UID: \"de3b2cd2-6566-45ee-b000-47ebd28169a9\") " pod="openstack/nova-api-0" Dec 10 11:20:12 crc kubenswrapper[4780]: I1210 11:20:12.722597 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Dec 10 11:20:13 crc kubenswrapper[4780]: I1210 11:20:13.358410 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Dec 10 11:20:13 crc kubenswrapper[4780]: I1210 11:20:13.385368 4780 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/nova-metadata-0" podUID="dec862f9-f21f-4c0f-9bc6-48d2d017c57e" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"https://10.217.0.247:8775/\": read tcp 10.217.0.2:52148->10.217.0.247:8775: read: connection reset by peer" Dec 10 11:20:13 crc kubenswrapper[4780]: I1210 11:20:13.393601 4780 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/nova-metadata-0" podUID="dec862f9-f21f-4c0f-9bc6-48d2d017c57e" containerName="nova-metadata-log" probeResult="failure" output="Get \"https://10.217.0.247:8775/\": read tcp 10.217.0.2:52162->10.217.0.247:8775: read: connection reset by peer" Dec 10 11:20:13 crc kubenswrapper[4780]: I1210 11:20:13.936628 4780 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Dec 10 11:20:13 crc kubenswrapper[4780]: I1210 11:20:13.977214 4780 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="38d1b2e8-babc-4999-a9ff-9737e801058a" path="/var/lib/kubelet/pods/38d1b2e8-babc-4999-a9ff-9737e801058a/volumes" Dec 10 11:20:14 crc kubenswrapper[4780]: I1210 11:20:14.008234 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/dec862f9-f21f-4c0f-9bc6-48d2d017c57e-config-data\") pod \"dec862f9-f21f-4c0f-9bc6-48d2d017c57e\" (UID: \"dec862f9-f21f-4c0f-9bc6-48d2d017c57e\") " Dec 10 11:20:14 crc kubenswrapper[4780]: I1210 11:20:14.008339 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/dec862f9-f21f-4c0f-9bc6-48d2d017c57e-nova-metadata-tls-certs\") pod \"dec862f9-f21f-4c0f-9bc6-48d2d017c57e\" (UID: \"dec862f9-f21f-4c0f-9bc6-48d2d017c57e\") " Dec 10 11:20:14 crc kubenswrapper[4780]: I1210 11:20:14.008557 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dec862f9-f21f-4c0f-9bc6-48d2d017c57e-combined-ca-bundle\") pod \"dec862f9-f21f-4c0f-9bc6-48d2d017c57e\" (UID: \"dec862f9-f21f-4c0f-9bc6-48d2d017c57e\") " Dec 10 11:20:14 crc kubenswrapper[4780]: I1210 11:20:14.008609 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dbtwt\" (UniqueName: \"kubernetes.io/projected/dec862f9-f21f-4c0f-9bc6-48d2d017c57e-kube-api-access-dbtwt\") pod \"dec862f9-f21f-4c0f-9bc6-48d2d017c57e\" (UID: \"dec862f9-f21f-4c0f-9bc6-48d2d017c57e\") " Dec 10 11:20:14 crc kubenswrapper[4780]: I1210 11:20:14.008863 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/dec862f9-f21f-4c0f-9bc6-48d2d017c57e-logs\") pod \"dec862f9-f21f-4c0f-9bc6-48d2d017c57e\" (UID: \"dec862f9-f21f-4c0f-9bc6-48d2d017c57e\") " Dec 10 11:20:14 crc kubenswrapper[4780]: I1210 11:20:14.010131 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/dec862f9-f21f-4c0f-9bc6-48d2d017c57e-logs" (OuterVolumeSpecName: "logs") pod "dec862f9-f21f-4c0f-9bc6-48d2d017c57e" (UID: "dec862f9-f21f-4c0f-9bc6-48d2d017c57e"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 11:20:14 crc kubenswrapper[4780]: I1210 11:20:14.019131 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/dec862f9-f21f-4c0f-9bc6-48d2d017c57e-kube-api-access-dbtwt" (OuterVolumeSpecName: "kube-api-access-dbtwt") pod "dec862f9-f21f-4c0f-9bc6-48d2d017c57e" (UID: "dec862f9-f21f-4c0f-9bc6-48d2d017c57e"). InnerVolumeSpecName "kube-api-access-dbtwt". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:20:14 crc kubenswrapper[4780]: I1210 11:20:14.116559 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/dec862f9-f21f-4c0f-9bc6-48d2d017c57e-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "dec862f9-f21f-4c0f-9bc6-48d2d017c57e" (UID: "dec862f9-f21f-4c0f-9bc6-48d2d017c57e"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:20:14 crc kubenswrapper[4780]: I1210 11:20:14.120422 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/dec862f9-f21f-4c0f-9bc6-48d2d017c57e-config-data" (OuterVolumeSpecName: "config-data") pod "dec862f9-f21f-4c0f-9bc6-48d2d017c57e" (UID: "dec862f9-f21f-4c0f-9bc6-48d2d017c57e"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:20:14 crc kubenswrapper[4780]: I1210 11:20:14.130223 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dec862f9-f21f-4c0f-9bc6-48d2d017c57e-combined-ca-bundle\") pod \"dec862f9-f21f-4c0f-9bc6-48d2d017c57e\" (UID: \"dec862f9-f21f-4c0f-9bc6-48d2d017c57e\") " Dec 10 11:20:14 crc kubenswrapper[4780]: W1210 11:20:14.130862 4780 empty_dir.go:500] Warning: Unmount skipped because path does not exist: /var/lib/kubelet/pods/dec862f9-f21f-4c0f-9bc6-48d2d017c57e/volumes/kubernetes.io~secret/combined-ca-bundle Dec 10 11:20:14 crc kubenswrapper[4780]: I1210 11:20:14.130892 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/dec862f9-f21f-4c0f-9bc6-48d2d017c57e-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "dec862f9-f21f-4c0f-9bc6-48d2d017c57e" (UID: "dec862f9-f21f-4c0f-9bc6-48d2d017c57e"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:20:14 crc kubenswrapper[4780]: I1210 11:20:14.133425 4780 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/dec862f9-f21f-4c0f-9bc6-48d2d017c57e-logs\") on node \"crc\" DevicePath \"\"" Dec 10 11:20:14 crc kubenswrapper[4780]: I1210 11:20:14.133446 4780 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/dec862f9-f21f-4c0f-9bc6-48d2d017c57e-config-data\") on node \"crc\" DevicePath \"\"" Dec 10 11:20:14 crc kubenswrapper[4780]: I1210 11:20:14.133457 4780 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dec862f9-f21f-4c0f-9bc6-48d2d017c57e-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 10 11:20:14 crc kubenswrapper[4780]: I1210 11:20:14.133475 4780 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dbtwt\" (UniqueName: \"kubernetes.io/projected/dec862f9-f21f-4c0f-9bc6-48d2d017c57e-kube-api-access-dbtwt\") on node \"crc\" DevicePath \"\"" Dec 10 11:20:14 crc kubenswrapper[4780]: I1210 11:20:14.226849 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/dec862f9-f21f-4c0f-9bc6-48d2d017c57e-nova-metadata-tls-certs" (OuterVolumeSpecName: "nova-metadata-tls-certs") pod "dec862f9-f21f-4c0f-9bc6-48d2d017c57e" (UID: "dec862f9-f21f-4c0f-9bc6-48d2d017c57e"). InnerVolumeSpecName "nova-metadata-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:20:14 crc kubenswrapper[4780]: I1210 11:20:14.237033 4780 reconciler_common.go:293] "Volume detached for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/dec862f9-f21f-4c0f-9bc6-48d2d017c57e-nova-metadata-tls-certs\") on node \"crc\" DevicePath \"\"" Dec 10 11:20:14 crc kubenswrapper[4780]: I1210 11:20:14.257880 4780 generic.go:334] "Generic (PLEG): container finished" podID="dec862f9-f21f-4c0f-9bc6-48d2d017c57e" containerID="05c3215122612a7ac94100ba56fefb30af74ddd26a4f3fc45174dd4ea89e79bb" exitCode=0 Dec 10 11:20:14 crc kubenswrapper[4780]: I1210 11:20:14.257996 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"dec862f9-f21f-4c0f-9bc6-48d2d017c57e","Type":"ContainerDied","Data":"05c3215122612a7ac94100ba56fefb30af74ddd26a4f3fc45174dd4ea89e79bb"} Dec 10 11:20:14 crc kubenswrapper[4780]: I1210 11:20:14.258045 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"dec862f9-f21f-4c0f-9bc6-48d2d017c57e","Type":"ContainerDied","Data":"cbad93438739ca170871df4d4d7a296ebbb8f54235d627ba49e4ae2fed58f7e8"} Dec 10 11:20:14 crc kubenswrapper[4780]: I1210 11:20:14.258072 4780 scope.go:117] "RemoveContainer" containerID="05c3215122612a7ac94100ba56fefb30af74ddd26a4f3fc45174dd4ea89e79bb" Dec 10 11:20:14 crc kubenswrapper[4780]: I1210 11:20:14.258281 4780 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Dec 10 11:20:14 crc kubenswrapper[4780]: I1210 11:20:14.276183 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"de3b2cd2-6566-45ee-b000-47ebd28169a9","Type":"ContainerStarted","Data":"75aeb13e6701bf2934eb1c2db6f7e11a6afd9ba81a97b0db132c71e70ee5e814"} Dec 10 11:20:14 crc kubenswrapper[4780]: I1210 11:20:14.276264 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"de3b2cd2-6566-45ee-b000-47ebd28169a9","Type":"ContainerStarted","Data":"a87fdbfd666b13162df6608a66be21979bb3e53cd571501bf58fcc9814e8f71c"} Dec 10 11:20:14 crc kubenswrapper[4780]: I1210 11:20:14.276278 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"de3b2cd2-6566-45ee-b000-47ebd28169a9","Type":"ContainerStarted","Data":"026910c462d57f51405e6bfbd2b47db53cd564525cc1880c35aecc3feae6feb3"} Dec 10 11:20:14 crc kubenswrapper[4780]: I1210 11:20:14.362270 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=2.362230982 podStartE2EDuration="2.362230982s" podCreationTimestamp="2025-12-10 11:20:12 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 11:20:14.307082123 +0000 UTC m=+2119.160475566" watchObservedRunningTime="2025-12-10 11:20:14.362230982 +0000 UTC m=+2119.215624425" Dec 10 11:20:14 crc kubenswrapper[4780]: I1210 11:20:14.395273 4780 scope.go:117] "RemoveContainer" containerID="eb71dba908410a6885d49742cc456a5bdbd38f243eb2b4f2d1365f9a657559ba" Dec 10 11:20:14 crc kubenswrapper[4780]: I1210 11:20:14.681214 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Dec 10 11:20:14 crc kubenswrapper[4780]: I1210 11:20:14.711436 4780 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-metadata-0"] Dec 10 11:20:14 crc kubenswrapper[4780]: I1210 11:20:14.721171 4780 scope.go:117] 
"RemoveContainer" containerID="05c3215122612a7ac94100ba56fefb30af74ddd26a4f3fc45174dd4ea89e79bb" Dec 10 11:20:14 crc kubenswrapper[4780]: E1210 11:20:14.725764 4780 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"05c3215122612a7ac94100ba56fefb30af74ddd26a4f3fc45174dd4ea89e79bb\": container with ID starting with 05c3215122612a7ac94100ba56fefb30af74ddd26a4f3fc45174dd4ea89e79bb not found: ID does not exist" containerID="05c3215122612a7ac94100ba56fefb30af74ddd26a4f3fc45174dd4ea89e79bb" Dec 10 11:20:14 crc kubenswrapper[4780]: I1210 11:20:14.725831 4780 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"05c3215122612a7ac94100ba56fefb30af74ddd26a4f3fc45174dd4ea89e79bb"} err="failed to get container status \"05c3215122612a7ac94100ba56fefb30af74ddd26a4f3fc45174dd4ea89e79bb\": rpc error: code = NotFound desc = could not find container \"05c3215122612a7ac94100ba56fefb30af74ddd26a4f3fc45174dd4ea89e79bb\": container with ID starting with 05c3215122612a7ac94100ba56fefb30af74ddd26a4f3fc45174dd4ea89e79bb not found: ID does not exist" Dec 10 11:20:14 crc kubenswrapper[4780]: I1210 11:20:14.725869 4780 scope.go:117] "RemoveContainer" containerID="eb71dba908410a6885d49742cc456a5bdbd38f243eb2b4f2d1365f9a657559ba" Dec 10 11:20:14 crc kubenswrapper[4780]: I1210 11:20:14.732230 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Dec 10 11:20:14 crc kubenswrapper[4780]: E1210 11:20:14.733364 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dec862f9-f21f-4c0f-9bc6-48d2d017c57e" containerName="nova-metadata-metadata" Dec 10 11:20:14 crc kubenswrapper[4780]: I1210 11:20:14.733395 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="dec862f9-f21f-4c0f-9bc6-48d2d017c57e" containerName="nova-metadata-metadata" Dec 10 11:20:14 crc kubenswrapper[4780]: E1210 11:20:14.733534 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dec862f9-f21f-4c0f-9bc6-48d2d017c57e" containerName="nova-metadata-log" Dec 10 11:20:14 crc kubenswrapper[4780]: I1210 11:20:14.733553 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="dec862f9-f21f-4c0f-9bc6-48d2d017c57e" containerName="nova-metadata-log" Dec 10 11:20:14 crc kubenswrapper[4780]: I1210 11:20:14.734409 4780 memory_manager.go:354] "RemoveStaleState removing state" podUID="dec862f9-f21f-4c0f-9bc6-48d2d017c57e" containerName="nova-metadata-metadata" Dec 10 11:20:14 crc kubenswrapper[4780]: I1210 11:20:14.734529 4780 memory_manager.go:354] "RemoveStaleState removing state" podUID="dec862f9-f21f-4c0f-9bc6-48d2d017c57e" containerName="nova-metadata-log" Dec 10 11:20:14 crc kubenswrapper[4780]: E1210 11:20:14.735678 4780 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"eb71dba908410a6885d49742cc456a5bdbd38f243eb2b4f2d1365f9a657559ba\": container with ID starting with eb71dba908410a6885d49742cc456a5bdbd38f243eb2b4f2d1365f9a657559ba not found: ID does not exist" containerID="eb71dba908410a6885d49742cc456a5bdbd38f243eb2b4f2d1365f9a657559ba" Dec 10 11:20:14 crc kubenswrapper[4780]: I1210 11:20:14.735889 4780 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"eb71dba908410a6885d49742cc456a5bdbd38f243eb2b4f2d1365f9a657559ba"} err="failed to get container status \"eb71dba908410a6885d49742cc456a5bdbd38f243eb2b4f2d1365f9a657559ba\": rpc error: code = NotFound desc = could not 
find container \"eb71dba908410a6885d49742cc456a5bdbd38f243eb2b4f2d1365f9a657559ba\": container with ID starting with eb71dba908410a6885d49742cc456a5bdbd38f243eb2b4f2d1365f9a657559ba not found: ID does not exist" Dec 10 11:20:14 crc kubenswrapper[4780]: I1210 11:20:14.737107 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Dec 10 11:20:14 crc kubenswrapper[4780]: I1210 11:20:14.752043 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Dec 10 11:20:14 crc kubenswrapper[4780]: I1210 11:20:14.754977 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-metadata-internal-svc" Dec 10 11:20:14 crc kubenswrapper[4780]: I1210 11:20:14.767788 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Dec 10 11:20:14 crc kubenswrapper[4780]: I1210 11:20:14.861557 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wdhpc\" (UniqueName: \"kubernetes.io/projected/1949c654-c734-4a35-a616-4fd761289785-kube-api-access-wdhpc\") pod \"nova-metadata-0\" (UID: \"1949c654-c734-4a35-a616-4fd761289785\") " pod="openstack/nova-metadata-0" Dec 10 11:20:14 crc kubenswrapper[4780]: I1210 11:20:14.861639 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1949c654-c734-4a35-a616-4fd761289785-config-data\") pod \"nova-metadata-0\" (UID: \"1949c654-c734-4a35-a616-4fd761289785\") " pod="openstack/nova-metadata-0" Dec 10 11:20:14 crc kubenswrapper[4780]: I1210 11:20:14.861711 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1949c654-c734-4a35-a616-4fd761289785-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"1949c654-c734-4a35-a616-4fd761289785\") " pod="openstack/nova-metadata-0" Dec 10 11:20:14 crc kubenswrapper[4780]: I1210 11:20:14.861792 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/1949c654-c734-4a35-a616-4fd761289785-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"1949c654-c734-4a35-a616-4fd761289785\") " pod="openstack/nova-metadata-0" Dec 10 11:20:14 crc kubenswrapper[4780]: I1210 11:20:14.861961 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/1949c654-c734-4a35-a616-4fd761289785-logs\") pod \"nova-metadata-0\" (UID: \"1949c654-c734-4a35-a616-4fd761289785\") " pod="openstack/nova-metadata-0" Dec 10 11:20:14 crc kubenswrapper[4780]: I1210 11:20:14.959523 4780 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-97lpn" Dec 10 11:20:14 crc kubenswrapper[4780]: I1210 11:20:14.966569 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wdhpc\" (UniqueName: \"kubernetes.io/projected/1949c654-c734-4a35-a616-4fd761289785-kube-api-access-wdhpc\") pod \"nova-metadata-0\" (UID: \"1949c654-c734-4a35-a616-4fd761289785\") " pod="openstack/nova-metadata-0" Dec 10 11:20:14 crc kubenswrapper[4780]: I1210 11:20:14.966648 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: 
\"kubernetes.io/secret/1949c654-c734-4a35-a616-4fd761289785-config-data\") pod \"nova-metadata-0\" (UID: \"1949c654-c734-4a35-a616-4fd761289785\") " pod="openstack/nova-metadata-0" Dec 10 11:20:14 crc kubenswrapper[4780]: I1210 11:20:14.966704 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1949c654-c734-4a35-a616-4fd761289785-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"1949c654-c734-4a35-a616-4fd761289785\") " pod="openstack/nova-metadata-0" Dec 10 11:20:14 crc kubenswrapper[4780]: I1210 11:20:14.966769 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/1949c654-c734-4a35-a616-4fd761289785-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"1949c654-c734-4a35-a616-4fd761289785\") " pod="openstack/nova-metadata-0" Dec 10 11:20:14 crc kubenswrapper[4780]: I1210 11:20:14.966856 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/1949c654-c734-4a35-a616-4fd761289785-logs\") pod \"nova-metadata-0\" (UID: \"1949c654-c734-4a35-a616-4fd761289785\") " pod="openstack/nova-metadata-0" Dec 10 11:20:14 crc kubenswrapper[4780]: I1210 11:20:14.968528 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/1949c654-c734-4a35-a616-4fd761289785-logs\") pod \"nova-metadata-0\" (UID: \"1949c654-c734-4a35-a616-4fd761289785\") " pod="openstack/nova-metadata-0" Dec 10 11:20:14 crc kubenswrapper[4780]: I1210 11:20:14.975598 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1949c654-c734-4a35-a616-4fd761289785-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"1949c654-c734-4a35-a616-4fd761289785\") " pod="openstack/nova-metadata-0" Dec 10 11:20:14 crc kubenswrapper[4780]: I1210 11:20:14.976050 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1949c654-c734-4a35-a616-4fd761289785-config-data\") pod \"nova-metadata-0\" (UID: \"1949c654-c734-4a35-a616-4fd761289785\") " pod="openstack/nova-metadata-0" Dec 10 11:20:14 crc kubenswrapper[4780]: I1210 11:20:14.991911 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/1949c654-c734-4a35-a616-4fd761289785-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"1949c654-c734-4a35-a616-4fd761289785\") " pod="openstack/nova-metadata-0" Dec 10 11:20:14 crc kubenswrapper[4780]: I1210 11:20:14.995187 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wdhpc\" (UniqueName: \"kubernetes.io/projected/1949c654-c734-4a35-a616-4fd761289785-kube-api-access-wdhpc\") pod \"nova-metadata-0\" (UID: \"1949c654-c734-4a35-a616-4fd761289785\") " pod="openstack/nova-metadata-0" Dec 10 11:20:15 crc kubenswrapper[4780]: I1210 11:20:15.036139 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-97lpn" Dec 10 11:20:15 crc kubenswrapper[4780]: I1210 11:20:15.090957 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Dec 10 11:20:15 crc kubenswrapper[4780]: I1210 11:20:15.276813 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-97lpn"] Dec 10 11:20:15 crc kubenswrapper[4780]: I1210 11:20:15.980854 4780 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="dec862f9-f21f-4c0f-9bc6-48d2d017c57e" path="/var/lib/kubelet/pods/dec862f9-f21f-4c0f-9bc6-48d2d017c57e/volumes" Dec 10 11:20:16 crc kubenswrapper[4780]: W1210 11:20:16.117286 4780 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod1949c654_c734_4a35_a616_4fd761289785.slice/crio-b2ef6cefc7970ff45a1553fb641e4f0395635977582750909e106d8dfd50b16b WatchSource:0}: Error finding container b2ef6cefc7970ff45a1553fb641e4f0395635977582750909e106d8dfd50b16b: Status 404 returned error can't find the container with id b2ef6cefc7970ff45a1553fb641e4f0395635977582750909e106d8dfd50b16b Dec 10 11:20:16 crc kubenswrapper[4780]: I1210 11:20:16.128260 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Dec 10 11:20:16 crc kubenswrapper[4780]: I1210 11:20:16.378983 4780 generic.go:334] "Generic (PLEG): container finished" podID="462049e2-388d-465c-b954-3df68a7c0aea" containerID="a1a518432f9312ba0c4fd146c5002890d9676b95de8e2e786c1880cb6b14f211" exitCode=0 Dec 10 11:20:16 crc kubenswrapper[4780]: I1210 11:20:16.379650 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"462049e2-388d-465c-b954-3df68a7c0aea","Type":"ContainerDied","Data":"a1a518432f9312ba0c4fd146c5002890d9676b95de8e2e786c1880cb6b14f211"} Dec 10 11:20:16 crc kubenswrapper[4780]: I1210 11:20:16.388813 4780 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-97lpn" podUID="b3c9cd45-f8fa-4329-8ad0-0a07e2fd14ae" containerName="registry-server" containerID="cri-o://0ecdb588478460eb0e42039dbfef5d7e34267638abd3463121326834079632bb" gracePeriod=2 Dec 10 11:20:16 crc kubenswrapper[4780]: I1210 11:20:16.389504 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"1949c654-c734-4a35-a616-4fd761289785","Type":"ContainerStarted","Data":"b2ef6cefc7970ff45a1553fb641e4f0395635977582750909e106d8dfd50b16b"} Dec 10 11:20:16 crc kubenswrapper[4780]: I1210 11:20:16.830018 4780 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Dec 10 11:20:16 crc kubenswrapper[4780]: I1210 11:20:16.983358 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/462049e2-388d-465c-b954-3df68a7c0aea-combined-ca-bundle\") pod \"462049e2-388d-465c-b954-3df68a7c0aea\" (UID: \"462049e2-388d-465c-b954-3df68a7c0aea\") " Dec 10 11:20:16 crc kubenswrapper[4780]: I1210 11:20:16.984250 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w8k6t\" (UniqueName: \"kubernetes.io/projected/462049e2-388d-465c-b954-3df68a7c0aea-kube-api-access-w8k6t\") pod \"462049e2-388d-465c-b954-3df68a7c0aea\" (UID: \"462049e2-388d-465c-b954-3df68a7c0aea\") " Dec 10 11:20:16 crc kubenswrapper[4780]: I1210 11:20:16.984390 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/462049e2-388d-465c-b954-3df68a7c0aea-config-data\") pod \"462049e2-388d-465c-b954-3df68a7c0aea\" (UID: \"462049e2-388d-465c-b954-3df68a7c0aea\") " Dec 10 11:20:17 crc kubenswrapper[4780]: I1210 11:20:17.015254 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/462049e2-388d-465c-b954-3df68a7c0aea-kube-api-access-w8k6t" (OuterVolumeSpecName: "kube-api-access-w8k6t") pod "462049e2-388d-465c-b954-3df68a7c0aea" (UID: "462049e2-388d-465c-b954-3df68a7c0aea"). InnerVolumeSpecName "kube-api-access-w8k6t". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:20:17 crc kubenswrapper[4780]: I1210 11:20:17.083387 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/462049e2-388d-465c-b954-3df68a7c0aea-config-data" (OuterVolumeSpecName: "config-data") pod "462049e2-388d-465c-b954-3df68a7c0aea" (UID: "462049e2-388d-465c-b954-3df68a7c0aea"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:20:17 crc kubenswrapper[4780]: I1210 11:20:17.089292 4780 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w8k6t\" (UniqueName: \"kubernetes.io/projected/462049e2-388d-465c-b954-3df68a7c0aea-kube-api-access-w8k6t\") on node \"crc\" DevicePath \"\"" Dec 10 11:20:17 crc kubenswrapper[4780]: I1210 11:20:17.092703 4780 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/462049e2-388d-465c-b954-3df68a7c0aea-config-data\") on node \"crc\" DevicePath \"\"" Dec 10 11:20:17 crc kubenswrapper[4780]: I1210 11:20:17.094208 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/462049e2-388d-465c-b954-3df68a7c0aea-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "462049e2-388d-465c-b954-3df68a7c0aea" (UID: "462049e2-388d-465c-b954-3df68a7c0aea"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:20:17 crc kubenswrapper[4780]: I1210 11:20:17.201520 4780 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/462049e2-388d-465c-b954-3df68a7c0aea-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 10 11:20:17 crc kubenswrapper[4780]: I1210 11:20:17.226215 4780 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-97lpn" Dec 10 11:20:17 crc kubenswrapper[4780]: I1210 11:20:17.303285 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b3c9cd45-f8fa-4329-8ad0-0a07e2fd14ae-catalog-content\") pod \"b3c9cd45-f8fa-4329-8ad0-0a07e2fd14ae\" (UID: \"b3c9cd45-f8fa-4329-8ad0-0a07e2fd14ae\") " Dec 10 11:20:17 crc kubenswrapper[4780]: I1210 11:20:17.304044 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zxltm\" (UniqueName: \"kubernetes.io/projected/b3c9cd45-f8fa-4329-8ad0-0a07e2fd14ae-kube-api-access-zxltm\") pod \"b3c9cd45-f8fa-4329-8ad0-0a07e2fd14ae\" (UID: \"b3c9cd45-f8fa-4329-8ad0-0a07e2fd14ae\") " Dec 10 11:20:17 crc kubenswrapper[4780]: I1210 11:20:17.304313 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b3c9cd45-f8fa-4329-8ad0-0a07e2fd14ae-utilities\") pod \"b3c9cd45-f8fa-4329-8ad0-0a07e2fd14ae\" (UID: \"b3c9cd45-f8fa-4329-8ad0-0a07e2fd14ae\") " Dec 10 11:20:17 crc kubenswrapper[4780]: I1210 11:20:17.310003 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b3c9cd45-f8fa-4329-8ad0-0a07e2fd14ae-utilities" (OuterVolumeSpecName: "utilities") pod "b3c9cd45-f8fa-4329-8ad0-0a07e2fd14ae" (UID: "b3c9cd45-f8fa-4329-8ad0-0a07e2fd14ae"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 11:20:17 crc kubenswrapper[4780]: I1210 11:20:17.310156 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b3c9cd45-f8fa-4329-8ad0-0a07e2fd14ae-kube-api-access-zxltm" (OuterVolumeSpecName: "kube-api-access-zxltm") pod "b3c9cd45-f8fa-4329-8ad0-0a07e2fd14ae" (UID: "b3c9cd45-f8fa-4329-8ad0-0a07e2fd14ae"). InnerVolumeSpecName "kube-api-access-zxltm". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:20:17 crc kubenswrapper[4780]: I1210 11:20:17.408110 4780 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b3c9cd45-f8fa-4329-8ad0-0a07e2fd14ae-utilities\") on node \"crc\" DevicePath \"\"" Dec 10 11:20:17 crc kubenswrapper[4780]: I1210 11:20:17.408167 4780 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zxltm\" (UniqueName: \"kubernetes.io/projected/b3c9cd45-f8fa-4329-8ad0-0a07e2fd14ae-kube-api-access-zxltm\") on node \"crc\" DevicePath \"\"" Dec 10 11:20:17 crc kubenswrapper[4780]: I1210 11:20:17.419277 4780 generic.go:334] "Generic (PLEG): container finished" podID="b3c9cd45-f8fa-4329-8ad0-0a07e2fd14ae" containerID="0ecdb588478460eb0e42039dbfef5d7e34267638abd3463121326834079632bb" exitCode=0 Dec 10 11:20:17 crc kubenswrapper[4780]: I1210 11:20:17.419360 4780 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-97lpn" Dec 10 11:20:17 crc kubenswrapper[4780]: I1210 11:20:17.419391 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-97lpn" event={"ID":"b3c9cd45-f8fa-4329-8ad0-0a07e2fd14ae","Type":"ContainerDied","Data":"0ecdb588478460eb0e42039dbfef5d7e34267638abd3463121326834079632bb"} Dec 10 11:20:17 crc kubenswrapper[4780]: I1210 11:20:17.420336 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-97lpn" event={"ID":"b3c9cd45-f8fa-4329-8ad0-0a07e2fd14ae","Type":"ContainerDied","Data":"2ebc2f5bc2904854d418f2b0b26f9903ded5da0675c1bd3e10221f2966a67bd3"} Dec 10 11:20:17 crc kubenswrapper[4780]: I1210 11:20:17.420410 4780 scope.go:117] "RemoveContainer" containerID="0ecdb588478460eb0e42039dbfef5d7e34267638abd3463121326834079632bb" Dec 10 11:20:17 crc kubenswrapper[4780]: I1210 11:20:17.426229 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"1949c654-c734-4a35-a616-4fd761289785","Type":"ContainerStarted","Data":"aa85c186813fa8549396b43aee0b1fbdebc12b02d24fd0fc0760fef795289fe9"} Dec 10 11:20:17 crc kubenswrapper[4780]: I1210 11:20:17.426278 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"1949c654-c734-4a35-a616-4fd761289785","Type":"ContainerStarted","Data":"d408ef10d345d7553fc2f3a5447462b3eb264de44c46d9987522ae2fe0673f22"} Dec 10 11:20:17 crc kubenswrapper[4780]: I1210 11:20:17.438999 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"462049e2-388d-465c-b954-3df68a7c0aea","Type":"ContainerDied","Data":"bf0aa216b1b4865fb319f8f0f79d67caad8bdea66fb1fce6b6ff720e71056170"} Dec 10 11:20:17 crc kubenswrapper[4780]: I1210 11:20:17.439231 4780 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Dec 10 11:20:17 crc kubenswrapper[4780]: I1210 11:20:17.465726 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=3.465691457 podStartE2EDuration="3.465691457s" podCreationTimestamp="2025-12-10 11:20:14 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 11:20:17.452448399 +0000 UTC m=+2122.305841842" watchObservedRunningTime="2025-12-10 11:20:17.465691457 +0000 UTC m=+2122.319084900" Dec 10 11:20:17 crc kubenswrapper[4780]: I1210 11:20:17.466723 4780 scope.go:117] "RemoveContainer" containerID="33083dfe45beac3e561188028b2d9577b9668002b401bfa1250c90a82b2ef9cc" Dec 10 11:20:17 crc kubenswrapper[4780]: I1210 11:20:17.686317 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Dec 10 11:20:17 crc kubenswrapper[4780]: I1210 11:20:17.690064 4780 scope.go:117] "RemoveContainer" containerID="29c763556be07dd84a7172a7c97b6961a862b245662c113f98dcb2aab5d6df80" Dec 10 11:20:17 crc kubenswrapper[4780]: I1210 11:20:17.717004 4780 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-scheduler-0"] Dec 10 11:20:17 crc kubenswrapper[4780]: I1210 11:20:17.732379 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b3c9cd45-f8fa-4329-8ad0-0a07e2fd14ae-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "b3c9cd45-f8fa-4329-8ad0-0a07e2fd14ae" (UID: "b3c9cd45-f8fa-4329-8ad0-0a07e2fd14ae"). 
InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 11:20:17 crc kubenswrapper[4780]: I1210 11:20:17.736821 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"] Dec 10 11:20:17 crc kubenswrapper[4780]: E1210 11:20:17.737905 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b3c9cd45-f8fa-4329-8ad0-0a07e2fd14ae" containerName="registry-server" Dec 10 11:20:17 crc kubenswrapper[4780]: I1210 11:20:17.738046 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="b3c9cd45-f8fa-4329-8ad0-0a07e2fd14ae" containerName="registry-server" Dec 10 11:20:17 crc kubenswrapper[4780]: E1210 11:20:17.738154 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="462049e2-388d-465c-b954-3df68a7c0aea" containerName="nova-scheduler-scheduler" Dec 10 11:20:17 crc kubenswrapper[4780]: I1210 11:20:17.738217 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="462049e2-388d-465c-b954-3df68a7c0aea" containerName="nova-scheduler-scheduler" Dec 10 11:20:17 crc kubenswrapper[4780]: E1210 11:20:17.738294 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b3c9cd45-f8fa-4329-8ad0-0a07e2fd14ae" containerName="extract-content" Dec 10 11:20:17 crc kubenswrapper[4780]: I1210 11:20:17.738356 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="b3c9cd45-f8fa-4329-8ad0-0a07e2fd14ae" containerName="extract-content" Dec 10 11:20:17 crc kubenswrapper[4780]: E1210 11:20:17.738470 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b3c9cd45-f8fa-4329-8ad0-0a07e2fd14ae" containerName="extract-utilities" Dec 10 11:20:17 crc kubenswrapper[4780]: I1210 11:20:17.738553 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="b3c9cd45-f8fa-4329-8ad0-0a07e2fd14ae" containerName="extract-utilities" Dec 10 11:20:17 crc kubenswrapper[4780]: I1210 11:20:17.738974 4780 memory_manager.go:354] "RemoveStaleState removing state" podUID="b3c9cd45-f8fa-4329-8ad0-0a07e2fd14ae" containerName="registry-server" Dec 10 11:20:17 crc kubenswrapper[4780]: I1210 11:20:17.739116 4780 memory_manager.go:354] "RemoveStaleState removing state" podUID="462049e2-388d-465c-b954-3df68a7c0aea" containerName="nova-scheduler-scheduler" Dec 10 11:20:17 crc kubenswrapper[4780]: I1210 11:20:17.740507 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Dec 10 11:20:17 crc kubenswrapper[4780]: I1210 11:20:17.744008 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data" Dec 10 11:20:17 crc kubenswrapper[4780]: I1210 11:20:17.746513 4780 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b3c9cd45-f8fa-4329-8ad0-0a07e2fd14ae-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 10 11:20:17 crc kubenswrapper[4780]: I1210 11:20:17.755109 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Dec 10 11:20:17 crc kubenswrapper[4780]: I1210 11:20:17.771690 4780 scope.go:117] "RemoveContainer" containerID="0ecdb588478460eb0e42039dbfef5d7e34267638abd3463121326834079632bb" Dec 10 11:20:17 crc kubenswrapper[4780]: E1210 11:20:17.772940 4780 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0ecdb588478460eb0e42039dbfef5d7e34267638abd3463121326834079632bb\": container with ID starting with 0ecdb588478460eb0e42039dbfef5d7e34267638abd3463121326834079632bb not found: ID does not exist" containerID="0ecdb588478460eb0e42039dbfef5d7e34267638abd3463121326834079632bb" Dec 10 11:20:17 crc kubenswrapper[4780]: I1210 11:20:17.773030 4780 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0ecdb588478460eb0e42039dbfef5d7e34267638abd3463121326834079632bb"} err="failed to get container status \"0ecdb588478460eb0e42039dbfef5d7e34267638abd3463121326834079632bb\": rpc error: code = NotFound desc = could not find container \"0ecdb588478460eb0e42039dbfef5d7e34267638abd3463121326834079632bb\": container with ID starting with 0ecdb588478460eb0e42039dbfef5d7e34267638abd3463121326834079632bb not found: ID does not exist" Dec 10 11:20:17 crc kubenswrapper[4780]: I1210 11:20:17.773089 4780 scope.go:117] "RemoveContainer" containerID="33083dfe45beac3e561188028b2d9577b9668002b401bfa1250c90a82b2ef9cc" Dec 10 11:20:17 crc kubenswrapper[4780]: E1210 11:20:17.773543 4780 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"33083dfe45beac3e561188028b2d9577b9668002b401bfa1250c90a82b2ef9cc\": container with ID starting with 33083dfe45beac3e561188028b2d9577b9668002b401bfa1250c90a82b2ef9cc not found: ID does not exist" containerID="33083dfe45beac3e561188028b2d9577b9668002b401bfa1250c90a82b2ef9cc" Dec 10 11:20:17 crc kubenswrapper[4780]: I1210 11:20:17.773578 4780 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"33083dfe45beac3e561188028b2d9577b9668002b401bfa1250c90a82b2ef9cc"} err="failed to get container status \"33083dfe45beac3e561188028b2d9577b9668002b401bfa1250c90a82b2ef9cc\": rpc error: code = NotFound desc = could not find container \"33083dfe45beac3e561188028b2d9577b9668002b401bfa1250c90a82b2ef9cc\": container with ID starting with 33083dfe45beac3e561188028b2d9577b9668002b401bfa1250c90a82b2ef9cc not found: ID does not exist" Dec 10 11:20:17 crc kubenswrapper[4780]: I1210 11:20:17.773603 4780 scope.go:117] "RemoveContainer" containerID="29c763556be07dd84a7172a7c97b6961a862b245662c113f98dcb2aab5d6df80" Dec 10 11:20:17 crc kubenswrapper[4780]: E1210 11:20:17.774183 4780 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container 
\"29c763556be07dd84a7172a7c97b6961a862b245662c113f98dcb2aab5d6df80\": container with ID starting with 29c763556be07dd84a7172a7c97b6961a862b245662c113f98dcb2aab5d6df80 not found: ID does not exist" containerID="29c763556be07dd84a7172a7c97b6961a862b245662c113f98dcb2aab5d6df80" Dec 10 11:20:17 crc kubenswrapper[4780]: I1210 11:20:17.774228 4780 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"29c763556be07dd84a7172a7c97b6961a862b245662c113f98dcb2aab5d6df80"} err="failed to get container status \"29c763556be07dd84a7172a7c97b6961a862b245662c113f98dcb2aab5d6df80\": rpc error: code = NotFound desc = could not find container \"29c763556be07dd84a7172a7c97b6961a862b245662c113f98dcb2aab5d6df80\": container with ID starting with 29c763556be07dd84a7172a7c97b6961a862b245662c113f98dcb2aab5d6df80 not found: ID does not exist" Dec 10 11:20:17 crc kubenswrapper[4780]: I1210 11:20:17.774254 4780 scope.go:117] "RemoveContainer" containerID="a1a518432f9312ba0c4fd146c5002890d9676b95de8e2e786c1880cb6b14f211" Dec 10 11:20:17 crc kubenswrapper[4780]: I1210 11:20:17.851325 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/90d3129e-d394-45fa-bc1e-d576fb9e1ba5-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"90d3129e-d394-45fa-bc1e-d576fb9e1ba5\") " pod="openstack/nova-scheduler-0" Dec 10 11:20:17 crc kubenswrapper[4780]: I1210 11:20:17.851490 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-r6pfx\" (UniqueName: \"kubernetes.io/projected/90d3129e-d394-45fa-bc1e-d576fb9e1ba5-kube-api-access-r6pfx\") pod \"nova-scheduler-0\" (UID: \"90d3129e-d394-45fa-bc1e-d576fb9e1ba5\") " pod="openstack/nova-scheduler-0" Dec 10 11:20:17 crc kubenswrapper[4780]: I1210 11:20:17.852048 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/90d3129e-d394-45fa-bc1e-d576fb9e1ba5-config-data\") pod \"nova-scheduler-0\" (UID: \"90d3129e-d394-45fa-bc1e-d576fb9e1ba5\") " pod="openstack/nova-scheduler-0" Dec 10 11:20:17 crc kubenswrapper[4780]: I1210 11:20:17.954828 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/90d3129e-d394-45fa-bc1e-d576fb9e1ba5-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"90d3129e-d394-45fa-bc1e-d576fb9e1ba5\") " pod="openstack/nova-scheduler-0" Dec 10 11:20:17 crc kubenswrapper[4780]: I1210 11:20:17.955358 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-r6pfx\" (UniqueName: \"kubernetes.io/projected/90d3129e-d394-45fa-bc1e-d576fb9e1ba5-kube-api-access-r6pfx\") pod \"nova-scheduler-0\" (UID: \"90d3129e-d394-45fa-bc1e-d576fb9e1ba5\") " pod="openstack/nova-scheduler-0" Dec 10 11:20:17 crc kubenswrapper[4780]: I1210 11:20:17.955729 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/90d3129e-d394-45fa-bc1e-d576fb9e1ba5-config-data\") pod \"nova-scheduler-0\" (UID: \"90d3129e-d394-45fa-bc1e-d576fb9e1ba5\") " pod="openstack/nova-scheduler-0" Dec 10 11:20:17 crc kubenswrapper[4780]: I1210 11:20:17.965910 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/90d3129e-d394-45fa-bc1e-d576fb9e1ba5-config-data\") 
pod \"nova-scheduler-0\" (UID: \"90d3129e-d394-45fa-bc1e-d576fb9e1ba5\") " pod="openstack/nova-scheduler-0" Dec 10 11:20:17 crc kubenswrapper[4780]: I1210 11:20:17.966325 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/90d3129e-d394-45fa-bc1e-d576fb9e1ba5-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"90d3129e-d394-45fa-bc1e-d576fb9e1ba5\") " pod="openstack/nova-scheduler-0" Dec 10 11:20:17 crc kubenswrapper[4780]: I1210 11:20:17.980616 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-r6pfx\" (UniqueName: \"kubernetes.io/projected/90d3129e-d394-45fa-bc1e-d576fb9e1ba5-kube-api-access-r6pfx\") pod \"nova-scheduler-0\" (UID: \"90d3129e-d394-45fa-bc1e-d576fb9e1ba5\") " pod="openstack/nova-scheduler-0" Dec 10 11:20:17 crc kubenswrapper[4780]: I1210 11:20:17.983297 4780 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="462049e2-388d-465c-b954-3df68a7c0aea" path="/var/lib/kubelet/pods/462049e2-388d-465c-b954-3df68a7c0aea/volumes" Dec 10 11:20:18 crc kubenswrapper[4780]: I1210 11:20:18.124382 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-97lpn"] Dec 10 11:20:18 crc kubenswrapper[4780]: I1210 11:20:18.125173 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Dec 10 11:20:18 crc kubenswrapper[4780]: I1210 11:20:18.154841 4780 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-97lpn"] Dec 10 11:20:18 crc kubenswrapper[4780]: I1210 11:20:18.709305 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Dec 10 11:20:19 crc kubenswrapper[4780]: I1210 11:20:19.478350 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"90d3129e-d394-45fa-bc1e-d576fb9e1ba5","Type":"ContainerStarted","Data":"5651078fababf5362654c29df7272c95fe7c64346b40c1f3a5f1e7c647b691c1"} Dec 10 11:20:19 crc kubenswrapper[4780]: I1210 11:20:19.479048 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"90d3129e-d394-45fa-bc1e-d576fb9e1ba5","Type":"ContainerStarted","Data":"67513c66b6c38f0fc6dc8bc78fcd1d796bd7cb6413b5bd3c4e58092cc041f2da"} Dec 10 11:20:19 crc kubenswrapper[4780]: I1210 11:20:19.528720 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-scheduler-0" podStartSLOduration=2.528689286 podStartE2EDuration="2.528689286s" podCreationTimestamp="2025-12-10 11:20:17 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 11:20:19.520807134 +0000 UTC m=+2124.374200577" watchObservedRunningTime="2025-12-10 11:20:19.528689286 +0000 UTC m=+2124.382082729" Dec 10 11:20:19 crc kubenswrapper[4780]: I1210 11:20:19.974942 4780 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b3c9cd45-f8fa-4329-8ad0-0a07e2fd14ae" path="/var/lib/kubelet/pods/b3c9cd45-f8fa-4329-8ad0-0a07e2fd14ae/volumes" Dec 10 11:20:20 crc kubenswrapper[4780]: I1210 11:20:20.091227 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Dec 10 11:20:20 crc kubenswrapper[4780]: I1210 11:20:20.093081 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Dec 10 11:20:20 crc kubenswrapper[4780]: I1210 
11:20:20.507525 4780 generic.go:334] "Generic (PLEG): container finished" podID="8e71b1c2-9d57-4a03-b5c3-355c9f8ffb38" containerID="bceb163fcbcf587b9f40d7609abde1f15e4e433cbb9fda2b13bc1e3dbfd75a10" exitCode=137 Dec 10 11:20:20 crc kubenswrapper[4780]: I1210 11:20:20.507817 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"8e71b1c2-9d57-4a03-b5c3-355c9f8ffb38","Type":"ContainerDied","Data":"bceb163fcbcf587b9f40d7609abde1f15e4e433cbb9fda2b13bc1e3dbfd75a10"} Dec 10 11:20:20 crc kubenswrapper[4780]: I1210 11:20:20.784617 4780 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-0" Dec 10 11:20:20 crc kubenswrapper[4780]: I1210 11:20:20.887375 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8e71b1c2-9d57-4a03-b5c3-355c9f8ffb38-scripts\") pod \"8e71b1c2-9d57-4a03-b5c3-355c9f8ffb38\" (UID: \"8e71b1c2-9d57-4a03-b5c3-355c9f8ffb38\") " Dec 10 11:20:20 crc kubenswrapper[4780]: I1210 11:20:20.888018 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8e71b1c2-9d57-4a03-b5c3-355c9f8ffb38-config-data\") pod \"8e71b1c2-9d57-4a03-b5c3-355c9f8ffb38\" (UID: \"8e71b1c2-9d57-4a03-b5c3-355c9f8ffb38\") " Dec 10 11:20:20 crc kubenswrapper[4780]: I1210 11:20:20.888161 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8e71b1c2-9d57-4a03-b5c3-355c9f8ffb38-combined-ca-bundle\") pod \"8e71b1c2-9d57-4a03-b5c3-355c9f8ffb38\" (UID: \"8e71b1c2-9d57-4a03-b5c3-355c9f8ffb38\") " Dec 10 11:20:20 crc kubenswrapper[4780]: I1210 11:20:20.889322 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-p4xs5\" (UniqueName: \"kubernetes.io/projected/8e71b1c2-9d57-4a03-b5c3-355c9f8ffb38-kube-api-access-p4xs5\") pod \"8e71b1c2-9d57-4a03-b5c3-355c9f8ffb38\" (UID: \"8e71b1c2-9d57-4a03-b5c3-355c9f8ffb38\") " Dec 10 11:20:20 crc kubenswrapper[4780]: I1210 11:20:20.900798 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8e71b1c2-9d57-4a03-b5c3-355c9f8ffb38-kube-api-access-p4xs5" (OuterVolumeSpecName: "kube-api-access-p4xs5") pod "8e71b1c2-9d57-4a03-b5c3-355c9f8ffb38" (UID: "8e71b1c2-9d57-4a03-b5c3-355c9f8ffb38"). InnerVolumeSpecName "kube-api-access-p4xs5". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:20:20 crc kubenswrapper[4780]: I1210 11:20:20.901774 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8e71b1c2-9d57-4a03-b5c3-355c9f8ffb38-scripts" (OuterVolumeSpecName: "scripts") pod "8e71b1c2-9d57-4a03-b5c3-355c9f8ffb38" (UID: "8e71b1c2-9d57-4a03-b5c3-355c9f8ffb38"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:20:21 crc kubenswrapper[4780]: I1210 11:20:21.129576 4780 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8e71b1c2-9d57-4a03-b5c3-355c9f8ffb38-scripts\") on node \"crc\" DevicePath \"\"" Dec 10 11:20:21 crc kubenswrapper[4780]: I1210 11:20:21.133132 4780 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-p4xs5\" (UniqueName: \"kubernetes.io/projected/8e71b1c2-9d57-4a03-b5c3-355c9f8ffb38-kube-api-access-p4xs5\") on node \"crc\" DevicePath \"\"" Dec 10 11:20:21 crc kubenswrapper[4780]: I1210 11:20:21.460781 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8e71b1c2-9d57-4a03-b5c3-355c9f8ffb38-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "8e71b1c2-9d57-4a03-b5c3-355c9f8ffb38" (UID: "8e71b1c2-9d57-4a03-b5c3-355c9f8ffb38"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:20:21 crc kubenswrapper[4780]: I1210 11:20:21.565236 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8e71b1c2-9d57-4a03-b5c3-355c9f8ffb38-config-data" (OuterVolumeSpecName: "config-data") pod "8e71b1c2-9d57-4a03-b5c3-355c9f8ffb38" (UID: "8e71b1c2-9d57-4a03-b5c3-355c9f8ffb38"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:20:21 crc kubenswrapper[4780]: I1210 11:20:21.566378 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8e71b1c2-9d57-4a03-b5c3-355c9f8ffb38-config-data\") pod \"8e71b1c2-9d57-4a03-b5c3-355c9f8ffb38\" (UID: \"8e71b1c2-9d57-4a03-b5c3-355c9f8ffb38\") " Dec 10 11:20:21 crc kubenswrapper[4780]: I1210 11:20:21.567501 4780 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8e71b1c2-9d57-4a03-b5c3-355c9f8ffb38-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 10 11:20:21 crc kubenswrapper[4780]: W1210 11:20:21.567630 4780 empty_dir.go:500] Warning: Unmount skipped because path does not exist: /var/lib/kubelet/pods/8e71b1c2-9d57-4a03-b5c3-355c9f8ffb38/volumes/kubernetes.io~secret/config-data Dec 10 11:20:21 crc kubenswrapper[4780]: I1210 11:20:21.567646 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8e71b1c2-9d57-4a03-b5c3-355c9f8ffb38-config-data" (OuterVolumeSpecName: "config-data") pod "8e71b1c2-9d57-4a03-b5c3-355c9f8ffb38" (UID: "8e71b1c2-9d57-4a03-b5c3-355c9f8ffb38"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:20:21 crc kubenswrapper[4780]: I1210 11:20:21.580511 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"8e71b1c2-9d57-4a03-b5c3-355c9f8ffb38","Type":"ContainerDied","Data":"52adfe63335fef0025caf6afe0a4cadea9dbe2082ff88f81238a80f861e84d3d"} Dec 10 11:20:21 crc kubenswrapper[4780]: I1210 11:20:21.580592 4780 scope.go:117] "RemoveContainer" containerID="bceb163fcbcf587b9f40d7609abde1f15e4e433cbb9fda2b13bc1e3dbfd75a10" Dec 10 11:20:21 crc kubenswrapper[4780]: I1210 11:20:21.580895 4780 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/aodh-0" Dec 10 11:20:21 crc kubenswrapper[4780]: I1210 11:20:21.680060 4780 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8e71b1c2-9d57-4a03-b5c3-355c9f8ffb38-config-data\") on node \"crc\" DevicePath \"\"" Dec 10 11:20:21 crc kubenswrapper[4780]: I1210 11:20:21.713208 4780 scope.go:117] "RemoveContainer" containerID="1f045483cea1cd90becb2977645aace0fc7bf4b1571d2a09dff84094d3d9c8b8" Dec 10 11:20:21 crc kubenswrapper[4780]: I1210 11:20:21.738451 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/aodh-0"] Dec 10 11:20:21 crc kubenswrapper[4780]: I1210 11:20:21.770501 4780 scope.go:117] "RemoveContainer" containerID="2a9bbab168b2be23e863946fb6dfa43f3bf09f6a94e5557863b9216b6fdc87aa" Dec 10 11:20:21 crc kubenswrapper[4780]: I1210 11:20:21.778330 4780 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/aodh-0"] Dec 10 11:20:21 crc kubenswrapper[4780]: I1210 11:20:21.798879 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/aodh-0"] Dec 10 11:20:21 crc kubenswrapper[4780]: E1210 11:20:21.799815 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8e71b1c2-9d57-4a03-b5c3-355c9f8ffb38" containerName="aodh-listener" Dec 10 11:20:21 crc kubenswrapper[4780]: I1210 11:20:21.799846 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="8e71b1c2-9d57-4a03-b5c3-355c9f8ffb38" containerName="aodh-listener" Dec 10 11:20:21 crc kubenswrapper[4780]: E1210 11:20:21.799889 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8e71b1c2-9d57-4a03-b5c3-355c9f8ffb38" containerName="aodh-evaluator" Dec 10 11:20:21 crc kubenswrapper[4780]: I1210 11:20:21.799899 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="8e71b1c2-9d57-4a03-b5c3-355c9f8ffb38" containerName="aodh-evaluator" Dec 10 11:20:21 crc kubenswrapper[4780]: E1210 11:20:21.800009 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8e71b1c2-9d57-4a03-b5c3-355c9f8ffb38" containerName="aodh-api" Dec 10 11:20:21 crc kubenswrapper[4780]: I1210 11:20:21.800026 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="8e71b1c2-9d57-4a03-b5c3-355c9f8ffb38" containerName="aodh-api" Dec 10 11:20:21 crc kubenswrapper[4780]: E1210 11:20:21.800061 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8e71b1c2-9d57-4a03-b5c3-355c9f8ffb38" containerName="aodh-notifier" Dec 10 11:20:21 crc kubenswrapper[4780]: I1210 11:20:21.800072 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="8e71b1c2-9d57-4a03-b5c3-355c9f8ffb38" containerName="aodh-notifier" Dec 10 11:20:21 crc kubenswrapper[4780]: I1210 11:20:21.800450 4780 memory_manager.go:354] "RemoveStaleState removing state" podUID="8e71b1c2-9d57-4a03-b5c3-355c9f8ffb38" containerName="aodh-listener" Dec 10 11:20:21 crc kubenswrapper[4780]: I1210 11:20:21.800482 4780 memory_manager.go:354] "RemoveStaleState removing state" podUID="8e71b1c2-9d57-4a03-b5c3-355c9f8ffb38" containerName="aodh-api" Dec 10 11:20:21 crc kubenswrapper[4780]: I1210 11:20:21.800516 4780 memory_manager.go:354] "RemoveStaleState removing state" podUID="8e71b1c2-9d57-4a03-b5c3-355c9f8ffb38" containerName="aodh-evaluator" Dec 10 11:20:21 crc kubenswrapper[4780]: I1210 11:20:21.800541 4780 memory_manager.go:354] "RemoveStaleState removing state" podUID="8e71b1c2-9d57-4a03-b5c3-355c9f8ffb38" containerName="aodh-notifier" Dec 10 11:20:21 crc kubenswrapper[4780]: I1210 11:20:21.804203 4780 util.go:30] "No sandbox for pod 
can be found. Need to start a new one" pod="openstack/aodh-0" Dec 10 11:20:21 crc kubenswrapper[4780]: I1210 11:20:21.809945 4780 scope.go:117] "RemoveContainer" containerID="8d01dafc17d1fbcd3e076e24bea29818b9359b2f327d963a78c13fa5998f177f" Dec 10 11:20:21 crc kubenswrapper[4780]: I1210 11:20:21.810798 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-aodh-public-svc" Dec 10 11:20:21 crc kubenswrapper[4780]: I1210 11:20:21.810944 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-aodh-internal-svc" Dec 10 11:20:21 crc kubenswrapper[4780]: I1210 11:20:21.811024 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"aodh-scripts" Dec 10 11:20:21 crc kubenswrapper[4780]: I1210 11:20:21.811043 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"telemetry-autoscaling-dockercfg-hflbp" Dec 10 11:20:21 crc kubenswrapper[4780]: I1210 11:20:21.811275 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"aodh-config-data" Dec 10 11:20:21 crc kubenswrapper[4780]: I1210 11:20:21.867757 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-0"] Dec 10 11:20:21 crc kubenswrapper[4780]: I1210 11:20:21.886703 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ae414d98-62ae-4d0c-a76e-0f7af6e32080-scripts\") pod \"aodh-0\" (UID: \"ae414d98-62ae-4d0c-a76e-0f7af6e32080\") " pod="openstack/aodh-0" Dec 10 11:20:21 crc kubenswrapper[4780]: I1210 11:20:21.886907 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/ae414d98-62ae-4d0c-a76e-0f7af6e32080-public-tls-certs\") pod \"aodh-0\" (UID: \"ae414d98-62ae-4d0c-a76e-0f7af6e32080\") " pod="openstack/aodh-0" Dec 10 11:20:21 crc kubenswrapper[4780]: I1210 11:20:21.886966 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/ae414d98-62ae-4d0c-a76e-0f7af6e32080-internal-tls-certs\") pod \"aodh-0\" (UID: \"ae414d98-62ae-4d0c-a76e-0f7af6e32080\") " pod="openstack/aodh-0" Dec 10 11:20:21 crc kubenswrapper[4780]: I1210 11:20:21.887061 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ae414d98-62ae-4d0c-a76e-0f7af6e32080-config-data\") pod \"aodh-0\" (UID: \"ae414d98-62ae-4d0c-a76e-0f7af6e32080\") " pod="openstack/aodh-0" Dec 10 11:20:21 crc kubenswrapper[4780]: I1210 11:20:21.887131 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kfmnx\" (UniqueName: \"kubernetes.io/projected/ae414d98-62ae-4d0c-a76e-0f7af6e32080-kube-api-access-kfmnx\") pod \"aodh-0\" (UID: \"ae414d98-62ae-4d0c-a76e-0f7af6e32080\") " pod="openstack/aodh-0" Dec 10 11:20:21 crc kubenswrapper[4780]: I1210 11:20:21.887228 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ae414d98-62ae-4d0c-a76e-0f7af6e32080-combined-ca-bundle\") pod \"aodh-0\" (UID: \"ae414d98-62ae-4d0c-a76e-0f7af6e32080\") " pod="openstack/aodh-0" Dec 10 11:20:21 crc kubenswrapper[4780]: I1210 11:20:21.981222 4780 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" 
podUID="8e71b1c2-9d57-4a03-b5c3-355c9f8ffb38" path="/var/lib/kubelet/pods/8e71b1c2-9d57-4a03-b5c3-355c9f8ffb38/volumes" Dec 10 11:20:21 crc kubenswrapper[4780]: I1210 11:20:21.989370 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ae414d98-62ae-4d0c-a76e-0f7af6e32080-scripts\") pod \"aodh-0\" (UID: \"ae414d98-62ae-4d0c-a76e-0f7af6e32080\") " pod="openstack/aodh-0" Dec 10 11:20:21 crc kubenswrapper[4780]: I1210 11:20:21.989507 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/ae414d98-62ae-4d0c-a76e-0f7af6e32080-public-tls-certs\") pod \"aodh-0\" (UID: \"ae414d98-62ae-4d0c-a76e-0f7af6e32080\") " pod="openstack/aodh-0" Dec 10 11:20:21 crc kubenswrapper[4780]: I1210 11:20:21.989531 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/ae414d98-62ae-4d0c-a76e-0f7af6e32080-internal-tls-certs\") pod \"aodh-0\" (UID: \"ae414d98-62ae-4d0c-a76e-0f7af6e32080\") " pod="openstack/aodh-0" Dec 10 11:20:21 crc kubenswrapper[4780]: I1210 11:20:21.989563 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ae414d98-62ae-4d0c-a76e-0f7af6e32080-config-data\") pod \"aodh-0\" (UID: \"ae414d98-62ae-4d0c-a76e-0f7af6e32080\") " pod="openstack/aodh-0" Dec 10 11:20:21 crc kubenswrapper[4780]: I1210 11:20:21.989601 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kfmnx\" (UniqueName: \"kubernetes.io/projected/ae414d98-62ae-4d0c-a76e-0f7af6e32080-kube-api-access-kfmnx\") pod \"aodh-0\" (UID: \"ae414d98-62ae-4d0c-a76e-0f7af6e32080\") " pod="openstack/aodh-0" Dec 10 11:20:21 crc kubenswrapper[4780]: I1210 11:20:21.989646 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ae414d98-62ae-4d0c-a76e-0f7af6e32080-combined-ca-bundle\") pod \"aodh-0\" (UID: \"ae414d98-62ae-4d0c-a76e-0f7af6e32080\") " pod="openstack/aodh-0" Dec 10 11:20:21 crc kubenswrapper[4780]: I1210 11:20:21.995820 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/ae414d98-62ae-4d0c-a76e-0f7af6e32080-public-tls-certs\") pod \"aodh-0\" (UID: \"ae414d98-62ae-4d0c-a76e-0f7af6e32080\") " pod="openstack/aodh-0" Dec 10 11:20:21 crc kubenswrapper[4780]: I1210 11:20:21.997502 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/ae414d98-62ae-4d0c-a76e-0f7af6e32080-internal-tls-certs\") pod \"aodh-0\" (UID: \"ae414d98-62ae-4d0c-a76e-0f7af6e32080\") " pod="openstack/aodh-0" Dec 10 11:20:21 crc kubenswrapper[4780]: I1210 11:20:21.997897 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ae414d98-62ae-4d0c-a76e-0f7af6e32080-scripts\") pod \"aodh-0\" (UID: \"ae414d98-62ae-4d0c-a76e-0f7af6e32080\") " pod="openstack/aodh-0" Dec 10 11:20:22 crc kubenswrapper[4780]: I1210 11:20:22.002363 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ae414d98-62ae-4d0c-a76e-0f7af6e32080-combined-ca-bundle\") pod \"aodh-0\" (UID: \"ae414d98-62ae-4d0c-a76e-0f7af6e32080\") " pod="openstack/aodh-0" Dec 10 11:20:22 crc 
kubenswrapper[4780]: I1210 11:20:22.006330 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ae414d98-62ae-4d0c-a76e-0f7af6e32080-config-data\") pod \"aodh-0\" (UID: \"ae414d98-62ae-4d0c-a76e-0f7af6e32080\") " pod="openstack/aodh-0" Dec 10 11:20:22 crc kubenswrapper[4780]: I1210 11:20:22.017808 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kfmnx\" (UniqueName: \"kubernetes.io/projected/ae414d98-62ae-4d0c-a76e-0f7af6e32080-kube-api-access-kfmnx\") pod \"aodh-0\" (UID: \"ae414d98-62ae-4d0c-a76e-0f7af6e32080\") " pod="openstack/aodh-0" Dec 10 11:20:22 crc kubenswrapper[4780]: I1210 11:20:22.155633 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-0" Dec 10 11:20:22 crc kubenswrapper[4780]: I1210 11:20:22.722952 4780 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Dec 10 11:20:22 crc kubenswrapper[4780]: I1210 11:20:22.723951 4780 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Dec 10 11:20:22 crc kubenswrapper[4780]: I1210 11:20:22.912868 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-0"] Dec 10 11:20:23 crc kubenswrapper[4780]: I1210 11:20:23.127137 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0" Dec 10 11:20:23 crc kubenswrapper[4780]: I1210 11:20:23.621045 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"ae414d98-62ae-4d0c-a76e-0f7af6e32080","Type":"ContainerStarted","Data":"874e87894a71c6caf0b4d9944ef6e6fd107133848e47b4007eeee3079c8edcc6"} Dec 10 11:20:23 crc kubenswrapper[4780]: I1210 11:20:23.736423 4780 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="de3b2cd2-6566-45ee-b000-47ebd28169a9" containerName="nova-api-log" probeResult="failure" output="Get \"https://10.217.1.5:8774/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Dec 10 11:20:23 crc kubenswrapper[4780]: I1210 11:20:23.736622 4780 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="de3b2cd2-6566-45ee-b000-47ebd28169a9" containerName="nova-api-api" probeResult="failure" output="Get \"https://10.217.1.5:8774/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Dec 10 11:20:24 crc kubenswrapper[4780]: I1210 11:20:24.637419 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"ae414d98-62ae-4d0c-a76e-0f7af6e32080","Type":"ContainerStarted","Data":"8a6932853af91092f68901137cc6772aceb95f1f24bd3d6fb40730e1550a5ea5"} Dec 10 11:20:25 crc kubenswrapper[4780]: I1210 11:20:25.256766 4780 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Dec 10 11:20:25 crc kubenswrapper[4780]: I1210 11:20:25.256854 4780 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Dec 10 11:20:25 crc kubenswrapper[4780]: I1210 11:20:25.706135 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"ae414d98-62ae-4d0c-a76e-0f7af6e32080","Type":"ContainerStarted","Data":"5bef2a75cd1d4a58779b1252df2b84f92d5084044c690ee230a53bc1059b33c5"} Dec 10 11:20:26 crc kubenswrapper[4780]: I1210 11:20:26.263346 4780 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" 
podUID="1949c654-c734-4a35-a616-4fd761289785" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"https://10.217.1.6:8775/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Dec 10 11:20:26 crc kubenswrapper[4780]: I1210 11:20:26.268237 4780 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="1949c654-c734-4a35-a616-4fd761289785" containerName="nova-metadata-log" probeResult="failure" output="Get \"https://10.217.1.6:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Dec 10 11:20:26 crc kubenswrapper[4780]: I1210 11:20:26.725125 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"ae414d98-62ae-4d0c-a76e-0f7af6e32080","Type":"ContainerStarted","Data":"9130b2514217f61635f671868bf02f14487184de8430e9a8c6afa32a9c07c207"} Dec 10 11:20:28 crc kubenswrapper[4780]: I1210 11:20:28.320975 4780 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0" Dec 10 11:20:28 crc kubenswrapper[4780]: I1210 11:20:28.425935 4780 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-scheduler-0" Dec 10 11:20:28 crc kubenswrapper[4780]: I1210 11:20:28.762946 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"ae414d98-62ae-4d0c-a76e-0f7af6e32080","Type":"ContainerStarted","Data":"372f07517b964b0f0c6074d55a64d41457d9c57e2d64611d9538e497ec92864c"} Dec 10 11:20:28 crc kubenswrapper[4780]: I1210 11:20:28.811569 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/aodh-0" podStartSLOduration=3.272925539 podStartE2EDuration="7.811527168s" podCreationTimestamp="2025-12-10 11:20:21 +0000 UTC" firstStartedPulling="2025-12-10 11:20:22.917196395 +0000 UTC m=+2127.770589838" lastFinishedPulling="2025-12-10 11:20:27.455798014 +0000 UTC m=+2132.309191467" observedRunningTime="2025-12-10 11:20:28.80770301 +0000 UTC m=+2133.661096453" watchObservedRunningTime="2025-12-10 11:20:28.811527168 +0000 UTC m=+2133.664920621" Dec 10 11:20:28 crc kubenswrapper[4780]: I1210 11:20:28.877549 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-scheduler-0" Dec 10 11:20:32 crc kubenswrapper[4780]: I1210 11:20:32.731022 4780 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Dec 10 11:20:32 crc kubenswrapper[4780]: I1210 11:20:32.732447 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Dec 10 11:20:32 crc kubenswrapper[4780]: I1210 11:20:32.732982 4780 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Dec 10 11:20:32 crc kubenswrapper[4780]: I1210 11:20:32.741297 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Dec 10 11:20:32 crc kubenswrapper[4780]: I1210 11:20:32.821115 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Dec 10 11:20:32 crc kubenswrapper[4780]: I1210 11:20:32.830016 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Dec 10 11:20:34 crc kubenswrapper[4780]: I1210 11:20:34.598759 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ceilometer-0" Dec 10 11:20:35 crc kubenswrapper[4780]: I1210 11:20:35.098546 4780 kubelet.go:2542] "SyncLoop (probe)" 
probe="startup" status="started" pod="openstack/nova-metadata-0" Dec 10 11:20:35 crc kubenswrapper[4780]: I1210 11:20:35.103331 4780 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Dec 10 11:20:35 crc kubenswrapper[4780]: I1210 11:20:35.104326 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0" Dec 10 11:20:35 crc kubenswrapper[4780]: I1210 11:20:35.879711 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0" Dec 10 11:20:40 crc kubenswrapper[4780]: I1210 11:20:40.608617 4780 scope.go:117] "RemoveContainer" containerID="955ce765adb3c325210d940e928848d3dd783b544db9bf9dbe0753af449c93a7" Dec 10 11:20:46 crc kubenswrapper[4780]: I1210 11:20:46.983691 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-db-sync-vcml6"] Dec 10 11:20:47 crc kubenswrapper[4780]: I1210 11:20:47.000563 4780 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/heat-db-sync-vcml6"] Dec 10 11:20:47 crc kubenswrapper[4780]: I1210 11:20:47.071234 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-db-sync-nd4t7"] Dec 10 11:20:47 crc kubenswrapper[4780]: I1210 11:20:47.074726 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-db-sync-nd4t7" Dec 10 11:20:47 crc kubenswrapper[4780]: I1210 11:20:47.093604 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-db-sync-nd4t7"] Dec 10 11:20:47 crc kubenswrapper[4780]: I1210 11:20:47.215677 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fh6ms\" (UniqueName: \"kubernetes.io/projected/4ba2892c-316e-4819-a33c-d7b2b6803553-kube-api-access-fh6ms\") pod \"heat-db-sync-nd4t7\" (UID: \"4ba2892c-316e-4819-a33c-d7b2b6803553\") " pod="openstack/heat-db-sync-nd4t7" Dec 10 11:20:47 crc kubenswrapper[4780]: I1210 11:20:47.216058 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4ba2892c-316e-4819-a33c-d7b2b6803553-config-data\") pod \"heat-db-sync-nd4t7\" (UID: \"4ba2892c-316e-4819-a33c-d7b2b6803553\") " pod="openstack/heat-db-sync-nd4t7" Dec 10 11:20:47 crc kubenswrapper[4780]: I1210 11:20:47.216504 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4ba2892c-316e-4819-a33c-d7b2b6803553-combined-ca-bundle\") pod \"heat-db-sync-nd4t7\" (UID: \"4ba2892c-316e-4819-a33c-d7b2b6803553\") " pod="openstack/heat-db-sync-nd4t7" Dec 10 11:20:47 crc kubenswrapper[4780]: I1210 11:20:47.319787 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4ba2892c-316e-4819-a33c-d7b2b6803553-combined-ca-bundle\") pod \"heat-db-sync-nd4t7\" (UID: \"4ba2892c-316e-4819-a33c-d7b2b6803553\") " pod="openstack/heat-db-sync-nd4t7" Dec 10 11:20:47 crc kubenswrapper[4780]: I1210 11:20:47.320519 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fh6ms\" (UniqueName: \"kubernetes.io/projected/4ba2892c-316e-4819-a33c-d7b2b6803553-kube-api-access-fh6ms\") pod \"heat-db-sync-nd4t7\" (UID: \"4ba2892c-316e-4819-a33c-d7b2b6803553\") " pod="openstack/heat-db-sync-nd4t7" Dec 10 11:20:47 crc kubenswrapper[4780]: I1210 11:20:47.320631 4780 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4ba2892c-316e-4819-a33c-d7b2b6803553-config-data\") pod \"heat-db-sync-nd4t7\" (UID: \"4ba2892c-316e-4819-a33c-d7b2b6803553\") " pod="openstack/heat-db-sync-nd4t7" Dec 10 11:20:47 crc kubenswrapper[4780]: I1210 11:20:47.329609 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4ba2892c-316e-4819-a33c-d7b2b6803553-config-data\") pod \"heat-db-sync-nd4t7\" (UID: \"4ba2892c-316e-4819-a33c-d7b2b6803553\") " pod="openstack/heat-db-sync-nd4t7" Dec 10 11:20:47 crc kubenswrapper[4780]: I1210 11:20:47.331637 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4ba2892c-316e-4819-a33c-d7b2b6803553-combined-ca-bundle\") pod \"heat-db-sync-nd4t7\" (UID: \"4ba2892c-316e-4819-a33c-d7b2b6803553\") " pod="openstack/heat-db-sync-nd4t7" Dec 10 11:20:47 crc kubenswrapper[4780]: I1210 11:20:47.347510 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fh6ms\" (UniqueName: \"kubernetes.io/projected/4ba2892c-316e-4819-a33c-d7b2b6803553-kube-api-access-fh6ms\") pod \"heat-db-sync-nd4t7\" (UID: \"4ba2892c-316e-4819-a33c-d7b2b6803553\") " pod="openstack/heat-db-sync-nd4t7" Dec 10 11:20:47 crc kubenswrapper[4780]: I1210 11:20:47.409757 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-db-sync-nd4t7" Dec 10 11:20:47 crc kubenswrapper[4780]: I1210 11:20:47.998286 4780 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="253b0c60-6211-4e23-921c-b8c34ccc4e25" path="/var/lib/kubelet/pods/253b0c60-6211-4e23-921c-b8c34ccc4e25/volumes" Dec 10 11:20:48 crc kubenswrapper[4780]: I1210 11:20:48.238194 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-db-sync-nd4t7"] Dec 10 11:20:48 crc kubenswrapper[4780]: W1210 11:20:48.241420 4780 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod4ba2892c_316e_4819_a33c_d7b2b6803553.slice/crio-62d7de6c9436a0ccdcf0e40f055c3360b24dc5de3c0187477a3ed295142e324f WatchSource:0}: Error finding container 62d7de6c9436a0ccdcf0e40f055c3360b24dc5de3c0187477a3ed295142e324f: Status 404 returned error can't find the container with id 62d7de6c9436a0ccdcf0e40f055c3360b24dc5de3c0187477a3ed295142e324f Dec 10 11:20:48 crc kubenswrapper[4780]: E1210 11:20:48.390294 4780 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested" Dec 10 11:20:48 crc kubenswrapper[4780]: E1210 11:20:48.390407 4780 kuberuntime_image.go:55] "Failed to pull image" err="initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested" Dec 10 11:20:48 crc kubenswrapper[4780]: E1210 11:20:48.390646 4780 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:heat-db-sync,Image:quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested,Command:[/bin/bash],Args:[-c /usr/bin/heat-manage --config-dir /etc/heat/heat.conf.d db_sync],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:true,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/heat/heat.conf.d/00-default.conf,SubPath:00-default.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:false,MountPath:/etc/heat/heat.conf.d/01-custom.conf,SubPath:01-custom.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/my.cnf,SubPath:my.cnf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-fh6ms,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42418,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:*42418,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod heat-db-sync-nd4t7_openstack(4ba2892c-316e-4819-a33c-d7b2b6803553): ErrImagePull: initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" logger="UnhandledError" Dec 10 11:20:48 crc kubenswrapper[4780]: E1210 11:20:48.391950 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ErrImagePull: \"initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 11:20:48 crc kubenswrapper[4780]: I1210 11:20:48.414190 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-db-sync-nd4t7" event={"ID":"4ba2892c-316e-4819-a33c-d7b2b6803553","Type":"ContainerStarted","Data":"62d7de6c9436a0ccdcf0e40f055c3360b24dc5de3c0187477a3ed295142e324f"} Dec 10 11:20:48 crc kubenswrapper[4780]: E1210 11:20:48.418471 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 11:20:49 crc kubenswrapper[4780]: E1210 11:20:49.446636 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 11:20:49 crc kubenswrapper[4780]: I1210 11:20:49.889627 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Dec 10 11:20:49 crc kubenswrapper[4780]: I1210 11:20:49.890078 4780 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="cba3d639-6379-42fd-bdb7-072dcf87f78f" containerName="ceilometer-central-agent" containerID="cri-o://25f0df2fa9b782063ef3d7c20587bb703538055926bde0fe32614c8c7aef7812" gracePeriod=30 Dec 10 11:20:49 crc kubenswrapper[4780]: I1210 11:20:49.890184 4780 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="cba3d639-6379-42fd-bdb7-072dcf87f78f" containerName="proxy-httpd" containerID="cri-o://b372237d665234eacceab5899c5c9f861810efb0e9e5252a73ff3d910a9af801" gracePeriod=30 Dec 10 11:20:49 crc kubenswrapper[4780]: I1210 11:20:49.890263 4780 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="cba3d639-6379-42fd-bdb7-072dcf87f78f" containerName="sg-core" containerID="cri-o://ea52ddb186d02a6a57a149772c450dfda5d185d102ab3134eca37301437fe980" gracePeriod=30 Dec 10 11:20:49 crc kubenswrapper[4780]: I1210 11:20:49.890312 4780 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="cba3d639-6379-42fd-bdb7-072dcf87f78f" containerName="ceilometer-notification-agent" containerID="cri-o://7464de1995d228f9845884516bd18c1e32598a04ab32fd93b3925a534f3a5771" gracePeriod=30 Dec 10 11:20:49 crc kubenswrapper[4780]: I1210 11:20:49.939966 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-server-0"] Dec 10 11:20:50 crc kubenswrapper[4780]: I1210 11:20:50.086565 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Dec 10 11:20:50 crc kubenswrapper[4780]: I1210 11:20:50.457804 4780 generic.go:334] "Generic (PLEG): container finished" podID="cba3d639-6379-42fd-bdb7-072dcf87f78f" containerID="b372237d665234eacceab5899c5c9f861810efb0e9e5252a73ff3d910a9af801" exitCode=0 Dec 10 11:20:50 crc kubenswrapper[4780]: I1210 11:20:50.457862 4780 generic.go:334] "Generic (PLEG): container finished" podID="cba3d639-6379-42fd-bdb7-072dcf87f78f" 
containerID="ea52ddb186d02a6a57a149772c450dfda5d185d102ab3134eca37301437fe980" exitCode=2 Dec 10 11:20:50 crc kubenswrapper[4780]: I1210 11:20:50.457861 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"cba3d639-6379-42fd-bdb7-072dcf87f78f","Type":"ContainerDied","Data":"b372237d665234eacceab5899c5c9f861810efb0e9e5252a73ff3d910a9af801"} Dec 10 11:20:50 crc kubenswrapper[4780]: I1210 11:20:50.457958 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"cba3d639-6379-42fd-bdb7-072dcf87f78f","Type":"ContainerDied","Data":"ea52ddb186d02a6a57a149772c450dfda5d185d102ab3134eca37301437fe980"} Dec 10 11:20:51 crc kubenswrapper[4780]: I1210 11:20:51.482809 4780 generic.go:334] "Generic (PLEG): container finished" podID="cba3d639-6379-42fd-bdb7-072dcf87f78f" containerID="25f0df2fa9b782063ef3d7c20587bb703538055926bde0fe32614c8c7aef7812" exitCode=0 Dec 10 11:20:51 crc kubenswrapper[4780]: I1210 11:20:51.483534 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"cba3d639-6379-42fd-bdb7-072dcf87f78f","Type":"ContainerDied","Data":"25f0df2fa9b782063ef3d7c20587bb703538055926bde0fe32614c8c7aef7812"} Dec 10 11:20:56 crc kubenswrapper[4780]: I1210 11:20:56.607186 4780 generic.go:334] "Generic (PLEG): container finished" podID="cba3d639-6379-42fd-bdb7-072dcf87f78f" containerID="7464de1995d228f9845884516bd18c1e32598a04ab32fd93b3925a534f3a5771" exitCode=0 Dec 10 11:20:56 crc kubenswrapper[4780]: I1210 11:20:56.607965 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"cba3d639-6379-42fd-bdb7-072dcf87f78f","Type":"ContainerDied","Data":"7464de1995d228f9845884516bd18c1e32598a04ab32fd93b3925a534f3a5771"} Dec 10 11:20:56 crc kubenswrapper[4780]: I1210 11:20:56.838871 4780 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Dec 10 11:20:56 crc kubenswrapper[4780]: I1210 11:20:56.949167 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cba3d639-6379-42fd-bdb7-072dcf87f78f-combined-ca-bundle\") pod \"cba3d639-6379-42fd-bdb7-072dcf87f78f\" (UID: \"cba3d639-6379-42fd-bdb7-072dcf87f78f\") " Dec 10 11:20:56 crc kubenswrapper[4780]: I1210 11:20:56.949781 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/cba3d639-6379-42fd-bdb7-072dcf87f78f-run-httpd\") pod \"cba3d639-6379-42fd-bdb7-072dcf87f78f\" (UID: \"cba3d639-6379-42fd-bdb7-072dcf87f78f\") " Dec 10 11:20:56 crc kubenswrapper[4780]: I1210 11:20:56.951365 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/cba3d639-6379-42fd-bdb7-072dcf87f78f-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "cba3d639-6379-42fd-bdb7-072dcf87f78f" (UID: "cba3d639-6379-42fd-bdb7-072dcf87f78f"). InnerVolumeSpecName "run-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 11:20:56 crc kubenswrapper[4780]: I1210 11:20:56.952367 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/cba3d639-6379-42fd-bdb7-072dcf87f78f-scripts\") pod \"cba3d639-6379-42fd-bdb7-072dcf87f78f\" (UID: \"cba3d639-6379-42fd-bdb7-072dcf87f78f\") " Dec 10 11:20:56 crc kubenswrapper[4780]: I1210 11:20:56.952433 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/cba3d639-6379-42fd-bdb7-072dcf87f78f-log-httpd\") pod \"cba3d639-6379-42fd-bdb7-072dcf87f78f\" (UID: \"cba3d639-6379-42fd-bdb7-072dcf87f78f\") " Dec 10 11:20:56 crc kubenswrapper[4780]: I1210 11:20:56.952512 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cba3d639-6379-42fd-bdb7-072dcf87f78f-config-data\") pod \"cba3d639-6379-42fd-bdb7-072dcf87f78f\" (UID: \"cba3d639-6379-42fd-bdb7-072dcf87f78f\") " Dec 10 11:20:56 crc kubenswrapper[4780]: I1210 11:20:56.952561 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-768kb\" (UniqueName: \"kubernetes.io/projected/cba3d639-6379-42fd-bdb7-072dcf87f78f-kube-api-access-768kb\") pod \"cba3d639-6379-42fd-bdb7-072dcf87f78f\" (UID: \"cba3d639-6379-42fd-bdb7-072dcf87f78f\") " Dec 10 11:20:56 crc kubenswrapper[4780]: I1210 11:20:56.952730 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/cba3d639-6379-42fd-bdb7-072dcf87f78f-sg-core-conf-yaml\") pod \"cba3d639-6379-42fd-bdb7-072dcf87f78f\" (UID: \"cba3d639-6379-42fd-bdb7-072dcf87f78f\") " Dec 10 11:20:56 crc kubenswrapper[4780]: I1210 11:20:56.952840 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/cba3d639-6379-42fd-bdb7-072dcf87f78f-ceilometer-tls-certs\") pod \"cba3d639-6379-42fd-bdb7-072dcf87f78f\" (UID: \"cba3d639-6379-42fd-bdb7-072dcf87f78f\") " Dec 10 11:20:56 crc kubenswrapper[4780]: I1210 11:20:56.955301 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/cba3d639-6379-42fd-bdb7-072dcf87f78f-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "cba3d639-6379-42fd-bdb7-072dcf87f78f" (UID: "cba3d639-6379-42fd-bdb7-072dcf87f78f"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 11:20:56 crc kubenswrapper[4780]: I1210 11:20:56.963704 4780 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/cba3d639-6379-42fd-bdb7-072dcf87f78f-run-httpd\") on node \"crc\" DevicePath \"\"" Dec 10 11:20:56 crc kubenswrapper[4780]: I1210 11:20:56.963758 4780 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/cba3d639-6379-42fd-bdb7-072dcf87f78f-log-httpd\") on node \"crc\" DevicePath \"\"" Dec 10 11:20:56 crc kubenswrapper[4780]: I1210 11:20:56.996825 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cba3d639-6379-42fd-bdb7-072dcf87f78f-kube-api-access-768kb" (OuterVolumeSpecName: "kube-api-access-768kb") pod "cba3d639-6379-42fd-bdb7-072dcf87f78f" (UID: "cba3d639-6379-42fd-bdb7-072dcf87f78f"). InnerVolumeSpecName "kube-api-access-768kb". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:20:56 crc kubenswrapper[4780]: I1210 11:20:56.997555 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cba3d639-6379-42fd-bdb7-072dcf87f78f-scripts" (OuterVolumeSpecName: "scripts") pod "cba3d639-6379-42fd-bdb7-072dcf87f78f" (UID: "cba3d639-6379-42fd-bdb7-072dcf87f78f"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:20:57 crc kubenswrapper[4780]: I1210 11:20:57.066399 4780 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/cba3d639-6379-42fd-bdb7-072dcf87f78f-scripts\") on node \"crc\" DevicePath \"\"" Dec 10 11:20:57 crc kubenswrapper[4780]: I1210 11:20:57.066442 4780 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-768kb\" (UniqueName: \"kubernetes.io/projected/cba3d639-6379-42fd-bdb7-072dcf87f78f-kube-api-access-768kb\") on node \"crc\" DevicePath \"\"" Dec 10 11:20:57 crc kubenswrapper[4780]: I1210 11:20:57.137702 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cba3d639-6379-42fd-bdb7-072dcf87f78f-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "cba3d639-6379-42fd-bdb7-072dcf87f78f" (UID: "cba3d639-6379-42fd-bdb7-072dcf87f78f"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:20:57 crc kubenswrapper[4780]: I1210 11:20:57.170854 4780 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/cba3d639-6379-42fd-bdb7-072dcf87f78f-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Dec 10 11:20:57 crc kubenswrapper[4780]: I1210 11:20:57.175114 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cba3d639-6379-42fd-bdb7-072dcf87f78f-ceilometer-tls-certs" (OuterVolumeSpecName: "ceilometer-tls-certs") pod "cba3d639-6379-42fd-bdb7-072dcf87f78f" (UID: "cba3d639-6379-42fd-bdb7-072dcf87f78f"). InnerVolumeSpecName "ceilometer-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:20:57 crc kubenswrapper[4780]: I1210 11:20:57.255390 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cba3d639-6379-42fd-bdb7-072dcf87f78f-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "cba3d639-6379-42fd-bdb7-072dcf87f78f" (UID: "cba3d639-6379-42fd-bdb7-072dcf87f78f"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:20:57 crc kubenswrapper[4780]: I1210 11:20:57.282745 4780 reconciler_common.go:293] "Volume detached for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/cba3d639-6379-42fd-bdb7-072dcf87f78f-ceilometer-tls-certs\") on node \"crc\" DevicePath \"\"" Dec 10 11:20:57 crc kubenswrapper[4780]: I1210 11:20:57.282966 4780 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cba3d639-6379-42fd-bdb7-072dcf87f78f-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 10 11:20:57 crc kubenswrapper[4780]: I1210 11:20:57.316712 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cba3d639-6379-42fd-bdb7-072dcf87f78f-config-data" (OuterVolumeSpecName: "config-data") pod "cba3d639-6379-42fd-bdb7-072dcf87f78f" (UID: "cba3d639-6379-42fd-bdb7-072dcf87f78f"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:20:57 crc kubenswrapper[4780]: I1210 11:20:57.390576 4780 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cba3d639-6379-42fd-bdb7-072dcf87f78f-config-data\") on node \"crc\" DevicePath \"\"" Dec 10 11:20:57 crc kubenswrapper[4780]: I1210 11:20:57.644602 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"cba3d639-6379-42fd-bdb7-072dcf87f78f","Type":"ContainerDied","Data":"e437f6730268f7f2ad69a06408eead4fa0089bac665f00f6679622c4f1c5b30c"} Dec 10 11:20:57 crc kubenswrapper[4780]: I1210 11:20:57.645203 4780 scope.go:117] "RemoveContainer" containerID="b372237d665234eacceab5899c5c9f861810efb0e9e5252a73ff3d910a9af801" Dec 10 11:20:57 crc kubenswrapper[4780]: I1210 11:20:57.645093 4780 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Dec 10 11:20:57 crc kubenswrapper[4780]: I1210 11:20:57.732425 4780 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/rabbitmq-server-0" podUID="930a45eb-72d1-4060-92de-2e348073eb16" containerName="rabbitmq" containerID="cri-o://96434be8fedd20bc464a85ec30be23748e84f67ecee82cb8881db8f915ee8ee9" gracePeriod=604793 Dec 10 11:20:57 crc kubenswrapper[4780]: I1210 11:20:57.748116 4780 scope.go:117] "RemoveContainer" containerID="ea52ddb186d02a6a57a149772c450dfda5d185d102ab3134eca37301437fe980" Dec 10 11:20:57 crc kubenswrapper[4780]: I1210 11:20:57.764174 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Dec 10 11:20:57 crc kubenswrapper[4780]: I1210 11:20:57.800393 4780 scope.go:117] "RemoveContainer" containerID="7464de1995d228f9845884516bd18c1e32598a04ab32fd93b3925a534f3a5771" Dec 10 11:20:57 crc kubenswrapper[4780]: I1210 11:20:57.813973 4780 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Dec 10 11:20:57 crc kubenswrapper[4780]: I1210 11:20:57.847606 4780 scope.go:117] "RemoveContainer" containerID="25f0df2fa9b782063ef3d7c20587bb703538055926bde0fe32614c8c7aef7812" Dec 10 11:20:57 crc kubenswrapper[4780]: I1210 11:20:57.851064 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Dec 10 11:20:57 crc kubenswrapper[4780]: E1210 11:20:57.852286 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cba3d639-6379-42fd-bdb7-072dcf87f78f" containerName="ceilometer-notification-agent" Dec 10 11:20:57 crc kubenswrapper[4780]: I1210 11:20:57.852324 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="cba3d639-6379-42fd-bdb7-072dcf87f78f" containerName="ceilometer-notification-agent" Dec 10 11:20:57 crc kubenswrapper[4780]: E1210 11:20:57.852364 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cba3d639-6379-42fd-bdb7-072dcf87f78f" containerName="sg-core" Dec 10 11:20:57 crc kubenswrapper[4780]: I1210 11:20:57.852374 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="cba3d639-6379-42fd-bdb7-072dcf87f78f" containerName="sg-core" Dec 10 11:20:57 crc kubenswrapper[4780]: E1210 11:20:57.852419 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cba3d639-6379-42fd-bdb7-072dcf87f78f" containerName="ceilometer-central-agent" Dec 10 11:20:57 crc kubenswrapper[4780]: I1210 11:20:57.852428 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="cba3d639-6379-42fd-bdb7-072dcf87f78f" containerName="ceilometer-central-agent" Dec 10 11:20:57 crc kubenswrapper[4780]: 
E1210 11:20:57.852483 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cba3d639-6379-42fd-bdb7-072dcf87f78f" containerName="proxy-httpd" Dec 10 11:20:57 crc kubenswrapper[4780]: I1210 11:20:57.852493 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="cba3d639-6379-42fd-bdb7-072dcf87f78f" containerName="proxy-httpd" Dec 10 11:20:57 crc kubenswrapper[4780]: I1210 11:20:57.852872 4780 memory_manager.go:354] "RemoveStaleState removing state" podUID="cba3d639-6379-42fd-bdb7-072dcf87f78f" containerName="proxy-httpd" Dec 10 11:20:57 crc kubenswrapper[4780]: I1210 11:20:57.852942 4780 memory_manager.go:354] "RemoveStaleState removing state" podUID="cba3d639-6379-42fd-bdb7-072dcf87f78f" containerName="ceilometer-central-agent" Dec 10 11:20:57 crc kubenswrapper[4780]: I1210 11:20:57.852957 4780 memory_manager.go:354] "RemoveStaleState removing state" podUID="cba3d639-6379-42fd-bdb7-072dcf87f78f" containerName="sg-core" Dec 10 11:20:57 crc kubenswrapper[4780]: I1210 11:20:57.852980 4780 memory_manager.go:354] "RemoveStaleState removing state" podUID="cba3d639-6379-42fd-bdb7-072dcf87f78f" containerName="ceilometer-notification-agent" Dec 10 11:20:57 crc kubenswrapper[4780]: I1210 11:20:57.862712 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Dec 10 11:20:57 crc kubenswrapper[4780]: I1210 11:20:57.867258 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Dec 10 11:20:57 crc kubenswrapper[4780]: I1210 11:20:57.867469 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Dec 10 11:20:57 crc kubenswrapper[4780]: I1210 11:20:57.867508 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ceilometer-internal-svc" Dec 10 11:20:57 crc kubenswrapper[4780]: I1210 11:20:57.867721 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Dec 10 11:20:57 crc kubenswrapper[4780]: I1210 11:20:57.928930 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/317b5b7c-bb08-4441-a2ef-8c2d7390ada6-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"317b5b7c-bb08-4441-a2ef-8c2d7390ada6\") " pod="openstack/ceilometer-0" Dec 10 11:20:57 crc kubenswrapper[4780]: I1210 11:20:57.929012 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/317b5b7c-bb08-4441-a2ef-8c2d7390ada6-run-httpd\") pod \"ceilometer-0\" (UID: \"317b5b7c-bb08-4441-a2ef-8c2d7390ada6\") " pod="openstack/ceilometer-0" Dec 10 11:20:57 crc kubenswrapper[4780]: I1210 11:20:57.929056 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/317b5b7c-bb08-4441-a2ef-8c2d7390ada6-config-data\") pod \"ceilometer-0\" (UID: \"317b5b7c-bb08-4441-a2ef-8c2d7390ada6\") " pod="openstack/ceilometer-0" Dec 10 11:20:57 crc kubenswrapper[4780]: I1210 11:20:57.929110 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gf2w8\" (UniqueName: \"kubernetes.io/projected/317b5b7c-bb08-4441-a2ef-8c2d7390ada6-kube-api-access-gf2w8\") pod \"ceilometer-0\" (UID: \"317b5b7c-bb08-4441-a2ef-8c2d7390ada6\") " pod="openstack/ceilometer-0" Dec 10 11:20:57 crc kubenswrapper[4780]: I1210 
11:20:57.929695 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/317b5b7c-bb08-4441-a2ef-8c2d7390ada6-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"317b5b7c-bb08-4441-a2ef-8c2d7390ada6\") " pod="openstack/ceilometer-0" Dec 10 11:20:57 crc kubenswrapper[4780]: I1210 11:20:57.929910 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/317b5b7c-bb08-4441-a2ef-8c2d7390ada6-log-httpd\") pod \"ceilometer-0\" (UID: \"317b5b7c-bb08-4441-a2ef-8c2d7390ada6\") " pod="openstack/ceilometer-0" Dec 10 11:20:57 crc kubenswrapper[4780]: I1210 11:20:57.930382 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/317b5b7c-bb08-4441-a2ef-8c2d7390ada6-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"317b5b7c-bb08-4441-a2ef-8c2d7390ada6\") " pod="openstack/ceilometer-0" Dec 10 11:20:57 crc kubenswrapper[4780]: I1210 11:20:57.931000 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/317b5b7c-bb08-4441-a2ef-8c2d7390ada6-scripts\") pod \"ceilometer-0\" (UID: \"317b5b7c-bb08-4441-a2ef-8c2d7390ada6\") " pod="openstack/ceilometer-0" Dec 10 11:20:57 crc kubenswrapper[4780]: I1210 11:20:57.975420 4780 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cba3d639-6379-42fd-bdb7-072dcf87f78f" path="/var/lib/kubelet/pods/cba3d639-6379-42fd-bdb7-072dcf87f78f/volumes" Dec 10 11:20:58 crc kubenswrapper[4780]: I1210 11:20:58.034027 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/317b5b7c-bb08-4441-a2ef-8c2d7390ada6-scripts\") pod \"ceilometer-0\" (UID: \"317b5b7c-bb08-4441-a2ef-8c2d7390ada6\") " pod="openstack/ceilometer-0" Dec 10 11:20:58 crc kubenswrapper[4780]: I1210 11:20:58.035353 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/317b5b7c-bb08-4441-a2ef-8c2d7390ada6-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"317b5b7c-bb08-4441-a2ef-8c2d7390ada6\") " pod="openstack/ceilometer-0" Dec 10 11:20:58 crc kubenswrapper[4780]: I1210 11:20:58.035392 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/317b5b7c-bb08-4441-a2ef-8c2d7390ada6-run-httpd\") pod \"ceilometer-0\" (UID: \"317b5b7c-bb08-4441-a2ef-8c2d7390ada6\") " pod="openstack/ceilometer-0" Dec 10 11:20:58 crc kubenswrapper[4780]: I1210 11:20:58.035429 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/317b5b7c-bb08-4441-a2ef-8c2d7390ada6-config-data\") pod \"ceilometer-0\" (UID: \"317b5b7c-bb08-4441-a2ef-8c2d7390ada6\") " pod="openstack/ceilometer-0" Dec 10 11:20:58 crc kubenswrapper[4780]: I1210 11:20:58.035473 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gf2w8\" (UniqueName: \"kubernetes.io/projected/317b5b7c-bb08-4441-a2ef-8c2d7390ada6-kube-api-access-gf2w8\") pod \"ceilometer-0\" (UID: \"317b5b7c-bb08-4441-a2ef-8c2d7390ada6\") " pod="openstack/ceilometer-0" Dec 10 11:20:58 crc kubenswrapper[4780]: I1210 11:20:58.035555 4780 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/317b5b7c-bb08-4441-a2ef-8c2d7390ada6-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"317b5b7c-bb08-4441-a2ef-8c2d7390ada6\") " pod="openstack/ceilometer-0" Dec 10 11:20:58 crc kubenswrapper[4780]: I1210 11:20:58.035596 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/317b5b7c-bb08-4441-a2ef-8c2d7390ada6-log-httpd\") pod \"ceilometer-0\" (UID: \"317b5b7c-bb08-4441-a2ef-8c2d7390ada6\") " pod="openstack/ceilometer-0" Dec 10 11:20:58 crc kubenswrapper[4780]: I1210 11:20:58.035704 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/317b5b7c-bb08-4441-a2ef-8c2d7390ada6-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"317b5b7c-bb08-4441-a2ef-8c2d7390ada6\") " pod="openstack/ceilometer-0" Dec 10 11:20:58 crc kubenswrapper[4780]: I1210 11:20:58.036186 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/317b5b7c-bb08-4441-a2ef-8c2d7390ada6-run-httpd\") pod \"ceilometer-0\" (UID: \"317b5b7c-bb08-4441-a2ef-8c2d7390ada6\") " pod="openstack/ceilometer-0" Dec 10 11:20:58 crc kubenswrapper[4780]: I1210 11:20:58.036520 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/317b5b7c-bb08-4441-a2ef-8c2d7390ada6-log-httpd\") pod \"ceilometer-0\" (UID: \"317b5b7c-bb08-4441-a2ef-8c2d7390ada6\") " pod="openstack/ceilometer-0" Dec 10 11:20:58 crc kubenswrapper[4780]: I1210 11:20:58.043209 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/317b5b7c-bb08-4441-a2ef-8c2d7390ada6-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"317b5b7c-bb08-4441-a2ef-8c2d7390ada6\") " pod="openstack/ceilometer-0" Dec 10 11:20:58 crc kubenswrapper[4780]: I1210 11:20:58.043871 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/317b5b7c-bb08-4441-a2ef-8c2d7390ada6-config-data\") pod \"ceilometer-0\" (UID: \"317b5b7c-bb08-4441-a2ef-8c2d7390ada6\") " pod="openstack/ceilometer-0" Dec 10 11:20:58 crc kubenswrapper[4780]: I1210 11:20:58.044556 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/317b5b7c-bb08-4441-a2ef-8c2d7390ada6-scripts\") pod \"ceilometer-0\" (UID: \"317b5b7c-bb08-4441-a2ef-8c2d7390ada6\") " pod="openstack/ceilometer-0" Dec 10 11:20:58 crc kubenswrapper[4780]: I1210 11:20:58.061319 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/317b5b7c-bb08-4441-a2ef-8c2d7390ada6-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"317b5b7c-bb08-4441-a2ef-8c2d7390ada6\") " pod="openstack/ceilometer-0" Dec 10 11:20:58 crc kubenswrapper[4780]: I1210 11:20:58.061643 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/317b5b7c-bb08-4441-a2ef-8c2d7390ada6-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"317b5b7c-bb08-4441-a2ef-8c2d7390ada6\") " pod="openstack/ceilometer-0" Dec 10 11:20:58 crc kubenswrapper[4780]: I1210 11:20:58.062312 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gf2w8\" 
(UniqueName: \"kubernetes.io/projected/317b5b7c-bb08-4441-a2ef-8c2d7390ada6-kube-api-access-gf2w8\") pod \"ceilometer-0\" (UID: \"317b5b7c-bb08-4441-a2ef-8c2d7390ada6\") " pod="openstack/ceilometer-0" Dec 10 11:20:58 crc kubenswrapper[4780]: I1210 11:20:58.208174 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Dec 10 11:20:58 crc kubenswrapper[4780]: I1210 11:20:58.222214 4780 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/rabbitmq-cell1-server-0" podUID="9f6ef7c1-91bd-4109-af1b-9cf3960ec2ae" containerName="rabbitmq" containerID="cri-o://0d29a425c249ad598c9ad2f8907eb021b533375acc4ac5133b190c0761613d86" gracePeriod=604792 Dec 10 11:20:59 crc kubenswrapper[4780]: I1210 11:20:59.506695 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Dec 10 11:20:59 crc kubenswrapper[4780]: E1210 11:20:59.658105 4780 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested" Dec 10 11:20:59 crc kubenswrapper[4780]: E1210 11:20:59.658197 4780 kuberuntime_image.go:55] "Failed to pull image" err="initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested" Dec 10 11:20:59 crc kubenswrapper[4780]: E1210 11:20:59.658412 4780 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:ceilometer-central-agent,Image:quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n5d9hcfh66bh66bh89h5cdh97h57ch598h68h5b5h689h56chc5h96h58ch687h5dfh5ddh645h68bhcchcdh56ch56fh9fh654hd4h8dhb9h74h59cq,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/var/lib/openstack/bin,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/openstack/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:ceilometer-central-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-gf2w8,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[/usr/bin/python3 /var/lib/openstack/bin/centralhealth.py],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:300,TimeoutSeconds:5,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ceilometer-0_openstack(317b5b7c-bb08-4441-a2ef-8c2d7390ada6): ErrImagePull: initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine" logger="UnhandledError" Dec 10 11:21:00 crc kubenswrapper[4780]: I1210 11:21:00.031591 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"317b5b7c-bb08-4441-a2ef-8c2d7390ada6","Type":"ContainerStarted","Data":"5fa76632df982d1e4f0a7706e0831fa9941d68d06f01f929c19e6b93f5ab73dc"} Dec 10 11:21:01 crc kubenswrapper[4780]: E1210 11:21:01.048347 4780 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested" Dec 10 11:21:01 crc kubenswrapper[4780]: E1210 11:21:01.049260 4780 kuberuntime_image.go:55] "Failed to pull image" err="initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested" Dec 10 11:21:01 crc kubenswrapper[4780]: E1210 11:21:01.049484 4780 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:heat-db-sync,Image:quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested,Command:[/bin/bash],Args:[-c /usr/bin/heat-manage --config-dir /etc/heat/heat.conf.d db_sync],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:true,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/heat/heat.conf.d/00-default.conf,SubPath:00-default.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:false,MountPath:/etc/heat/heat.conf.d/01-custom.conf,SubPath:01-custom.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/my.cnf,SubPath:my.cnf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-fh6ms,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL 
MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42418,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:*42418,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod heat-db-sync-nd4t7_openstack(4ba2892c-316e-4819-a33c-d7b2b6803553): ErrImagePull: initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" logger="UnhandledError" Dec 10 11:21:01 crc kubenswrapper[4780]: E1210 11:21:01.050758 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ErrImagePull: \"initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 11:21:01 crc kubenswrapper[4780]: I1210 11:21:01.058229 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"317b5b7c-bb08-4441-a2ef-8c2d7390ada6","Type":"ContainerStarted","Data":"889eb5c8413c7de63ee36e69fc6bd91fb6ed289ab958d18de06a1f889ea7786a"} Dec 10 11:21:03 crc kubenswrapper[4780]: I1210 11:21:03.096145 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"317b5b7c-bb08-4441-a2ef-8c2d7390ada6","Type":"ContainerStarted","Data":"964b57b90fff8adee5b86127af07babde1f3e0f4451a025a2805a41a309947d6"} Dec 10 11:21:03 crc kubenswrapper[4780]: E1210 11:21:03.770663 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ErrImagePull: \"initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 11:21:03 crc kubenswrapper[4780]: I1210 11:21:03.987692 4780 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-server-0" podUID="930a45eb-72d1-4060-92de-2e348073eb16" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.0.129:5671: connect: connection refused" Dec 10 11:21:04 crc kubenswrapper[4780]: I1210 11:21:04.132079 4780 generic.go:334] "Generic (PLEG): container finished" podID="930a45eb-72d1-4060-92de-2e348073eb16" containerID="96434be8fedd20bc464a85ec30be23748e84f67ecee82cb8881db8f915ee8ee9" exitCode=0 Dec 10 11:21:04 crc kubenswrapper[4780]: I1210 11:21:04.132246 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"930a45eb-72d1-4060-92de-2e348073eb16","Type":"ContainerDied","Data":"96434be8fedd20bc464a85ec30be23748e84f67ecee82cb8881db8f915ee8ee9"} Dec 10 11:21:04 crc kubenswrapper[4780]: I1210 11:21:04.138980 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"317b5b7c-bb08-4441-a2ef-8c2d7390ada6","Type":"ContainerStarted","Data":"99ca6d0dddc064844cf0c50f0af20055ce10b0f202558866302cf7acc37964c9"} Dec 10 11:21:04 crc kubenswrapper[4780]: I1210 11:21:04.139242 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Dec 10 11:21:04 crc kubenswrapper[4780]: E1210 11:21:04.145149 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 11:21:04 crc kubenswrapper[4780]: I1210 11:21:04.315770 4780 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-cell1-server-0" podUID="9f6ef7c1-91bd-4109-af1b-9cf3960ec2ae" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.0.130:5671: connect: connection refused" Dec 10 11:21:04 crc kubenswrapper[4780]: I1210 11:21:04.692674 4780 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-server-0" Dec 10 11:21:04 crc kubenswrapper[4780]: I1210 11:21:04.818237 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"persistence\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"930a45eb-72d1-4060-92de-2e348073eb16\" (UID: \"930a45eb-72d1-4060-92de-2e348073eb16\") " Dec 10 11:21:04 crc kubenswrapper[4780]: I1210 11:21:04.818492 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/930a45eb-72d1-4060-92de-2e348073eb16-plugins-conf\") pod \"930a45eb-72d1-4060-92de-2e348073eb16\" (UID: \"930a45eb-72d1-4060-92de-2e348073eb16\") " Dec 10 11:21:04 crc kubenswrapper[4780]: I1210 11:21:04.818549 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/930a45eb-72d1-4060-92de-2e348073eb16-rabbitmq-tls\") pod \"930a45eb-72d1-4060-92de-2e348073eb16\" (UID: \"930a45eb-72d1-4060-92de-2e348073eb16\") " Dec 10 11:21:04 crc kubenswrapper[4780]: I1210 11:21:04.818571 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/930a45eb-72d1-4060-92de-2e348073eb16-config-data\") pod \"930a45eb-72d1-4060-92de-2e348073eb16\" (UID: \"930a45eb-72d1-4060-92de-2e348073eb16\") " Dec 10 11:21:04 crc kubenswrapper[4780]: I1210 11:21:04.818629 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bcpxv\" (UniqueName: \"kubernetes.io/projected/930a45eb-72d1-4060-92de-2e348073eb16-kube-api-access-bcpxv\") pod \"930a45eb-72d1-4060-92de-2e348073eb16\" (UID: \"930a45eb-72d1-4060-92de-2e348073eb16\") " Dec 10 11:21:04 crc kubenswrapper[4780]: I1210 11:21:04.818746 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/930a45eb-72d1-4060-92de-2e348073eb16-rabbitmq-plugins\") pod \"930a45eb-72d1-4060-92de-2e348073eb16\" (UID: \"930a45eb-72d1-4060-92de-2e348073eb16\") " Dec 10 11:21:04 crc kubenswrapper[4780]: I1210 11:21:04.818814 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/930a45eb-72d1-4060-92de-2e348073eb16-erlang-cookie-secret\") pod \"930a45eb-72d1-4060-92de-2e348073eb16\" (UID: \"930a45eb-72d1-4060-92de-2e348073eb16\") " Dec 10 11:21:04 crc kubenswrapper[4780]: I1210 11:21:04.818867 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/930a45eb-72d1-4060-92de-2e348073eb16-pod-info\") pod \"930a45eb-72d1-4060-92de-2e348073eb16\" (UID: \"930a45eb-72d1-4060-92de-2e348073eb16\") " Dec 10 11:21:04 crc kubenswrapper[4780]: I1210 11:21:04.818982 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/930a45eb-72d1-4060-92de-2e348073eb16-server-conf\") pod \"930a45eb-72d1-4060-92de-2e348073eb16\" (UID: \"930a45eb-72d1-4060-92de-2e348073eb16\") " Dec 10 11:21:04 crc kubenswrapper[4780]: I1210 11:21:04.819034 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/930a45eb-72d1-4060-92de-2e348073eb16-rabbitmq-erlang-cookie\") pod \"930a45eb-72d1-4060-92de-2e348073eb16\" (UID: 
\"930a45eb-72d1-4060-92de-2e348073eb16\") " Dec 10 11:21:04 crc kubenswrapper[4780]: I1210 11:21:04.819098 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/930a45eb-72d1-4060-92de-2e348073eb16-rabbitmq-confd\") pod \"930a45eb-72d1-4060-92de-2e348073eb16\" (UID: \"930a45eb-72d1-4060-92de-2e348073eb16\") " Dec 10 11:21:04 crc kubenswrapper[4780]: I1210 11:21:04.822554 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/930a45eb-72d1-4060-92de-2e348073eb16-plugins-conf" (OuterVolumeSpecName: "plugins-conf") pod "930a45eb-72d1-4060-92de-2e348073eb16" (UID: "930a45eb-72d1-4060-92de-2e348073eb16"). InnerVolumeSpecName "plugins-conf". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 11:21:04 crc kubenswrapper[4780]: I1210 11:21:04.825270 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/930a45eb-72d1-4060-92de-2e348073eb16-rabbitmq-plugins" (OuterVolumeSpecName: "rabbitmq-plugins") pod "930a45eb-72d1-4060-92de-2e348073eb16" (UID: "930a45eb-72d1-4060-92de-2e348073eb16"). InnerVolumeSpecName "rabbitmq-plugins". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 11:21:04 crc kubenswrapper[4780]: I1210 11:21:04.836508 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/930a45eb-72d1-4060-92de-2e348073eb16-rabbitmq-erlang-cookie" (OuterVolumeSpecName: "rabbitmq-erlang-cookie") pod "930a45eb-72d1-4060-92de-2e348073eb16" (UID: "930a45eb-72d1-4060-92de-2e348073eb16"). InnerVolumeSpecName "rabbitmq-erlang-cookie". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 11:21:04 crc kubenswrapper[4780]: I1210 11:21:04.843088 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/930a45eb-72d1-4060-92de-2e348073eb16-kube-api-access-bcpxv" (OuterVolumeSpecName: "kube-api-access-bcpxv") pod "930a45eb-72d1-4060-92de-2e348073eb16" (UID: "930a45eb-72d1-4060-92de-2e348073eb16"). InnerVolumeSpecName "kube-api-access-bcpxv". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:21:04 crc kubenswrapper[4780]: I1210 11:21:04.850764 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/downward-api/930a45eb-72d1-4060-92de-2e348073eb16-pod-info" (OuterVolumeSpecName: "pod-info") pod "930a45eb-72d1-4060-92de-2e348073eb16" (UID: "930a45eb-72d1-4060-92de-2e348073eb16"). InnerVolumeSpecName "pod-info". PluginName "kubernetes.io/downward-api", VolumeGidValue "" Dec 10 11:21:04 crc kubenswrapper[4780]: I1210 11:21:04.856847 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/930a45eb-72d1-4060-92de-2e348073eb16-erlang-cookie-secret" (OuterVolumeSpecName: "erlang-cookie-secret") pod "930a45eb-72d1-4060-92de-2e348073eb16" (UID: "930a45eb-72d1-4060-92de-2e348073eb16"). InnerVolumeSpecName "erlang-cookie-secret". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:21:04 crc kubenswrapper[4780]: I1210 11:21:04.857259 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/930a45eb-72d1-4060-92de-2e348073eb16-rabbitmq-tls" (OuterVolumeSpecName: "rabbitmq-tls") pod "930a45eb-72d1-4060-92de-2e348073eb16" (UID: "930a45eb-72d1-4060-92de-2e348073eb16"). InnerVolumeSpecName "rabbitmq-tls". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:21:04 crc kubenswrapper[4780]: I1210 11:21:04.997893 4780 reconciler_common.go:293] "Volume detached for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/930a45eb-72d1-4060-92de-2e348073eb16-plugins-conf\") on node \"crc\" DevicePath \"\"" Dec 10 11:21:04 crc kubenswrapper[4780]: I1210 11:21:04.998545 4780 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/930a45eb-72d1-4060-92de-2e348073eb16-rabbitmq-tls\") on node \"crc\" DevicePath \"\"" Dec 10 11:21:04 crc kubenswrapper[4780]: I1210 11:21:04.998560 4780 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bcpxv\" (UniqueName: \"kubernetes.io/projected/930a45eb-72d1-4060-92de-2e348073eb16-kube-api-access-bcpxv\") on node \"crc\" DevicePath \"\"" Dec 10 11:21:04 crc kubenswrapper[4780]: I1210 11:21:04.998575 4780 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/930a45eb-72d1-4060-92de-2e348073eb16-rabbitmq-plugins\") on node \"crc\" DevicePath \"\"" Dec 10 11:21:04 crc kubenswrapper[4780]: I1210 11:21:04.998587 4780 reconciler_common.go:293] "Volume detached for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/930a45eb-72d1-4060-92de-2e348073eb16-erlang-cookie-secret\") on node \"crc\" DevicePath \"\"" Dec 10 11:21:04 crc kubenswrapper[4780]: I1210 11:21:04.998598 4780 reconciler_common.go:293] "Volume detached for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/930a45eb-72d1-4060-92de-2e348073eb16-pod-info\") on node \"crc\" DevicePath \"\"" Dec 10 11:21:04 crc kubenswrapper[4780]: I1210 11:21:04.998611 4780 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/930a45eb-72d1-4060-92de-2e348073eb16-rabbitmq-erlang-cookie\") on node \"crc\" DevicePath \"\"" Dec 10 11:21:05 crc kubenswrapper[4780]: I1210 11:21:05.045820 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage03-crc" (OuterVolumeSpecName: "persistence") pod "930a45eb-72d1-4060-92de-2e348073eb16" (UID: "930a45eb-72d1-4060-92de-2e348073eb16"). InnerVolumeSpecName "local-storage03-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Dec 10 11:21:05 crc kubenswrapper[4780]: I1210 11:21:05.103459 4780 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") on node \"crc\" " Dec 10 11:21:05 crc kubenswrapper[4780]: I1210 11:21:05.127746 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/930a45eb-72d1-4060-92de-2e348073eb16-config-data" (OuterVolumeSpecName: "config-data") pod "930a45eb-72d1-4060-92de-2e348073eb16" (UID: "930a45eb-72d1-4060-92de-2e348073eb16"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 11:21:05 crc kubenswrapper[4780]: I1210 11:21:05.162797 4780 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage03-crc" (UniqueName: "kubernetes.io/local-volume/local-storage03-crc") on node "crc" Dec 10 11:21:05 crc kubenswrapper[4780]: I1210 11:21:05.177033 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/930a45eb-72d1-4060-92de-2e348073eb16-server-conf" (OuterVolumeSpecName: "server-conf") pod "930a45eb-72d1-4060-92de-2e348073eb16" (UID: "930a45eb-72d1-4060-92de-2e348073eb16"). InnerVolumeSpecName "server-conf". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 11:21:05 crc kubenswrapper[4780]: I1210 11:21:05.198221 4780 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0" Dec 10 11:21:05 crc kubenswrapper[4780]: I1210 11:21:05.198252 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"930a45eb-72d1-4060-92de-2e348073eb16","Type":"ContainerDied","Data":"973f12245001808e3a5d185833d5abacda718988a830d803b98fbc87612fc2eb"} Dec 10 11:21:05 crc kubenswrapper[4780]: I1210 11:21:05.198352 4780 scope.go:117] "RemoveContainer" containerID="96434be8fedd20bc464a85ec30be23748e84f67ecee82cb8881db8f915ee8ee9" Dec 10 11:21:05 crc kubenswrapper[4780]: I1210 11:21:05.208470 4780 generic.go:334] "Generic (PLEG): container finished" podID="9f6ef7c1-91bd-4109-af1b-9cf3960ec2ae" containerID="0d29a425c249ad598c9ad2f8907eb021b533375acc4ac5133b190c0761613d86" exitCode=0 Dec 10 11:21:05 crc kubenswrapper[4780]: I1210 11:21:05.208560 4780 reconciler_common.go:293] "Volume detached for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/930a45eb-72d1-4060-92de-2e348073eb16-server-conf\") on node \"crc\" DevicePath \"\"" Dec 10 11:21:05 crc kubenswrapper[4780]: I1210 11:21:05.208595 4780 reconciler_common.go:293] "Volume detached for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") on node \"crc\" DevicePath \"\"" Dec 10 11:21:05 crc kubenswrapper[4780]: I1210 11:21:05.208605 4780 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/930a45eb-72d1-4060-92de-2e348073eb16-config-data\") on node \"crc\" DevicePath \"\"" Dec 10 11:21:05 crc kubenswrapper[4780]: I1210 11:21:05.208987 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"9f6ef7c1-91bd-4109-af1b-9cf3960ec2ae","Type":"ContainerDied","Data":"0d29a425c249ad598c9ad2f8907eb021b533375acc4ac5133b190c0761613d86"} Dec 10 11:21:05 crc kubenswrapper[4780]: E1210 11:21:05.218499 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 11:21:05 crc kubenswrapper[4780]: I1210 11:21:05.249793 4780 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Dec 10 11:21:05 crc kubenswrapper[4780]: I1210 11:21:05.296014 4780 scope.go:117] "RemoveContainer" containerID="ed7f26679a7c0d61b6e2539f857a270f9cc2f930158baaf0029c54842591e814" Dec 10 11:21:05 crc kubenswrapper[4780]: I1210 11:21:05.339839 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/930a45eb-72d1-4060-92de-2e348073eb16-rabbitmq-confd" (OuterVolumeSpecName: "rabbitmq-confd") pod "930a45eb-72d1-4060-92de-2e348073eb16" (UID: "930a45eb-72d1-4060-92de-2e348073eb16"). InnerVolumeSpecName "rabbitmq-confd". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:21:05 crc kubenswrapper[4780]: I1210 11:21:05.421400 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ptcx5\" (UniqueName: \"kubernetes.io/projected/9f6ef7c1-91bd-4109-af1b-9cf3960ec2ae-kube-api-access-ptcx5\") pod \"9f6ef7c1-91bd-4109-af1b-9cf3960ec2ae\" (UID: \"9f6ef7c1-91bd-4109-af1b-9cf3960ec2ae\") " Dec 10 11:21:05 crc kubenswrapper[4780]: I1210 11:21:05.421568 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/9f6ef7c1-91bd-4109-af1b-9cf3960ec2ae-plugins-conf\") pod \"9f6ef7c1-91bd-4109-af1b-9cf3960ec2ae\" (UID: \"9f6ef7c1-91bd-4109-af1b-9cf3960ec2ae\") " Dec 10 11:21:05 crc kubenswrapper[4780]: I1210 11:21:05.421659 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/9f6ef7c1-91bd-4109-af1b-9cf3960ec2ae-server-conf\") pod \"9f6ef7c1-91bd-4109-af1b-9cf3960ec2ae\" (UID: \"9f6ef7c1-91bd-4109-af1b-9cf3960ec2ae\") " Dec 10 11:21:05 crc kubenswrapper[4780]: I1210 11:21:05.421728 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/9f6ef7c1-91bd-4109-af1b-9cf3960ec2ae-rabbitmq-plugins\") pod \"9f6ef7c1-91bd-4109-af1b-9cf3960ec2ae\" (UID: \"9f6ef7c1-91bd-4109-af1b-9cf3960ec2ae\") " Dec 10 11:21:05 crc kubenswrapper[4780]: I1210 11:21:05.421775 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/9f6ef7c1-91bd-4109-af1b-9cf3960ec2ae-pod-info\") pod \"9f6ef7c1-91bd-4109-af1b-9cf3960ec2ae\" (UID: \"9f6ef7c1-91bd-4109-af1b-9cf3960ec2ae\") " Dec 10 11:21:05 crc kubenswrapper[4780]: I1210 11:21:05.421951 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/9f6ef7c1-91bd-4109-af1b-9cf3960ec2ae-erlang-cookie-secret\") pod \"9f6ef7c1-91bd-4109-af1b-9cf3960ec2ae\" (UID: \"9f6ef7c1-91bd-4109-af1b-9cf3960ec2ae\") " Dec 10 11:21:05 crc kubenswrapper[4780]: I1210 11:21:05.422039 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/9f6ef7c1-91bd-4109-af1b-9cf3960ec2ae-rabbitmq-confd\") pod \"9f6ef7c1-91bd-4109-af1b-9cf3960ec2ae\" (UID: \"9f6ef7c1-91bd-4109-af1b-9cf3960ec2ae\") " Dec 10 11:21:05 crc kubenswrapper[4780]: I1210 11:21:05.422079 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/9f6ef7c1-91bd-4109-af1b-9cf3960ec2ae-config-data\") pod \"9f6ef7c1-91bd-4109-af1b-9cf3960ec2ae\" (UID: \"9f6ef7c1-91bd-4109-af1b-9cf3960ec2ae\") " Dec 10 11:21:05 crc 
kubenswrapper[4780]: I1210 11:21:05.422128 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/9f6ef7c1-91bd-4109-af1b-9cf3960ec2ae-rabbitmq-erlang-cookie\") pod \"9f6ef7c1-91bd-4109-af1b-9cf3960ec2ae\" (UID: \"9f6ef7c1-91bd-4109-af1b-9cf3960ec2ae\") " Dec 10 11:21:05 crc kubenswrapper[4780]: I1210 11:21:05.422164 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/9f6ef7c1-91bd-4109-af1b-9cf3960ec2ae-rabbitmq-tls\") pod \"9f6ef7c1-91bd-4109-af1b-9cf3960ec2ae\" (UID: \"9f6ef7c1-91bd-4109-af1b-9cf3960ec2ae\") " Dec 10 11:21:05 crc kubenswrapper[4780]: I1210 11:21:05.422225 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"persistence\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"9f6ef7c1-91bd-4109-af1b-9cf3960ec2ae\" (UID: \"9f6ef7c1-91bd-4109-af1b-9cf3960ec2ae\") " Dec 10 11:21:05 crc kubenswrapper[4780]: I1210 11:21:05.423242 4780 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/930a45eb-72d1-4060-92de-2e348073eb16-rabbitmq-confd\") on node \"crc\" DevicePath \"\"" Dec 10 11:21:05 crc kubenswrapper[4780]: I1210 11:21:05.429315 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9f6ef7c1-91bd-4109-af1b-9cf3960ec2ae-plugins-conf" (OuterVolumeSpecName: "plugins-conf") pod "9f6ef7c1-91bd-4109-af1b-9cf3960ec2ae" (UID: "9f6ef7c1-91bd-4109-af1b-9cf3960ec2ae"). InnerVolumeSpecName "plugins-conf". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 11:21:05 crc kubenswrapper[4780]: I1210 11:21:05.432691 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage01-crc" (OuterVolumeSpecName: "persistence") pod "9f6ef7c1-91bd-4109-af1b-9cf3960ec2ae" (UID: "9f6ef7c1-91bd-4109-af1b-9cf3960ec2ae"). InnerVolumeSpecName "local-storage01-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Dec 10 11:21:05 crc kubenswrapper[4780]: I1210 11:21:05.434012 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9f6ef7c1-91bd-4109-af1b-9cf3960ec2ae-rabbitmq-erlang-cookie" (OuterVolumeSpecName: "rabbitmq-erlang-cookie") pod "9f6ef7c1-91bd-4109-af1b-9cf3960ec2ae" (UID: "9f6ef7c1-91bd-4109-af1b-9cf3960ec2ae"). InnerVolumeSpecName "rabbitmq-erlang-cookie". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 11:21:05 crc kubenswrapper[4780]: I1210 11:21:05.435074 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9f6ef7c1-91bd-4109-af1b-9cf3960ec2ae-rabbitmq-plugins" (OuterVolumeSpecName: "rabbitmq-plugins") pod "9f6ef7c1-91bd-4109-af1b-9cf3960ec2ae" (UID: "9f6ef7c1-91bd-4109-af1b-9cf3960ec2ae"). InnerVolumeSpecName "rabbitmq-plugins". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 11:21:05 crc kubenswrapper[4780]: I1210 11:21:05.439000 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/downward-api/9f6ef7c1-91bd-4109-af1b-9cf3960ec2ae-pod-info" (OuterVolumeSpecName: "pod-info") pod "9f6ef7c1-91bd-4109-af1b-9cf3960ec2ae" (UID: "9f6ef7c1-91bd-4109-af1b-9cf3960ec2ae"). InnerVolumeSpecName "pod-info". 
PluginName "kubernetes.io/downward-api", VolumeGidValue "" Dec 10 11:21:05 crc kubenswrapper[4780]: I1210 11:21:05.442195 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9f6ef7c1-91bd-4109-af1b-9cf3960ec2ae-erlang-cookie-secret" (OuterVolumeSpecName: "erlang-cookie-secret") pod "9f6ef7c1-91bd-4109-af1b-9cf3960ec2ae" (UID: "9f6ef7c1-91bd-4109-af1b-9cf3960ec2ae"). InnerVolumeSpecName "erlang-cookie-secret". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:21:05 crc kubenswrapper[4780]: I1210 11:21:05.454589 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9f6ef7c1-91bd-4109-af1b-9cf3960ec2ae-rabbitmq-tls" (OuterVolumeSpecName: "rabbitmq-tls") pod "9f6ef7c1-91bd-4109-af1b-9cf3960ec2ae" (UID: "9f6ef7c1-91bd-4109-af1b-9cf3960ec2ae"). InnerVolumeSpecName "rabbitmq-tls". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:21:05 crc kubenswrapper[4780]: I1210 11:21:05.465040 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9f6ef7c1-91bd-4109-af1b-9cf3960ec2ae-kube-api-access-ptcx5" (OuterVolumeSpecName: "kube-api-access-ptcx5") pod "9f6ef7c1-91bd-4109-af1b-9cf3960ec2ae" (UID: "9f6ef7c1-91bd-4109-af1b-9cf3960ec2ae"). InnerVolumeSpecName "kube-api-access-ptcx5". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:21:05 crc kubenswrapper[4780]: I1210 11:21:05.526509 4780 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ptcx5\" (UniqueName: \"kubernetes.io/projected/9f6ef7c1-91bd-4109-af1b-9cf3960ec2ae-kube-api-access-ptcx5\") on node \"crc\" DevicePath \"\"" Dec 10 11:21:05 crc kubenswrapper[4780]: I1210 11:21:05.527108 4780 reconciler_common.go:293] "Volume detached for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/9f6ef7c1-91bd-4109-af1b-9cf3960ec2ae-plugins-conf\") on node \"crc\" DevicePath \"\"" Dec 10 11:21:05 crc kubenswrapper[4780]: I1210 11:21:05.527126 4780 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/9f6ef7c1-91bd-4109-af1b-9cf3960ec2ae-rabbitmq-plugins\") on node \"crc\" DevicePath \"\"" Dec 10 11:21:05 crc kubenswrapper[4780]: I1210 11:21:05.527137 4780 reconciler_common.go:293] "Volume detached for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/9f6ef7c1-91bd-4109-af1b-9cf3960ec2ae-pod-info\") on node \"crc\" DevicePath \"\"" Dec 10 11:21:05 crc kubenswrapper[4780]: I1210 11:21:05.527146 4780 reconciler_common.go:293] "Volume detached for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/9f6ef7c1-91bd-4109-af1b-9cf3960ec2ae-erlang-cookie-secret\") on node \"crc\" DevicePath \"\"" Dec 10 11:21:05 crc kubenswrapper[4780]: I1210 11:21:05.527157 4780 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/9f6ef7c1-91bd-4109-af1b-9cf3960ec2ae-rabbitmq-erlang-cookie\") on node \"crc\" DevicePath \"\"" Dec 10 11:21:05 crc kubenswrapper[4780]: I1210 11:21:05.527174 4780 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/9f6ef7c1-91bd-4109-af1b-9cf3960ec2ae-rabbitmq-tls\") on node \"crc\" DevicePath \"\"" Dec 10 11:21:05 crc kubenswrapper[4780]: I1210 11:21:05.527218 4780 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") on 
node \"crc\" " Dec 10 11:21:05 crc kubenswrapper[4780]: I1210 11:21:05.527830 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9f6ef7c1-91bd-4109-af1b-9cf3960ec2ae-config-data" (OuterVolumeSpecName: "config-data") pod "9f6ef7c1-91bd-4109-af1b-9cf3960ec2ae" (UID: "9f6ef7c1-91bd-4109-af1b-9cf3960ec2ae"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 11:21:05 crc kubenswrapper[4780]: I1210 11:21:05.599936 4780 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage01-crc" (UniqueName: "kubernetes.io/local-volume/local-storage01-crc") on node "crc" Dec 10 11:21:05 crc kubenswrapper[4780]: I1210 11:21:05.639170 4780 reconciler_common.go:293] "Volume detached for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") on node \"crc\" DevicePath \"\"" Dec 10 11:21:05 crc kubenswrapper[4780]: I1210 11:21:05.639215 4780 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/9f6ef7c1-91bd-4109-af1b-9cf3960ec2ae-config-data\") on node \"crc\" DevicePath \"\"" Dec 10 11:21:05 crc kubenswrapper[4780]: I1210 11:21:05.661582 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9f6ef7c1-91bd-4109-af1b-9cf3960ec2ae-server-conf" (OuterVolumeSpecName: "server-conf") pod "9f6ef7c1-91bd-4109-af1b-9cf3960ec2ae" (UID: "9f6ef7c1-91bd-4109-af1b-9cf3960ec2ae"). InnerVolumeSpecName "server-conf". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 11:21:05 crc kubenswrapper[4780]: I1210 11:21:05.744705 4780 reconciler_common.go:293] "Volume detached for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/9f6ef7c1-91bd-4109-af1b-9cf3960ec2ae-server-conf\") on node \"crc\" DevicePath \"\"" Dec 10 11:21:05 crc kubenswrapper[4780]: I1210 11:21:05.793383 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9f6ef7c1-91bd-4109-af1b-9cf3960ec2ae-rabbitmq-confd" (OuterVolumeSpecName: "rabbitmq-confd") pod "9f6ef7c1-91bd-4109-af1b-9cf3960ec2ae" (UID: "9f6ef7c1-91bd-4109-af1b-9cf3960ec2ae"). InnerVolumeSpecName "rabbitmq-confd". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:21:05 crc kubenswrapper[4780]: I1210 11:21:05.847302 4780 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/9f6ef7c1-91bd-4109-af1b-9cf3960ec2ae-rabbitmq-confd\") on node \"crc\" DevicePath \"\"" Dec 10 11:21:05 crc kubenswrapper[4780]: I1210 11:21:05.889650 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-server-0"] Dec 10 11:21:05 crc kubenswrapper[4780]: I1210 11:21:05.908579 4780 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/rabbitmq-server-0"] Dec 10 11:21:05 crc kubenswrapper[4780]: I1210 11:21:05.937587 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-server-0"] Dec 10 11:21:05 crc kubenswrapper[4780]: E1210 11:21:05.938385 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="930a45eb-72d1-4060-92de-2e348073eb16" containerName="rabbitmq" Dec 10 11:21:05 crc kubenswrapper[4780]: I1210 11:21:05.938423 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="930a45eb-72d1-4060-92de-2e348073eb16" containerName="rabbitmq" Dec 10 11:21:05 crc kubenswrapper[4780]: E1210 11:21:05.938443 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9f6ef7c1-91bd-4109-af1b-9cf3960ec2ae" containerName="rabbitmq" Dec 10 11:21:05 crc kubenswrapper[4780]: I1210 11:21:05.938453 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="9f6ef7c1-91bd-4109-af1b-9cf3960ec2ae" containerName="rabbitmq" Dec 10 11:21:05 crc kubenswrapper[4780]: E1210 11:21:05.938511 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="930a45eb-72d1-4060-92de-2e348073eb16" containerName="setup-container" Dec 10 11:21:05 crc kubenswrapper[4780]: I1210 11:21:05.938521 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="930a45eb-72d1-4060-92de-2e348073eb16" containerName="setup-container" Dec 10 11:21:05 crc kubenswrapper[4780]: E1210 11:21:05.938571 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9f6ef7c1-91bd-4109-af1b-9cf3960ec2ae" containerName="setup-container" Dec 10 11:21:05 crc kubenswrapper[4780]: I1210 11:21:05.938584 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="9f6ef7c1-91bd-4109-af1b-9cf3960ec2ae" containerName="setup-container" Dec 10 11:21:05 crc kubenswrapper[4780]: I1210 11:21:05.938956 4780 memory_manager.go:354] "RemoveStaleState removing state" podUID="930a45eb-72d1-4060-92de-2e348073eb16" containerName="rabbitmq" Dec 10 11:21:05 crc kubenswrapper[4780]: I1210 11:21:05.938987 4780 memory_manager.go:354] "RemoveStaleState removing state" podUID="9f6ef7c1-91bd-4109-af1b-9cf3960ec2ae" containerName="rabbitmq" Dec 10 11:21:05 crc kubenswrapper[4780]: I1210 11:21:05.942410 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-server-0" Dec 10 11:21:05 crc kubenswrapper[4780]: I1210 11:21:05.955792 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-default-user" Dec 10 11:21:05 crc kubenswrapper[4780]: I1210 11:21:05.955869 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-server-conf" Dec 10 11:21:05 crc kubenswrapper[4780]: I1210 11:21:05.956262 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-plugins-conf" Dec 10 11:21:05 crc kubenswrapper[4780]: I1210 11:21:05.956530 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-svc" Dec 10 11:21:05 crc kubenswrapper[4780]: I1210 11:21:05.956664 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-config-data" Dec 10 11:21:05 crc kubenswrapper[4780]: I1210 11:21:05.956882 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-erlang-cookie" Dec 10 11:21:05 crc kubenswrapper[4780]: I1210 11:21:05.957638 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-server-dockercfg-wl44s" Dec 10 11:21:05 crc kubenswrapper[4780]: I1210 11:21:05.980479 4780 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="930a45eb-72d1-4060-92de-2e348073eb16" path="/var/lib/kubelet/pods/930a45eb-72d1-4060-92de-2e348073eb16/volumes" Dec 10 11:21:05 crc kubenswrapper[4780]: I1210 11:21:05.984810 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Dec 10 11:21:06 crc kubenswrapper[4780]: I1210 11:21:06.054897 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8twxm\" (UniqueName: \"kubernetes.io/projected/f371fb3f-c503-4308-b0fc-1a180c7e131e-kube-api-access-8twxm\") pod \"rabbitmq-server-0\" (UID: \"f371fb3f-c503-4308-b0fc-1a180c7e131e\") " pod="openstack/rabbitmq-server-0" Dec 10 11:21:06 crc kubenswrapper[4780]: I1210 11:21:06.055016 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"rabbitmq-server-0\" (UID: \"f371fb3f-c503-4308-b0fc-1a180c7e131e\") " pod="openstack/rabbitmq-server-0" Dec 10 11:21:06 crc kubenswrapper[4780]: I1210 11:21:06.055095 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/f371fb3f-c503-4308-b0fc-1a180c7e131e-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"f371fb3f-c503-4308-b0fc-1a180c7e131e\") " pod="openstack/rabbitmq-server-0" Dec 10 11:21:06 crc kubenswrapper[4780]: I1210 11:21:06.055153 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/f371fb3f-c503-4308-b0fc-1a180c7e131e-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"f371fb3f-c503-4308-b0fc-1a180c7e131e\") " pod="openstack/rabbitmq-server-0" Dec 10 11:21:06 crc kubenswrapper[4780]: I1210 11:21:06.055217 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/f371fb3f-c503-4308-b0fc-1a180c7e131e-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"f371fb3f-c503-4308-b0fc-1a180c7e131e\") " 
pod="openstack/rabbitmq-server-0" Dec 10 11:21:06 crc kubenswrapper[4780]: I1210 11:21:06.055249 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/f371fb3f-c503-4308-b0fc-1a180c7e131e-pod-info\") pod \"rabbitmq-server-0\" (UID: \"f371fb3f-c503-4308-b0fc-1a180c7e131e\") " pod="openstack/rabbitmq-server-0" Dec 10 11:21:06 crc kubenswrapper[4780]: I1210 11:21:06.055353 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/f371fb3f-c503-4308-b0fc-1a180c7e131e-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"f371fb3f-c503-4308-b0fc-1a180c7e131e\") " pod="openstack/rabbitmq-server-0" Dec 10 11:21:06 crc kubenswrapper[4780]: I1210 11:21:06.055411 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/f371fb3f-c503-4308-b0fc-1a180c7e131e-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"f371fb3f-c503-4308-b0fc-1a180c7e131e\") " pod="openstack/rabbitmq-server-0" Dec 10 11:21:06 crc kubenswrapper[4780]: I1210 11:21:06.055596 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/f371fb3f-c503-4308-b0fc-1a180c7e131e-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"f371fb3f-c503-4308-b0fc-1a180c7e131e\") " pod="openstack/rabbitmq-server-0" Dec 10 11:21:06 crc kubenswrapper[4780]: I1210 11:21:06.055642 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/f371fb3f-c503-4308-b0fc-1a180c7e131e-config-data\") pod \"rabbitmq-server-0\" (UID: \"f371fb3f-c503-4308-b0fc-1a180c7e131e\") " pod="openstack/rabbitmq-server-0" Dec 10 11:21:06 crc kubenswrapper[4780]: I1210 11:21:06.055712 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/f371fb3f-c503-4308-b0fc-1a180c7e131e-server-conf\") pod \"rabbitmq-server-0\" (UID: \"f371fb3f-c503-4308-b0fc-1a180c7e131e\") " pod="openstack/rabbitmq-server-0" Dec 10 11:21:06 crc kubenswrapper[4780]: I1210 11:21:06.160715 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/f371fb3f-c503-4308-b0fc-1a180c7e131e-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"f371fb3f-c503-4308-b0fc-1a180c7e131e\") " pod="openstack/rabbitmq-server-0" Dec 10 11:21:06 crc kubenswrapper[4780]: I1210 11:21:06.160833 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/f371fb3f-c503-4308-b0fc-1a180c7e131e-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"f371fb3f-c503-4308-b0fc-1a180c7e131e\") " pod="openstack/rabbitmq-server-0" Dec 10 11:21:06 crc kubenswrapper[4780]: I1210 11:21:06.160860 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/f371fb3f-c503-4308-b0fc-1a180c7e131e-config-data\") pod \"rabbitmq-server-0\" (UID: \"f371fb3f-c503-4308-b0fc-1a180c7e131e\") " pod="openstack/rabbitmq-server-0" Dec 10 11:21:06 crc kubenswrapper[4780]: I1210 11:21:06.160882 4780 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/f371fb3f-c503-4308-b0fc-1a180c7e131e-server-conf\") pod \"rabbitmq-server-0\" (UID: \"f371fb3f-c503-4308-b0fc-1a180c7e131e\") " pod="openstack/rabbitmq-server-0" Dec 10 11:21:06 crc kubenswrapper[4780]: I1210 11:21:06.161044 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8twxm\" (UniqueName: \"kubernetes.io/projected/f371fb3f-c503-4308-b0fc-1a180c7e131e-kube-api-access-8twxm\") pod \"rabbitmq-server-0\" (UID: \"f371fb3f-c503-4308-b0fc-1a180c7e131e\") " pod="openstack/rabbitmq-server-0" Dec 10 11:21:06 crc kubenswrapper[4780]: I1210 11:21:06.161084 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"rabbitmq-server-0\" (UID: \"f371fb3f-c503-4308-b0fc-1a180c7e131e\") " pod="openstack/rabbitmq-server-0" Dec 10 11:21:06 crc kubenswrapper[4780]: I1210 11:21:06.161111 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/f371fb3f-c503-4308-b0fc-1a180c7e131e-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"f371fb3f-c503-4308-b0fc-1a180c7e131e\") " pod="openstack/rabbitmq-server-0" Dec 10 11:21:06 crc kubenswrapper[4780]: I1210 11:21:06.161164 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/f371fb3f-c503-4308-b0fc-1a180c7e131e-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"f371fb3f-c503-4308-b0fc-1a180c7e131e\") " pod="openstack/rabbitmq-server-0" Dec 10 11:21:06 crc kubenswrapper[4780]: I1210 11:21:06.161202 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/f371fb3f-c503-4308-b0fc-1a180c7e131e-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"f371fb3f-c503-4308-b0fc-1a180c7e131e\") " pod="openstack/rabbitmq-server-0" Dec 10 11:21:06 crc kubenswrapper[4780]: I1210 11:21:06.161229 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/f371fb3f-c503-4308-b0fc-1a180c7e131e-pod-info\") pod \"rabbitmq-server-0\" (UID: \"f371fb3f-c503-4308-b0fc-1a180c7e131e\") " pod="openstack/rabbitmq-server-0" Dec 10 11:21:06 crc kubenswrapper[4780]: I1210 11:21:06.161270 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/f371fb3f-c503-4308-b0fc-1a180c7e131e-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"f371fb3f-c503-4308-b0fc-1a180c7e131e\") " pod="openstack/rabbitmq-server-0" Dec 10 11:21:06 crc kubenswrapper[4780]: I1210 11:21:06.162176 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/f371fb3f-c503-4308-b0fc-1a180c7e131e-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"f371fb3f-c503-4308-b0fc-1a180c7e131e\") " pod="openstack/rabbitmq-server-0" Dec 10 11:21:06 crc kubenswrapper[4780]: I1210 11:21:06.162597 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/f371fb3f-c503-4308-b0fc-1a180c7e131e-config-data\") pod \"rabbitmq-server-0\" (UID: \"f371fb3f-c503-4308-b0fc-1a180c7e131e\") " pod="openstack/rabbitmq-server-0" Dec 10 
11:21:06 crc kubenswrapper[4780]: I1210 11:21:06.163394 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/f371fb3f-c503-4308-b0fc-1a180c7e131e-server-conf\") pod \"rabbitmq-server-0\" (UID: \"f371fb3f-c503-4308-b0fc-1a180c7e131e\") " pod="openstack/rabbitmq-server-0" Dec 10 11:21:06 crc kubenswrapper[4780]: I1210 11:21:06.163821 4780 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"rabbitmq-server-0\" (UID: \"f371fb3f-c503-4308-b0fc-1a180c7e131e\") device mount path \"/mnt/openstack/pv03\"" pod="openstack/rabbitmq-server-0" Dec 10 11:21:06 crc kubenswrapper[4780]: I1210 11:21:06.166255 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/f371fb3f-c503-4308-b0fc-1a180c7e131e-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"f371fb3f-c503-4308-b0fc-1a180c7e131e\") " pod="openstack/rabbitmq-server-0" Dec 10 11:21:06 crc kubenswrapper[4780]: I1210 11:21:06.166321 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/f371fb3f-c503-4308-b0fc-1a180c7e131e-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"f371fb3f-c503-4308-b0fc-1a180c7e131e\") " pod="openstack/rabbitmq-server-0" Dec 10 11:21:06 crc kubenswrapper[4780]: I1210 11:21:06.169579 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/f371fb3f-c503-4308-b0fc-1a180c7e131e-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"f371fb3f-c503-4308-b0fc-1a180c7e131e\") " pod="openstack/rabbitmq-server-0" Dec 10 11:21:06 crc kubenswrapper[4780]: I1210 11:21:06.169680 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/f371fb3f-c503-4308-b0fc-1a180c7e131e-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"f371fb3f-c503-4308-b0fc-1a180c7e131e\") " pod="openstack/rabbitmq-server-0" Dec 10 11:21:06 crc kubenswrapper[4780]: I1210 11:21:06.171273 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/f371fb3f-c503-4308-b0fc-1a180c7e131e-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"f371fb3f-c503-4308-b0fc-1a180c7e131e\") " pod="openstack/rabbitmq-server-0" Dec 10 11:21:06 crc kubenswrapper[4780]: I1210 11:21:06.171584 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/f371fb3f-c503-4308-b0fc-1a180c7e131e-pod-info\") pod \"rabbitmq-server-0\" (UID: \"f371fb3f-c503-4308-b0fc-1a180c7e131e\") " pod="openstack/rabbitmq-server-0" Dec 10 11:21:06 crc kubenswrapper[4780]: I1210 11:21:06.187852 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8twxm\" (UniqueName: \"kubernetes.io/projected/f371fb3f-c503-4308-b0fc-1a180c7e131e-kube-api-access-8twxm\") pod \"rabbitmq-server-0\" (UID: \"f371fb3f-c503-4308-b0fc-1a180c7e131e\") " pod="openstack/rabbitmq-server-0" Dec 10 11:21:06 crc kubenswrapper[4780]: I1210 11:21:06.233477 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" 
event={"ID":"9f6ef7c1-91bd-4109-af1b-9cf3960ec2ae","Type":"ContainerDied","Data":"a01dbf379f65621e3f1448b3f2f18728c81f468790d8799ce9d6b901918f4f42"} Dec 10 11:21:06 crc kubenswrapper[4780]: I1210 11:21:06.233580 4780 scope.go:117] "RemoveContainer" containerID="0d29a425c249ad598c9ad2f8907eb021b533375acc4ac5133b190c0761613d86" Dec 10 11:21:06 crc kubenswrapper[4780]: I1210 11:21:06.233895 4780 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Dec 10 11:21:06 crc kubenswrapper[4780]: I1210 11:21:06.241767 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"rabbitmq-server-0\" (UID: \"f371fb3f-c503-4308-b0fc-1a180c7e131e\") " pod="openstack/rabbitmq-server-0" Dec 10 11:21:06 crc kubenswrapper[4780]: I1210 11:21:06.280127 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Dec 10 11:21:06 crc kubenswrapper[4780]: I1210 11:21:06.297521 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0" Dec 10 11:21:06 crc kubenswrapper[4780]: I1210 11:21:06.306877 4780 scope.go:117] "RemoveContainer" containerID="5b477ceaf617590d7e0355d0772b11a52ca0d161acfc933673d187471a1a5ca7" Dec 10 11:21:06 crc kubenswrapper[4780]: I1210 11:21:06.325777 4780 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Dec 10 11:21:06 crc kubenswrapper[4780]: I1210 11:21:06.360081 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Dec 10 11:21:06 crc kubenswrapper[4780]: I1210 11:21:06.364586 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Dec 10 11:21:06 crc kubenswrapper[4780]: I1210 11:21:06.370628 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-default-user" Dec 10 11:21:06 crc kubenswrapper[4780]: I1210 11:21:06.370831 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-plugins-conf" Dec 10 11:21:06 crc kubenswrapper[4780]: I1210 11:21:06.370986 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-server-conf" Dec 10 11:21:06 crc kubenswrapper[4780]: I1210 11:21:06.371750 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-config-data" Dec 10 11:21:06 crc kubenswrapper[4780]: I1210 11:21:06.371966 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-server-dockercfg-5qm56" Dec 10 11:21:06 crc kubenswrapper[4780]: I1210 11:21:06.372515 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-erlang-cookie" Dec 10 11:21:06 crc kubenswrapper[4780]: I1210 11:21:06.374250 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-cell1-svc" Dec 10 11:21:06 crc kubenswrapper[4780]: I1210 11:21:06.404032 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Dec 10 11:21:06 crc kubenswrapper[4780]: I1210 11:21:06.472354 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bkf57\" (UniqueName: \"kubernetes.io/projected/16eb03dd-df0a-4623-a42a-25a086709c69-kube-api-access-bkf57\") pod \"rabbitmq-cell1-server-0\" (UID: \"16eb03dd-df0a-4623-a42a-25a086709c69\") " pod="openstack/rabbitmq-cell1-server-0" Dec 10 11:21:06 crc kubenswrapper[4780]: I1210 11:21:06.472487 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/16eb03dd-df0a-4623-a42a-25a086709c69-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"16eb03dd-df0a-4623-a42a-25a086709c69\") " pod="openstack/rabbitmq-cell1-server-0" Dec 10 11:21:06 crc kubenswrapper[4780]: I1210 11:21:06.472899 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"16eb03dd-df0a-4623-a42a-25a086709c69\") " pod="openstack/rabbitmq-cell1-server-0" Dec 10 11:21:06 crc kubenswrapper[4780]: I1210 11:21:06.473083 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/16eb03dd-df0a-4623-a42a-25a086709c69-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"16eb03dd-df0a-4623-a42a-25a086709c69\") " pod="openstack/rabbitmq-cell1-server-0" Dec 10 11:21:06 crc kubenswrapper[4780]: I1210 11:21:06.473142 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/16eb03dd-df0a-4623-a42a-25a086709c69-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"16eb03dd-df0a-4623-a42a-25a086709c69\") " pod="openstack/rabbitmq-cell1-server-0" Dec 10 11:21:06 crc kubenswrapper[4780]: I1210 11:21:06.473267 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started 
for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/16eb03dd-df0a-4623-a42a-25a086709c69-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"16eb03dd-df0a-4623-a42a-25a086709c69\") " pod="openstack/rabbitmq-cell1-server-0" Dec 10 11:21:06 crc kubenswrapper[4780]: I1210 11:21:06.473421 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/16eb03dd-df0a-4623-a42a-25a086709c69-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"16eb03dd-df0a-4623-a42a-25a086709c69\") " pod="openstack/rabbitmq-cell1-server-0" Dec 10 11:21:06 crc kubenswrapper[4780]: I1210 11:21:06.473615 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/16eb03dd-df0a-4623-a42a-25a086709c69-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"16eb03dd-df0a-4623-a42a-25a086709c69\") " pod="openstack/rabbitmq-cell1-server-0" Dec 10 11:21:06 crc kubenswrapper[4780]: I1210 11:21:06.473646 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/16eb03dd-df0a-4623-a42a-25a086709c69-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"16eb03dd-df0a-4623-a42a-25a086709c69\") " pod="openstack/rabbitmq-cell1-server-0" Dec 10 11:21:06 crc kubenswrapper[4780]: I1210 11:21:06.473716 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/16eb03dd-df0a-4623-a42a-25a086709c69-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"16eb03dd-df0a-4623-a42a-25a086709c69\") " pod="openstack/rabbitmq-cell1-server-0" Dec 10 11:21:06 crc kubenswrapper[4780]: I1210 11:21:06.473744 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/16eb03dd-df0a-4623-a42a-25a086709c69-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"16eb03dd-df0a-4623-a42a-25a086709c69\") " pod="openstack/rabbitmq-cell1-server-0" Dec 10 11:21:06 crc kubenswrapper[4780]: I1210 11:21:06.581045 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/16eb03dd-df0a-4623-a42a-25a086709c69-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"16eb03dd-df0a-4623-a42a-25a086709c69\") " pod="openstack/rabbitmq-cell1-server-0" Dec 10 11:21:06 crc kubenswrapper[4780]: I1210 11:21:06.581721 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/16eb03dd-df0a-4623-a42a-25a086709c69-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"16eb03dd-df0a-4623-a42a-25a086709c69\") " pod="openstack/rabbitmq-cell1-server-0" Dec 10 11:21:06 crc kubenswrapper[4780]: I1210 11:21:06.581744 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/16eb03dd-df0a-4623-a42a-25a086709c69-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"16eb03dd-df0a-4623-a42a-25a086709c69\") " pod="openstack/rabbitmq-cell1-server-0" Dec 10 11:21:06 crc kubenswrapper[4780]: I1210 11:21:06.581784 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/16eb03dd-df0a-4623-a42a-25a086709c69-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"16eb03dd-df0a-4623-a42a-25a086709c69\") " pod="openstack/rabbitmq-cell1-server-0" Dec 10 11:21:06 crc kubenswrapper[4780]: I1210 11:21:06.581808 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/16eb03dd-df0a-4623-a42a-25a086709c69-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"16eb03dd-df0a-4623-a42a-25a086709c69\") " pod="openstack/rabbitmq-cell1-server-0" Dec 10 11:21:06 crc kubenswrapper[4780]: I1210 11:21:06.581849 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bkf57\" (UniqueName: \"kubernetes.io/projected/16eb03dd-df0a-4623-a42a-25a086709c69-kube-api-access-bkf57\") pod \"rabbitmq-cell1-server-0\" (UID: \"16eb03dd-df0a-4623-a42a-25a086709c69\") " pod="openstack/rabbitmq-cell1-server-0" Dec 10 11:21:06 crc kubenswrapper[4780]: I1210 11:21:06.581898 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/16eb03dd-df0a-4623-a42a-25a086709c69-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"16eb03dd-df0a-4623-a42a-25a086709c69\") " pod="openstack/rabbitmq-cell1-server-0" Dec 10 11:21:06 crc kubenswrapper[4780]: I1210 11:21:06.602336 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/16eb03dd-df0a-4623-a42a-25a086709c69-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"16eb03dd-df0a-4623-a42a-25a086709c69\") " pod="openstack/rabbitmq-cell1-server-0" Dec 10 11:21:06 crc kubenswrapper[4780]: I1210 11:21:06.602873 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"16eb03dd-df0a-4623-a42a-25a086709c69\") " pod="openstack/rabbitmq-cell1-server-0" Dec 10 11:21:06 crc kubenswrapper[4780]: I1210 11:21:06.603023 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/16eb03dd-df0a-4623-a42a-25a086709c69-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"16eb03dd-df0a-4623-a42a-25a086709c69\") " pod="openstack/rabbitmq-cell1-server-0" Dec 10 11:21:06 crc kubenswrapper[4780]: I1210 11:21:06.603068 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/16eb03dd-df0a-4623-a42a-25a086709c69-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"16eb03dd-df0a-4623-a42a-25a086709c69\") " pod="openstack/rabbitmq-cell1-server-0" Dec 10 11:21:06 crc kubenswrapper[4780]: I1210 11:21:06.603187 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/16eb03dd-df0a-4623-a42a-25a086709c69-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"16eb03dd-df0a-4623-a42a-25a086709c69\") " pod="openstack/rabbitmq-cell1-server-0" Dec 10 11:21:06 crc kubenswrapper[4780]: I1210 11:21:06.603896 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/16eb03dd-df0a-4623-a42a-25a086709c69-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: 
\"16eb03dd-df0a-4623-a42a-25a086709c69\") " pod="openstack/rabbitmq-cell1-server-0" Dec 10 11:21:06 crc kubenswrapper[4780]: I1210 11:21:06.605453 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/16eb03dd-df0a-4623-a42a-25a086709c69-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"16eb03dd-df0a-4623-a42a-25a086709c69\") " pod="openstack/rabbitmq-cell1-server-0" Dec 10 11:21:06 crc kubenswrapper[4780]: I1210 11:21:06.606099 4780 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"16eb03dd-df0a-4623-a42a-25a086709c69\") device mount path \"/mnt/openstack/pv01\"" pod="openstack/rabbitmq-cell1-server-0" Dec 10 11:21:06 crc kubenswrapper[4780]: I1210 11:21:06.607358 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/16eb03dd-df0a-4623-a42a-25a086709c69-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"16eb03dd-df0a-4623-a42a-25a086709c69\") " pod="openstack/rabbitmq-cell1-server-0" Dec 10 11:21:06 crc kubenswrapper[4780]: I1210 11:21:06.610407 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/16eb03dd-df0a-4623-a42a-25a086709c69-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"16eb03dd-df0a-4623-a42a-25a086709c69\") " pod="openstack/rabbitmq-cell1-server-0" Dec 10 11:21:06 crc kubenswrapper[4780]: I1210 11:21:06.622532 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/16eb03dd-df0a-4623-a42a-25a086709c69-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"16eb03dd-df0a-4623-a42a-25a086709c69\") " pod="openstack/rabbitmq-cell1-server-0" Dec 10 11:21:06 crc kubenswrapper[4780]: I1210 11:21:06.626833 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/16eb03dd-df0a-4623-a42a-25a086709c69-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"16eb03dd-df0a-4623-a42a-25a086709c69\") " pod="openstack/rabbitmq-cell1-server-0" Dec 10 11:21:06 crc kubenswrapper[4780]: I1210 11:21:06.647779 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/16eb03dd-df0a-4623-a42a-25a086709c69-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"16eb03dd-df0a-4623-a42a-25a086709c69\") " pod="openstack/rabbitmq-cell1-server-0" Dec 10 11:21:06 crc kubenswrapper[4780]: I1210 11:21:06.699886 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bkf57\" (UniqueName: \"kubernetes.io/projected/16eb03dd-df0a-4623-a42a-25a086709c69-kube-api-access-bkf57\") pod \"rabbitmq-cell1-server-0\" (UID: \"16eb03dd-df0a-4623-a42a-25a086709c69\") " pod="openstack/rabbitmq-cell1-server-0" Dec 10 11:21:06 crc kubenswrapper[4780]: I1210 11:21:06.751412 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/16eb03dd-df0a-4623-a42a-25a086709c69-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"16eb03dd-df0a-4623-a42a-25a086709c69\") " pod="openstack/rabbitmq-cell1-server-0" Dec 10 11:21:06 crc kubenswrapper[4780]: I1210 11:21:06.863044 4780 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"16eb03dd-df0a-4623-a42a-25a086709c69\") " pod="openstack/rabbitmq-cell1-server-0" Dec 10 11:21:07 crc kubenswrapper[4780]: I1210 11:21:07.017656 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Dec 10 11:21:07 crc kubenswrapper[4780]: W1210 11:21:07.302169 4780 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf371fb3f_c503_4308_b0fc_1a180c7e131e.slice/crio-65056f15185f6332ccef4fa67943aaba2580b3eaa3d8111bb274cd428a62c25c WatchSource:0}: Error finding container 65056f15185f6332ccef4fa67943aaba2580b3eaa3d8111bb274cd428a62c25c: Status 404 returned error can't find the container with id 65056f15185f6332ccef4fa67943aaba2580b3eaa3d8111bb274cd428a62c25c Dec 10 11:21:07 crc kubenswrapper[4780]: I1210 11:21:07.310852 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Dec 10 11:21:07 crc kubenswrapper[4780]: I1210 11:21:07.587928 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-7d84b4d45c-d8crr"] Dec 10 11:21:07 crc kubenswrapper[4780]: I1210 11:21:07.608327 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7d84b4d45c-d8crr" Dec 10 11:21:07 crc kubenswrapper[4780]: I1210 11:21:07.618895 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-edpm-ipam" Dec 10 11:21:07 crc kubenswrapper[4780]: I1210 11:21:07.648580 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-7d84b4d45c-d8crr"] Dec 10 11:21:07 crc kubenswrapper[4780]: I1210 11:21:07.752130 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Dec 10 11:21:07 crc kubenswrapper[4780]: I1210 11:21:07.806899 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/29fe3c97-0597-4b95-b6d4-22d1f6c50a73-config\") pod \"dnsmasq-dns-7d84b4d45c-d8crr\" (UID: \"29fe3c97-0597-4b95-b6d4-22d1f6c50a73\") " pod="openstack/dnsmasq-dns-7d84b4d45c-d8crr" Dec 10 11:21:07 crc kubenswrapper[4780]: I1210 11:21:07.807063 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/29fe3c97-0597-4b95-b6d4-22d1f6c50a73-ovsdbserver-nb\") pod \"dnsmasq-dns-7d84b4d45c-d8crr\" (UID: \"29fe3c97-0597-4b95-b6d4-22d1f6c50a73\") " pod="openstack/dnsmasq-dns-7d84b4d45c-d8crr" Dec 10 11:21:07 crc kubenswrapper[4780]: I1210 11:21:07.807714 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/29fe3c97-0597-4b95-b6d4-22d1f6c50a73-ovsdbserver-sb\") pod \"dnsmasq-dns-7d84b4d45c-d8crr\" (UID: \"29fe3c97-0597-4b95-b6d4-22d1f6c50a73\") " pod="openstack/dnsmasq-dns-7d84b4d45c-d8crr" Dec 10 11:21:07 crc kubenswrapper[4780]: I1210 11:21:07.807815 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-v4r2s\" (UniqueName: \"kubernetes.io/projected/29fe3c97-0597-4b95-b6d4-22d1f6c50a73-kube-api-access-v4r2s\") pod \"dnsmasq-dns-7d84b4d45c-d8crr\" (UID: 
\"29fe3c97-0597-4b95-b6d4-22d1f6c50a73\") " pod="openstack/dnsmasq-dns-7d84b4d45c-d8crr" Dec 10 11:21:07 crc kubenswrapper[4780]: I1210 11:21:07.807843 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/29fe3c97-0597-4b95-b6d4-22d1f6c50a73-dns-svc\") pod \"dnsmasq-dns-7d84b4d45c-d8crr\" (UID: \"29fe3c97-0597-4b95-b6d4-22d1f6c50a73\") " pod="openstack/dnsmasq-dns-7d84b4d45c-d8crr" Dec 10 11:21:07 crc kubenswrapper[4780]: I1210 11:21:07.808078 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/29fe3c97-0597-4b95-b6d4-22d1f6c50a73-dns-swift-storage-0\") pod \"dnsmasq-dns-7d84b4d45c-d8crr\" (UID: \"29fe3c97-0597-4b95-b6d4-22d1f6c50a73\") " pod="openstack/dnsmasq-dns-7d84b4d45c-d8crr" Dec 10 11:21:07 crc kubenswrapper[4780]: I1210 11:21:07.808107 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/29fe3c97-0597-4b95-b6d4-22d1f6c50a73-openstack-edpm-ipam\") pod \"dnsmasq-dns-7d84b4d45c-d8crr\" (UID: \"29fe3c97-0597-4b95-b6d4-22d1f6c50a73\") " pod="openstack/dnsmasq-dns-7d84b4d45c-d8crr" Dec 10 11:21:07 crc kubenswrapper[4780]: I1210 11:21:07.911307 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/29fe3c97-0597-4b95-b6d4-22d1f6c50a73-ovsdbserver-sb\") pod \"dnsmasq-dns-7d84b4d45c-d8crr\" (UID: \"29fe3c97-0597-4b95-b6d4-22d1f6c50a73\") " pod="openstack/dnsmasq-dns-7d84b4d45c-d8crr" Dec 10 11:21:07 crc kubenswrapper[4780]: I1210 11:21:07.917959 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-v4r2s\" (UniqueName: \"kubernetes.io/projected/29fe3c97-0597-4b95-b6d4-22d1f6c50a73-kube-api-access-v4r2s\") pod \"dnsmasq-dns-7d84b4d45c-d8crr\" (UID: \"29fe3c97-0597-4b95-b6d4-22d1f6c50a73\") " pod="openstack/dnsmasq-dns-7d84b4d45c-d8crr" Dec 10 11:21:07 crc kubenswrapper[4780]: I1210 11:21:07.918068 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/29fe3c97-0597-4b95-b6d4-22d1f6c50a73-dns-svc\") pod \"dnsmasq-dns-7d84b4d45c-d8crr\" (UID: \"29fe3c97-0597-4b95-b6d4-22d1f6c50a73\") " pod="openstack/dnsmasq-dns-7d84b4d45c-d8crr" Dec 10 11:21:07 crc kubenswrapper[4780]: I1210 11:21:07.918373 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/29fe3c97-0597-4b95-b6d4-22d1f6c50a73-openstack-edpm-ipam\") pod \"dnsmasq-dns-7d84b4d45c-d8crr\" (UID: \"29fe3c97-0597-4b95-b6d4-22d1f6c50a73\") " pod="openstack/dnsmasq-dns-7d84b4d45c-d8crr" Dec 10 11:21:07 crc kubenswrapper[4780]: I1210 11:21:07.918405 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/29fe3c97-0597-4b95-b6d4-22d1f6c50a73-dns-swift-storage-0\") pod \"dnsmasq-dns-7d84b4d45c-d8crr\" (UID: \"29fe3c97-0597-4b95-b6d4-22d1f6c50a73\") " pod="openstack/dnsmasq-dns-7d84b4d45c-d8crr" Dec 10 11:21:07 crc kubenswrapper[4780]: I1210 11:21:07.918663 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/29fe3c97-0597-4b95-b6d4-22d1f6c50a73-config\") pod 
\"dnsmasq-dns-7d84b4d45c-d8crr\" (UID: \"29fe3c97-0597-4b95-b6d4-22d1f6c50a73\") " pod="openstack/dnsmasq-dns-7d84b4d45c-d8crr" Dec 10 11:21:07 crc kubenswrapper[4780]: I1210 11:21:07.918742 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/29fe3c97-0597-4b95-b6d4-22d1f6c50a73-ovsdbserver-nb\") pod \"dnsmasq-dns-7d84b4d45c-d8crr\" (UID: \"29fe3c97-0597-4b95-b6d4-22d1f6c50a73\") " pod="openstack/dnsmasq-dns-7d84b4d45c-d8crr" Dec 10 11:21:07 crc kubenswrapper[4780]: I1210 11:21:07.921904 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/29fe3c97-0597-4b95-b6d4-22d1f6c50a73-dns-svc\") pod \"dnsmasq-dns-7d84b4d45c-d8crr\" (UID: \"29fe3c97-0597-4b95-b6d4-22d1f6c50a73\") " pod="openstack/dnsmasq-dns-7d84b4d45c-d8crr" Dec 10 11:21:07 crc kubenswrapper[4780]: I1210 11:21:07.922685 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/29fe3c97-0597-4b95-b6d4-22d1f6c50a73-ovsdbserver-sb\") pod \"dnsmasq-dns-7d84b4d45c-d8crr\" (UID: \"29fe3c97-0597-4b95-b6d4-22d1f6c50a73\") " pod="openstack/dnsmasq-dns-7d84b4d45c-d8crr" Dec 10 11:21:07 crc kubenswrapper[4780]: I1210 11:21:07.923500 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/29fe3c97-0597-4b95-b6d4-22d1f6c50a73-openstack-edpm-ipam\") pod \"dnsmasq-dns-7d84b4d45c-d8crr\" (UID: \"29fe3c97-0597-4b95-b6d4-22d1f6c50a73\") " pod="openstack/dnsmasq-dns-7d84b4d45c-d8crr" Dec 10 11:21:07 crc kubenswrapper[4780]: I1210 11:21:07.924029 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/29fe3c97-0597-4b95-b6d4-22d1f6c50a73-dns-swift-storage-0\") pod \"dnsmasq-dns-7d84b4d45c-d8crr\" (UID: \"29fe3c97-0597-4b95-b6d4-22d1f6c50a73\") " pod="openstack/dnsmasq-dns-7d84b4d45c-d8crr" Dec 10 11:21:07 crc kubenswrapper[4780]: I1210 11:21:07.933350 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/29fe3c97-0597-4b95-b6d4-22d1f6c50a73-ovsdbserver-nb\") pod \"dnsmasq-dns-7d84b4d45c-d8crr\" (UID: \"29fe3c97-0597-4b95-b6d4-22d1f6c50a73\") " pod="openstack/dnsmasq-dns-7d84b4d45c-d8crr" Dec 10 11:21:07 crc kubenswrapper[4780]: I1210 11:21:07.937589 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/29fe3c97-0597-4b95-b6d4-22d1f6c50a73-config\") pod \"dnsmasq-dns-7d84b4d45c-d8crr\" (UID: \"29fe3c97-0597-4b95-b6d4-22d1f6c50a73\") " pod="openstack/dnsmasq-dns-7d84b4d45c-d8crr" Dec 10 11:21:07 crc kubenswrapper[4780]: I1210 11:21:07.958992 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-v4r2s\" (UniqueName: \"kubernetes.io/projected/29fe3c97-0597-4b95-b6d4-22d1f6c50a73-kube-api-access-v4r2s\") pod \"dnsmasq-dns-7d84b4d45c-d8crr\" (UID: \"29fe3c97-0597-4b95-b6d4-22d1f6c50a73\") " pod="openstack/dnsmasq-dns-7d84b4d45c-d8crr" Dec 10 11:21:08 crc kubenswrapper[4780]: I1210 11:21:08.032465 4780 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9f6ef7c1-91bd-4109-af1b-9cf3960ec2ae" path="/var/lib/kubelet/pods/9f6ef7c1-91bd-4109-af1b-9cf3960ec2ae/volumes" Dec 10 11:21:08 crc kubenswrapper[4780]: I1210 11:21:08.198771 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-7d84b4d45c-d8crr" Dec 10 11:21:08 crc kubenswrapper[4780]: I1210 11:21:08.320622 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"16eb03dd-df0a-4623-a42a-25a086709c69","Type":"ContainerStarted","Data":"8e5c4292efb6b0c23731f050b05b599375621d10219c16f686a667a97caf426d"} Dec 10 11:21:08 crc kubenswrapper[4780]: I1210 11:21:08.322889 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"f371fb3f-c503-4308-b0fc-1a180c7e131e","Type":"ContainerStarted","Data":"65056f15185f6332ccef4fa67943aaba2580b3eaa3d8111bb274cd428a62c25c"} Dec 10 11:21:08 crc kubenswrapper[4780]: I1210 11:21:08.819250 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-7d84b4d45c-d8crr"] Dec 10 11:21:09 crc kubenswrapper[4780]: I1210 11:21:09.353714 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7d84b4d45c-d8crr" event={"ID":"29fe3c97-0597-4b95-b6d4-22d1f6c50a73","Type":"ContainerStarted","Data":"e18b29b313dfb917b7cc6ab6fbacc1e3eee32871f7886819056970e540f89d1b"} Dec 10 11:21:10 crc kubenswrapper[4780]: I1210 11:21:10.377085 4780 generic.go:334] "Generic (PLEG): container finished" podID="29fe3c97-0597-4b95-b6d4-22d1f6c50a73" containerID="48620244c5d459914b27d4f547412270d0007e83d9443720107657fcd4ca6f20" exitCode=0 Dec 10 11:21:10 crc kubenswrapper[4780]: I1210 11:21:10.377268 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7d84b4d45c-d8crr" event={"ID":"29fe3c97-0597-4b95-b6d4-22d1f6c50a73","Type":"ContainerDied","Data":"48620244c5d459914b27d4f547412270d0007e83d9443720107657fcd4ca6f20"} Dec 10 11:21:11 crc kubenswrapper[4780]: I1210 11:21:11.398617 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7d84b4d45c-d8crr" event={"ID":"29fe3c97-0597-4b95-b6d4-22d1f6c50a73","Type":"ContainerStarted","Data":"407a78aea5a2e3a4f1b9323951180fe75a0ba1a20793b0020190bd272ab43190"} Dec 10 11:21:11 crc kubenswrapper[4780]: I1210 11:21:11.399070 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-7d84b4d45c-d8crr" Dec 10 11:21:11 crc kubenswrapper[4780]: I1210 11:21:11.442214 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-7d84b4d45c-d8crr" podStartSLOduration=4.442178942 podStartE2EDuration="4.442178942s" podCreationTimestamp="2025-12-10 11:21:07 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 11:21:11.427074861 +0000 UTC m=+2176.280468324" watchObservedRunningTime="2025-12-10 11:21:11.442178942 +0000 UTC m=+2176.295572385" Dec 10 11:21:11 crc kubenswrapper[4780]: E1210 11:21:11.964479 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 11:21:15 crc kubenswrapper[4780]: I1210 11:21:15.459022 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"f371fb3f-c503-4308-b0fc-1a180c7e131e","Type":"ContainerStarted","Data":"9c2cb3733fc4358c437386420f4d0233d8a30f64d7df055fd3c2f7983d38060b"} Dec 10 11:21:16 crc kubenswrapper[4780]: I1210 
11:21:16.475373 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"16eb03dd-df0a-4623-a42a-25a086709c69","Type":"ContainerStarted","Data":"2a2e76281d86f67b240c0dc785557cc4a7c475775a996c83e27007a3cefd1bd5"} Dec 10 11:21:17 crc kubenswrapper[4780]: I1210 11:21:17.983617 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ceilometer-0" Dec 10 11:21:18 crc kubenswrapper[4780]: E1210 11:21:18.098091 4780 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested" Dec 10 11:21:18 crc kubenswrapper[4780]: E1210 11:21:18.098441 4780 kuberuntime_image.go:55] "Failed to pull image" err="initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested" Dec 10 11:21:18 crc kubenswrapper[4780]: E1210 11:21:18.098816 4780 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:ceilometer-central-agent,Image:quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n5d9hcfh66bh66bh89h5cdh97h57ch598h68h5b5h689h56chc5h96h58ch687h5dfh5ddh645h68bhcchcdh56ch56fh9fh654hd4h8dhb9h74h59cq,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/var/lib/openstack/bin,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/openstack/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:ceilometer-central-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-gf2w8,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[/usr/bin/python3 
/var/lib/openstack/bin/centralhealth.py],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:300,TimeoutSeconds:5,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ceilometer-0_openstack(317b5b7c-bb08-4441-a2ef-8c2d7390ada6): ErrImagePull: initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" logger="UnhandledError" Dec 10 11:21:18 crc kubenswrapper[4780]: E1210 11:21:18.100203 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ErrImagePull: \"initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 11:21:18 crc kubenswrapper[4780]: I1210 11:21:18.202060 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-7d84b4d45c-d8crr" Dec 10 11:21:18 crc kubenswrapper[4780]: I1210 11:21:18.323083 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6b7bbf7cf9-xhxdw"] Dec 10 11:21:18 crc kubenswrapper[4780]: I1210 11:21:18.333549 4780 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-6b7bbf7cf9-xhxdw" podUID="ea79dec7-12f6-4443-b129-233fad774365" containerName="dnsmasq-dns" containerID="cri-o://91373aa1f93d376da96852e2ff5f1f90415bb446df36cd4f8fc66e8e029eb6d8" gracePeriod=10 Dec 10 11:21:18 crc kubenswrapper[4780]: I1210 11:21:18.537849 4780 generic.go:334] "Generic (PLEG): container finished" podID="ea79dec7-12f6-4443-b129-233fad774365" containerID="91373aa1f93d376da96852e2ff5f1f90415bb446df36cd4f8fc66e8e029eb6d8" exitCode=0 Dec 10 11:21:18 crc kubenswrapper[4780]: I1210 11:21:18.538239 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6b7bbf7cf9-xhxdw" event={"ID":"ea79dec7-12f6-4443-b129-233fad774365","Type":"ContainerDied","Data":"91373aa1f93d376da96852e2ff5f1f90415bb446df36cd4f8fc66e8e029eb6d8"} Dec 10 11:21:18 crc kubenswrapper[4780]: E1210 11:21:18.547414 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 11:21:18 crc 
kubenswrapper[4780]: I1210 11:21:18.665946 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-6f6df4f56c-pbpzf"] Dec 10 11:21:18 crc kubenswrapper[4780]: I1210 11:21:18.675768 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6f6df4f56c-pbpzf" Dec 10 11:21:18 crc kubenswrapper[4780]: I1210 11:21:18.686122 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6f6df4f56c-pbpzf"] Dec 10 11:21:18 crc kubenswrapper[4780]: E1210 11:21:18.696750 4780 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podea79dec7_12f6_4443_b129_233fad774365.slice/crio-91373aa1f93d376da96852e2ff5f1f90415bb446df36cd4f8fc66e8e029eb6d8.scope\": RecentStats: unable to find data in memory cache]" Dec 10 11:21:19 crc kubenswrapper[4780]: I1210 11:21:19.281758 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/c79c532d-1798-4422-be7c-1b212a5f6973-dns-svc\") pod \"dnsmasq-dns-6f6df4f56c-pbpzf\" (UID: \"c79c532d-1798-4422-be7c-1b212a5f6973\") " pod="openstack/dnsmasq-dns-6f6df4f56c-pbpzf" Dec 10 11:21:19 crc kubenswrapper[4780]: I1210 11:21:19.282402 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/c79c532d-1798-4422-be7c-1b212a5f6973-ovsdbserver-nb\") pod \"dnsmasq-dns-6f6df4f56c-pbpzf\" (UID: \"c79c532d-1798-4422-be7c-1b212a5f6973\") " pod="openstack/dnsmasq-dns-6f6df4f56c-pbpzf" Dec 10 11:21:19 crc kubenswrapper[4780]: I1210 11:21:19.282532 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/c79c532d-1798-4422-be7c-1b212a5f6973-dns-swift-storage-0\") pod \"dnsmasq-dns-6f6df4f56c-pbpzf\" (UID: \"c79c532d-1798-4422-be7c-1b212a5f6973\") " pod="openstack/dnsmasq-dns-6f6df4f56c-pbpzf" Dec 10 11:21:19 crc kubenswrapper[4780]: I1210 11:21:19.282595 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/c79c532d-1798-4422-be7c-1b212a5f6973-openstack-edpm-ipam\") pod \"dnsmasq-dns-6f6df4f56c-pbpzf\" (UID: \"c79c532d-1798-4422-be7c-1b212a5f6973\") " pod="openstack/dnsmasq-dns-6f6df4f56c-pbpzf" Dec 10 11:21:19 crc kubenswrapper[4780]: I1210 11:21:19.282633 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-t29t9\" (UniqueName: \"kubernetes.io/projected/c79c532d-1798-4422-be7c-1b212a5f6973-kube-api-access-t29t9\") pod \"dnsmasq-dns-6f6df4f56c-pbpzf\" (UID: \"c79c532d-1798-4422-be7c-1b212a5f6973\") " pod="openstack/dnsmasq-dns-6f6df4f56c-pbpzf" Dec 10 11:21:19 crc kubenswrapper[4780]: I1210 11:21:19.282752 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c79c532d-1798-4422-be7c-1b212a5f6973-config\") pod \"dnsmasq-dns-6f6df4f56c-pbpzf\" (UID: \"c79c532d-1798-4422-be7c-1b212a5f6973\") " pod="openstack/dnsmasq-dns-6f6df4f56c-pbpzf" Dec 10 11:21:19 crc kubenswrapper[4780]: I1210 11:21:19.282888 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: 
\"kubernetes.io/configmap/c79c532d-1798-4422-be7c-1b212a5f6973-ovsdbserver-sb\") pod \"dnsmasq-dns-6f6df4f56c-pbpzf\" (UID: \"c79c532d-1798-4422-be7c-1b212a5f6973\") " pod="openstack/dnsmasq-dns-6f6df4f56c-pbpzf" Dec 10 11:21:19 crc kubenswrapper[4780]: I1210 11:21:19.406183 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-t29t9\" (UniqueName: \"kubernetes.io/projected/c79c532d-1798-4422-be7c-1b212a5f6973-kube-api-access-t29t9\") pod \"dnsmasq-dns-6f6df4f56c-pbpzf\" (UID: \"c79c532d-1798-4422-be7c-1b212a5f6973\") " pod="openstack/dnsmasq-dns-6f6df4f56c-pbpzf" Dec 10 11:21:19 crc kubenswrapper[4780]: I1210 11:21:19.406437 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c79c532d-1798-4422-be7c-1b212a5f6973-config\") pod \"dnsmasq-dns-6f6df4f56c-pbpzf\" (UID: \"c79c532d-1798-4422-be7c-1b212a5f6973\") " pod="openstack/dnsmasq-dns-6f6df4f56c-pbpzf" Dec 10 11:21:19 crc kubenswrapper[4780]: I1210 11:21:19.406636 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/c79c532d-1798-4422-be7c-1b212a5f6973-ovsdbserver-sb\") pod \"dnsmasq-dns-6f6df4f56c-pbpzf\" (UID: \"c79c532d-1798-4422-be7c-1b212a5f6973\") " pod="openstack/dnsmasq-dns-6f6df4f56c-pbpzf" Dec 10 11:21:19 crc kubenswrapper[4780]: I1210 11:21:19.406750 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/c79c532d-1798-4422-be7c-1b212a5f6973-dns-svc\") pod \"dnsmasq-dns-6f6df4f56c-pbpzf\" (UID: \"c79c532d-1798-4422-be7c-1b212a5f6973\") " pod="openstack/dnsmasq-dns-6f6df4f56c-pbpzf" Dec 10 11:21:19 crc kubenswrapper[4780]: I1210 11:21:19.406820 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/c79c532d-1798-4422-be7c-1b212a5f6973-ovsdbserver-nb\") pod \"dnsmasq-dns-6f6df4f56c-pbpzf\" (UID: \"c79c532d-1798-4422-be7c-1b212a5f6973\") " pod="openstack/dnsmasq-dns-6f6df4f56c-pbpzf" Dec 10 11:21:19 crc kubenswrapper[4780]: I1210 11:21:19.407011 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/c79c532d-1798-4422-be7c-1b212a5f6973-dns-swift-storage-0\") pod \"dnsmasq-dns-6f6df4f56c-pbpzf\" (UID: \"c79c532d-1798-4422-be7c-1b212a5f6973\") " pod="openstack/dnsmasq-dns-6f6df4f56c-pbpzf" Dec 10 11:21:19 crc kubenswrapper[4780]: I1210 11:21:19.407086 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/c79c532d-1798-4422-be7c-1b212a5f6973-openstack-edpm-ipam\") pod \"dnsmasq-dns-6f6df4f56c-pbpzf\" (UID: \"c79c532d-1798-4422-be7c-1b212a5f6973\") " pod="openstack/dnsmasq-dns-6f6df4f56c-pbpzf" Dec 10 11:21:19 crc kubenswrapper[4780]: I1210 11:21:19.408757 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c79c532d-1798-4422-be7c-1b212a5f6973-config\") pod \"dnsmasq-dns-6f6df4f56c-pbpzf\" (UID: \"c79c532d-1798-4422-be7c-1b212a5f6973\") " pod="openstack/dnsmasq-dns-6f6df4f56c-pbpzf" Dec 10 11:21:19 crc kubenswrapper[4780]: I1210 11:21:19.409493 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/c79c532d-1798-4422-be7c-1b212a5f6973-dns-svc\") pod 
\"dnsmasq-dns-6f6df4f56c-pbpzf\" (UID: \"c79c532d-1798-4422-be7c-1b212a5f6973\") " pod="openstack/dnsmasq-dns-6f6df4f56c-pbpzf" Dec 10 11:21:19 crc kubenswrapper[4780]: I1210 11:21:19.410038 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/c79c532d-1798-4422-be7c-1b212a5f6973-ovsdbserver-nb\") pod \"dnsmasq-dns-6f6df4f56c-pbpzf\" (UID: \"c79c532d-1798-4422-be7c-1b212a5f6973\") " pod="openstack/dnsmasq-dns-6f6df4f56c-pbpzf" Dec 10 11:21:19 crc kubenswrapper[4780]: I1210 11:21:19.410566 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/c79c532d-1798-4422-be7c-1b212a5f6973-dns-swift-storage-0\") pod \"dnsmasq-dns-6f6df4f56c-pbpzf\" (UID: \"c79c532d-1798-4422-be7c-1b212a5f6973\") " pod="openstack/dnsmasq-dns-6f6df4f56c-pbpzf" Dec 10 11:21:19 crc kubenswrapper[4780]: I1210 11:21:19.412165 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/c79c532d-1798-4422-be7c-1b212a5f6973-ovsdbserver-sb\") pod \"dnsmasq-dns-6f6df4f56c-pbpzf\" (UID: \"c79c532d-1798-4422-be7c-1b212a5f6973\") " pod="openstack/dnsmasq-dns-6f6df4f56c-pbpzf" Dec 10 11:21:19 crc kubenswrapper[4780]: I1210 11:21:19.418230 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/c79c532d-1798-4422-be7c-1b212a5f6973-openstack-edpm-ipam\") pod \"dnsmasq-dns-6f6df4f56c-pbpzf\" (UID: \"c79c532d-1798-4422-be7c-1b212a5f6973\") " pod="openstack/dnsmasq-dns-6f6df4f56c-pbpzf" Dec 10 11:21:19 crc kubenswrapper[4780]: I1210 11:21:19.443141 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-t29t9\" (UniqueName: \"kubernetes.io/projected/c79c532d-1798-4422-be7c-1b212a5f6973-kube-api-access-t29t9\") pod \"dnsmasq-dns-6f6df4f56c-pbpzf\" (UID: \"c79c532d-1798-4422-be7c-1b212a5f6973\") " pod="openstack/dnsmasq-dns-6f6df4f56c-pbpzf" Dec 10 11:21:19 crc kubenswrapper[4780]: I1210 11:21:19.555622 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6b7bbf7cf9-xhxdw" event={"ID":"ea79dec7-12f6-4443-b129-233fad774365","Type":"ContainerDied","Data":"72c7c7ea41474d2e47a438870372e1faf2a2f1c93779dc9f5f99bd94a19d2702"} Dec 10 11:21:19 crc kubenswrapper[4780]: I1210 11:21:19.556112 4780 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="72c7c7ea41474d2e47a438870372e1faf2a2f1c93779dc9f5f99bd94a19d2702" Dec 10 11:21:19 crc kubenswrapper[4780]: I1210 11:21:19.591164 4780 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6b7bbf7cf9-xhxdw" Dec 10 11:21:19 crc kubenswrapper[4780]: I1210 11:21:19.617273 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6f6df4f56c-pbpzf" Dec 10 11:21:19 crc kubenswrapper[4780]: I1210 11:21:19.725313 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zzrzs\" (UniqueName: \"kubernetes.io/projected/ea79dec7-12f6-4443-b129-233fad774365-kube-api-access-zzrzs\") pod \"ea79dec7-12f6-4443-b129-233fad774365\" (UID: \"ea79dec7-12f6-4443-b129-233fad774365\") " Dec 10 11:21:19 crc kubenswrapper[4780]: I1210 11:21:19.725388 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/ea79dec7-12f6-4443-b129-233fad774365-ovsdbserver-sb\") pod \"ea79dec7-12f6-4443-b129-233fad774365\" (UID: \"ea79dec7-12f6-4443-b129-233fad774365\") " Dec 10 11:21:19 crc kubenswrapper[4780]: I1210 11:21:19.725612 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ea79dec7-12f6-4443-b129-233fad774365-config\") pod \"ea79dec7-12f6-4443-b129-233fad774365\" (UID: \"ea79dec7-12f6-4443-b129-233fad774365\") " Dec 10 11:21:19 crc kubenswrapper[4780]: I1210 11:21:19.725747 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/ea79dec7-12f6-4443-b129-233fad774365-ovsdbserver-nb\") pod \"ea79dec7-12f6-4443-b129-233fad774365\" (UID: \"ea79dec7-12f6-4443-b129-233fad774365\") " Dec 10 11:21:19 crc kubenswrapper[4780]: I1210 11:21:19.725874 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ea79dec7-12f6-4443-b129-233fad774365-dns-svc\") pod \"ea79dec7-12f6-4443-b129-233fad774365\" (UID: \"ea79dec7-12f6-4443-b129-233fad774365\") " Dec 10 11:21:19 crc kubenswrapper[4780]: I1210 11:21:19.725973 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/ea79dec7-12f6-4443-b129-233fad774365-dns-swift-storage-0\") pod \"ea79dec7-12f6-4443-b129-233fad774365\" (UID: \"ea79dec7-12f6-4443-b129-233fad774365\") " Dec 10 11:21:19 crc kubenswrapper[4780]: I1210 11:21:19.769121 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ea79dec7-12f6-4443-b129-233fad774365-kube-api-access-zzrzs" (OuterVolumeSpecName: "kube-api-access-zzrzs") pod "ea79dec7-12f6-4443-b129-233fad774365" (UID: "ea79dec7-12f6-4443-b129-233fad774365"). InnerVolumeSpecName "kube-api-access-zzrzs". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:21:19 crc kubenswrapper[4780]: I1210 11:21:19.840227 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ea79dec7-12f6-4443-b129-233fad774365-config" (OuterVolumeSpecName: "config") pod "ea79dec7-12f6-4443-b129-233fad774365" (UID: "ea79dec7-12f6-4443-b129-233fad774365"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 11:21:19 crc kubenswrapper[4780]: I1210 11:21:19.843814 4780 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zzrzs\" (UniqueName: \"kubernetes.io/projected/ea79dec7-12f6-4443-b129-233fad774365-kube-api-access-zzrzs\") on node \"crc\" DevicePath \"\"" Dec 10 11:21:19 crc kubenswrapper[4780]: I1210 11:21:19.843867 4780 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ea79dec7-12f6-4443-b129-233fad774365-config\") on node \"crc\" DevicePath \"\"" Dec 10 11:21:19 crc kubenswrapper[4780]: I1210 11:21:19.932165 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ea79dec7-12f6-4443-b129-233fad774365-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "ea79dec7-12f6-4443-b129-233fad774365" (UID: "ea79dec7-12f6-4443-b129-233fad774365"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 11:21:19 crc kubenswrapper[4780]: I1210 11:21:19.937687 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ea79dec7-12f6-4443-b129-233fad774365-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "ea79dec7-12f6-4443-b129-233fad774365" (UID: "ea79dec7-12f6-4443-b129-233fad774365"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 11:21:19 crc kubenswrapper[4780]: I1210 11:21:19.954152 4780 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/ea79dec7-12f6-4443-b129-233fad774365-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Dec 10 11:21:19 crc kubenswrapper[4780]: I1210 11:21:19.955376 4780 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ea79dec7-12f6-4443-b129-233fad774365-dns-svc\") on node \"crc\" DevicePath \"\"" Dec 10 11:21:19 crc kubenswrapper[4780]: I1210 11:21:19.998324 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ea79dec7-12f6-4443-b129-233fad774365-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "ea79dec7-12f6-4443-b129-233fad774365" (UID: "ea79dec7-12f6-4443-b129-233fad774365"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 11:21:20 crc kubenswrapper[4780]: I1210 11:21:20.006668 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ea79dec7-12f6-4443-b129-233fad774365-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "ea79dec7-12f6-4443-b129-233fad774365" (UID: "ea79dec7-12f6-4443-b129-233fad774365"). InnerVolumeSpecName "ovsdbserver-sb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 11:21:20 crc kubenswrapper[4780]: I1210 11:21:20.058533 4780 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/ea79dec7-12f6-4443-b129-233fad774365-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Dec 10 11:21:20 crc kubenswrapper[4780]: I1210 11:21:20.058588 4780 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/ea79dec7-12f6-4443-b129-233fad774365-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Dec 10 11:21:20 crc kubenswrapper[4780]: I1210 11:21:20.399072 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6f6df4f56c-pbpzf"] Dec 10 11:21:20 crc kubenswrapper[4780]: W1210 11:21:20.406482 4780 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc79c532d_1798_4422_be7c_1b212a5f6973.slice/crio-daf4533de86af65b1113a95cf6755d073b5806942254bd6f6df6b337b038e79e WatchSource:0}: Error finding container daf4533de86af65b1113a95cf6755d073b5806942254bd6f6df6b337b038e79e: Status 404 returned error can't find the container with id daf4533de86af65b1113a95cf6755d073b5806942254bd6f6df6b337b038e79e Dec 10 11:21:20 crc kubenswrapper[4780]: I1210 11:21:20.574452 4780 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6b7bbf7cf9-xhxdw" Dec 10 11:21:20 crc kubenswrapper[4780]: I1210 11:21:20.575633 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6f6df4f56c-pbpzf" event={"ID":"c79c532d-1798-4422-be7c-1b212a5f6973","Type":"ContainerStarted","Data":"daf4533de86af65b1113a95cf6755d073b5806942254bd6f6df6b337b038e79e"} Dec 10 11:21:20 crc kubenswrapper[4780]: I1210 11:21:20.709299 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6b7bbf7cf9-xhxdw"] Dec 10 11:21:20 crc kubenswrapper[4780]: I1210 11:21:20.752353 4780 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-6b7bbf7cf9-xhxdw"] Dec 10 11:21:21 crc kubenswrapper[4780]: I1210 11:21:21.595267 4780 generic.go:334] "Generic (PLEG): container finished" podID="c79c532d-1798-4422-be7c-1b212a5f6973" containerID="d81b9030ec2f41c0028532acbd2125a5a326dd6fbbf3c411cdcfad3113a9a671" exitCode=0 Dec 10 11:21:21 crc kubenswrapper[4780]: I1210 11:21:21.595357 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6f6df4f56c-pbpzf" event={"ID":"c79c532d-1798-4422-be7c-1b212a5f6973","Type":"ContainerDied","Data":"d81b9030ec2f41c0028532acbd2125a5a326dd6fbbf3c411cdcfad3113a9a671"} Dec 10 11:21:21 crc kubenswrapper[4780]: I1210 11:21:21.978331 4780 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ea79dec7-12f6-4443-b129-233fad774365" path="/var/lib/kubelet/pods/ea79dec7-12f6-4443-b129-233fad774365/volumes" Dec 10 11:21:22 crc kubenswrapper[4780]: I1210 11:21:22.612978 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6f6df4f56c-pbpzf" event={"ID":"c79c532d-1798-4422-be7c-1b212a5f6973","Type":"ContainerStarted","Data":"e2d351e73911fa0ea8cf503a0dc3f944cd695a1de28d979a2d6a5a2625a4294e"} Dec 10 11:21:22 crc kubenswrapper[4780]: I1210 11:21:22.613442 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-6f6df4f56c-pbpzf" Dec 10 11:21:22 crc kubenswrapper[4780]: I1210 11:21:22.645320 4780 pod_startup_latency_tracker.go:104] 
"Observed pod startup duration" pod="openstack/dnsmasq-dns-6f6df4f56c-pbpzf" podStartSLOduration=4.64528655 podStartE2EDuration="4.64528655s" podCreationTimestamp="2025-12-10 11:21:18 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 11:21:22.635743834 +0000 UTC m=+2187.489137287" watchObservedRunningTime="2025-12-10 11:21:22.64528655 +0000 UTC m=+2187.498680003" Dec 10 11:21:23 crc kubenswrapper[4780]: E1210 11:21:23.087035 4780 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested" Dec 10 11:21:23 crc kubenswrapper[4780]: E1210 11:21:23.087116 4780 kuberuntime_image.go:55] "Failed to pull image" err="initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested" Dec 10 11:21:23 crc kubenswrapper[4780]: E1210 11:21:23.087302 4780 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:heat-db-sync,Image:quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested,Command:[/bin/bash],Args:[-c /usr/bin/heat-manage --config-dir /etc/heat/heat.conf.d db_sync],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:true,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/heat/heat.conf.d/00-default.conf,SubPath:00-default.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:false,MountPath:/etc/heat/heat.conf.d/01-custom.conf,SubPath:01-custom.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/my.cnf,SubPath:my.cnf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-fh6ms,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL 
MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42418,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:*42418,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod heat-db-sync-nd4t7_openstack(4ba2892c-316e-4819-a33c-d7b2b6803553): ErrImagePull: initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" logger="UnhandledError" Dec 10 11:21:23 crc kubenswrapper[4780]: E1210 11:21:23.088814 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ErrImagePull: \"initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 11:21:27 crc kubenswrapper[4780]: I1210 11:21:27.476515 4780 patch_prober.go:28] interesting pod/machine-config-daemon-xhdr5 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 10 11:21:27 crc kubenswrapper[4780]: I1210 11:21:27.478911 4780 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" podUID="6bf1dca1-b191-4796-b326-baac53e84045" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 10 11:21:29 crc kubenswrapper[4780]: I1210 11:21:29.620587 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-6f6df4f56c-pbpzf" Dec 10 11:21:29 crc kubenswrapper[4780]: I1210 11:21:29.746125 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-7d84b4d45c-d8crr"] Dec 10 11:21:29 crc kubenswrapper[4780]: I1210 11:21:29.747793 4780 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-7d84b4d45c-d8crr" podUID="29fe3c97-0597-4b95-b6d4-22d1f6c50a73" containerName="dnsmasq-dns" containerID="cri-o://407a78aea5a2e3a4f1b9323951180fe75a0ba1a20793b0020190bd272ab43190" gracePeriod=10 Dec 10 11:21:30 crc kubenswrapper[4780]: I1210 11:21:30.756169 4780 generic.go:334] "Generic (PLEG): container finished" podID="29fe3c97-0597-4b95-b6d4-22d1f6c50a73" containerID="407a78aea5a2e3a4f1b9323951180fe75a0ba1a20793b0020190bd272ab43190" exitCode=0 Dec 10 11:21:30 crc kubenswrapper[4780]: I1210 11:21:30.756262 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7d84b4d45c-d8crr" event={"ID":"29fe3c97-0597-4b95-b6d4-22d1f6c50a73","Type":"ContainerDied","Data":"407a78aea5a2e3a4f1b9323951180fe75a0ba1a20793b0020190bd272ab43190"} Dec 10 11:21:30 crc kubenswrapper[4780]: I1210 11:21:30.928718 4780 util.go:48] "No ready 
sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7d84b4d45c-d8crr" Dec 10 11:21:31 crc kubenswrapper[4780]: I1210 11:21:31.131866 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/29fe3c97-0597-4b95-b6d4-22d1f6c50a73-ovsdbserver-nb\") pod \"29fe3c97-0597-4b95-b6d4-22d1f6c50a73\" (UID: \"29fe3c97-0597-4b95-b6d4-22d1f6c50a73\") " Dec 10 11:21:31 crc kubenswrapper[4780]: I1210 11:21:31.133156 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/29fe3c97-0597-4b95-b6d4-22d1f6c50a73-config\") pod \"29fe3c97-0597-4b95-b6d4-22d1f6c50a73\" (UID: \"29fe3c97-0597-4b95-b6d4-22d1f6c50a73\") " Dec 10 11:21:31 crc kubenswrapper[4780]: I1210 11:21:31.133197 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/29fe3c97-0597-4b95-b6d4-22d1f6c50a73-dns-swift-storage-0\") pod \"29fe3c97-0597-4b95-b6d4-22d1f6c50a73\" (UID: \"29fe3c97-0597-4b95-b6d4-22d1f6c50a73\") " Dec 10 11:21:31 crc kubenswrapper[4780]: I1210 11:21:31.133475 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/29fe3c97-0597-4b95-b6d4-22d1f6c50a73-openstack-edpm-ipam\") pod \"29fe3c97-0597-4b95-b6d4-22d1f6c50a73\" (UID: \"29fe3c97-0597-4b95-b6d4-22d1f6c50a73\") " Dec 10 11:21:31 crc kubenswrapper[4780]: I1210 11:21:31.133599 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/29fe3c97-0597-4b95-b6d4-22d1f6c50a73-dns-svc\") pod \"29fe3c97-0597-4b95-b6d4-22d1f6c50a73\" (UID: \"29fe3c97-0597-4b95-b6d4-22d1f6c50a73\") " Dec 10 11:21:31 crc kubenswrapper[4780]: I1210 11:21:31.133703 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/29fe3c97-0597-4b95-b6d4-22d1f6c50a73-ovsdbserver-sb\") pod \"29fe3c97-0597-4b95-b6d4-22d1f6c50a73\" (UID: \"29fe3c97-0597-4b95-b6d4-22d1f6c50a73\") " Dec 10 11:21:31 crc kubenswrapper[4780]: I1210 11:21:31.133859 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-v4r2s\" (UniqueName: \"kubernetes.io/projected/29fe3c97-0597-4b95-b6d4-22d1f6c50a73-kube-api-access-v4r2s\") pod \"29fe3c97-0597-4b95-b6d4-22d1f6c50a73\" (UID: \"29fe3c97-0597-4b95-b6d4-22d1f6c50a73\") " Dec 10 11:21:31 crc kubenswrapper[4780]: I1210 11:21:31.142146 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/29fe3c97-0597-4b95-b6d4-22d1f6c50a73-kube-api-access-v4r2s" (OuterVolumeSpecName: "kube-api-access-v4r2s") pod "29fe3c97-0597-4b95-b6d4-22d1f6c50a73" (UID: "29fe3c97-0597-4b95-b6d4-22d1f6c50a73"). InnerVolumeSpecName "kube-api-access-v4r2s". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:21:31 crc kubenswrapper[4780]: I1210 11:21:31.209358 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/29fe3c97-0597-4b95-b6d4-22d1f6c50a73-config" (OuterVolumeSpecName: "config") pod "29fe3c97-0597-4b95-b6d4-22d1f6c50a73" (UID: "29fe3c97-0597-4b95-b6d4-22d1f6c50a73"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 11:21:31 crc kubenswrapper[4780]: I1210 11:21:31.216033 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/29fe3c97-0597-4b95-b6d4-22d1f6c50a73-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "29fe3c97-0597-4b95-b6d4-22d1f6c50a73" (UID: "29fe3c97-0597-4b95-b6d4-22d1f6c50a73"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 11:21:31 crc kubenswrapper[4780]: I1210 11:21:31.227381 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/29fe3c97-0597-4b95-b6d4-22d1f6c50a73-openstack-edpm-ipam" (OuterVolumeSpecName: "openstack-edpm-ipam") pod "29fe3c97-0597-4b95-b6d4-22d1f6c50a73" (UID: "29fe3c97-0597-4b95-b6d4-22d1f6c50a73"). InnerVolumeSpecName "openstack-edpm-ipam". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 11:21:31 crc kubenswrapper[4780]: I1210 11:21:31.231423 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/29fe3c97-0597-4b95-b6d4-22d1f6c50a73-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "29fe3c97-0597-4b95-b6d4-22d1f6c50a73" (UID: "29fe3c97-0597-4b95-b6d4-22d1f6c50a73"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 11:21:31 crc kubenswrapper[4780]: I1210 11:21:31.233376 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/29fe3c97-0597-4b95-b6d4-22d1f6c50a73-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "29fe3c97-0597-4b95-b6d4-22d1f6c50a73" (UID: "29fe3c97-0597-4b95-b6d4-22d1f6c50a73"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 11:21:31 crc kubenswrapper[4780]: I1210 11:21:31.235048 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/29fe3c97-0597-4b95-b6d4-22d1f6c50a73-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "29fe3c97-0597-4b95-b6d4-22d1f6c50a73" (UID: "29fe3c97-0597-4b95-b6d4-22d1f6c50a73"). InnerVolumeSpecName "dns-swift-storage-0". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 11:21:31 crc kubenswrapper[4780]: I1210 11:21:31.237392 4780 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-v4r2s\" (UniqueName: \"kubernetes.io/projected/29fe3c97-0597-4b95-b6d4-22d1f6c50a73-kube-api-access-v4r2s\") on node \"crc\" DevicePath \"\"" Dec 10 11:21:31 crc kubenswrapper[4780]: I1210 11:21:31.237419 4780 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/29fe3c97-0597-4b95-b6d4-22d1f6c50a73-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Dec 10 11:21:31 crc kubenswrapper[4780]: I1210 11:21:31.237433 4780 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/29fe3c97-0597-4b95-b6d4-22d1f6c50a73-config\") on node \"crc\" DevicePath \"\"" Dec 10 11:21:31 crc kubenswrapper[4780]: I1210 11:21:31.237443 4780 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/29fe3c97-0597-4b95-b6d4-22d1f6c50a73-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Dec 10 11:21:31 crc kubenswrapper[4780]: I1210 11:21:31.237451 4780 reconciler_common.go:293] "Volume detached for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/29fe3c97-0597-4b95-b6d4-22d1f6c50a73-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Dec 10 11:21:31 crc kubenswrapper[4780]: I1210 11:21:31.237460 4780 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/29fe3c97-0597-4b95-b6d4-22d1f6c50a73-dns-svc\") on node \"crc\" DevicePath \"\"" Dec 10 11:21:31 crc kubenswrapper[4780]: I1210 11:21:31.237468 4780 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/29fe3c97-0597-4b95-b6d4-22d1f6c50a73-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Dec 10 11:21:31 crc kubenswrapper[4780]: I1210 11:21:31.775718 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7d84b4d45c-d8crr" event={"ID":"29fe3c97-0597-4b95-b6d4-22d1f6c50a73","Type":"ContainerDied","Data":"e18b29b313dfb917b7cc6ab6fbacc1e3eee32871f7886819056970e540f89d1b"} Dec 10 11:21:31 crc kubenswrapper[4780]: I1210 11:21:31.775838 4780 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-7d84b4d45c-d8crr" Dec 10 11:21:31 crc kubenswrapper[4780]: I1210 11:21:31.776301 4780 scope.go:117] "RemoveContainer" containerID="407a78aea5a2e3a4f1b9323951180fe75a0ba1a20793b0020190bd272ab43190" Dec 10 11:21:31 crc kubenswrapper[4780]: I1210 11:21:31.816935 4780 scope.go:117] "RemoveContainer" containerID="48620244c5d459914b27d4f547412270d0007e83d9443720107657fcd4ca6f20" Dec 10 11:21:31 crc kubenswrapper[4780]: I1210 11:21:31.828410 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-7d84b4d45c-d8crr"] Dec 10 11:21:31 crc kubenswrapper[4780]: I1210 11:21:31.840774 4780 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-7d84b4d45c-d8crr"] Dec 10 11:21:31 crc kubenswrapper[4780]: E1210 11:21:31.962802 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 11:21:31 crc kubenswrapper[4780]: I1210 11:21:31.976730 4780 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="29fe3c97-0597-4b95-b6d4-22d1f6c50a73" path="/var/lib/kubelet/pods/29fe3c97-0597-4b95-b6d4-22d1f6c50a73/volumes" Dec 10 11:21:35 crc kubenswrapper[4780]: E1210 11:21:35.975907 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 11:21:40 crc kubenswrapper[4780]: I1210 11:21:40.916074 4780 scope.go:117] "RemoveContainer" containerID="4b80868b847dad59daacf4883d411e8b6a1644f6aacfd5b357c1061b720450e0" Dec 10 11:21:40 crc kubenswrapper[4780]: I1210 11:21:40.952873 4780 scope.go:117] "RemoveContainer" containerID="2be4b9bdd746ab4e7cbde6d8d26a1dd11549ed2a7276bde6dfb93d5b25013263" Dec 10 11:21:43 crc kubenswrapper[4780]: E1210 11:21:43.057015 4780 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested" Dec 10 11:21:43 crc kubenswrapper[4780]: E1210 11:21:43.057878 4780 kuberuntime_image.go:55] "Failed to pull image" err="initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested" Dec 10 11:21:43 crc kubenswrapper[4780]: E1210 11:21:43.058071 4780 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:ceilometer-central-agent,Image:quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n5d9hcfh66bh66bh89h5cdh97h57ch598h68h5b5h689h56chc5h96h58ch687h5dfh5ddh645h68bhcchcdh56ch56fh9fh654hd4h8dhb9h74h59cq,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/var/lib/openstack/bin,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/openstack/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:ceilometer-central-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-gf2w8,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[/usr/bin/python3 /var/lib/openstack/bin/centralhealth.py],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:300,TimeoutSeconds:5,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ceilometer-0_openstack(317b5b7c-bb08-4441-a2ef-8c2d7390ada6): ErrImagePull: initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" logger="UnhandledError" Dec 10 11:21:43 crc kubenswrapper[4780]: E1210 11:21:43.059310 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ErrImagePull: \"initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 11:21:43 crc kubenswrapper[4780]: I1210 11:21:43.519693 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-wx2z9"] Dec 10 11:21:43 crc kubenswrapper[4780]: E1210 11:21:43.520834 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ea79dec7-12f6-4443-b129-233fad774365" containerName="dnsmasq-dns" Dec 10 11:21:43 crc kubenswrapper[4780]: I1210 11:21:43.520863 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="ea79dec7-12f6-4443-b129-233fad774365" containerName="dnsmasq-dns" Dec 10 11:21:43 crc kubenswrapper[4780]: E1210 11:21:43.520941 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="29fe3c97-0597-4b95-b6d4-22d1f6c50a73" containerName="dnsmasq-dns" Dec 10 11:21:43 crc kubenswrapper[4780]: I1210 11:21:43.520953 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="29fe3c97-0597-4b95-b6d4-22d1f6c50a73" containerName="dnsmasq-dns" Dec 10 11:21:43 crc kubenswrapper[4780]: E1210 11:21:43.520987 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="29fe3c97-0597-4b95-b6d4-22d1f6c50a73" containerName="init" Dec 10 11:21:43 crc kubenswrapper[4780]: I1210 11:21:43.520996 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="29fe3c97-0597-4b95-b6d4-22d1f6c50a73" containerName="init" Dec 10 11:21:43 crc kubenswrapper[4780]: E1210 11:21:43.521014 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ea79dec7-12f6-4443-b129-233fad774365" containerName="init" Dec 10 11:21:43 crc kubenswrapper[4780]: I1210 11:21:43.521022 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="ea79dec7-12f6-4443-b129-233fad774365" containerName="init" Dec 10 11:21:43 crc kubenswrapper[4780]: I1210 11:21:43.521405 4780 memory_manager.go:354] "RemoveStaleState removing state" podUID="ea79dec7-12f6-4443-b129-233fad774365" containerName="dnsmasq-dns" Dec 10 11:21:43 crc kubenswrapper[4780]: I1210 11:21:43.521467 4780 memory_manager.go:354] "RemoveStaleState removing state" podUID="29fe3c97-0597-4b95-b6d4-22d1f6c50a73" containerName="dnsmasq-dns" Dec 10 11:21:43 crc kubenswrapper[4780]: I1210 11:21:43.523031 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-wx2z9" Dec 10 11:21:43 crc kubenswrapper[4780]: I1210 11:21:43.528314 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Dec 10 11:21:43 crc kubenswrapper[4780]: I1210 11:21:43.528640 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-p2qrb" Dec 10 11:21:43 crc kubenswrapper[4780]: I1210 11:21:43.528437 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Dec 10 11:21:43 crc kubenswrapper[4780]: I1210 11:21:43.530599 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Dec 10 11:21:43 crc kubenswrapper[4780]: I1210 11:21:43.536820 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-wx2z9"] Dec 10 11:21:43 crc kubenswrapper[4780]: I1210 11:21:43.634114 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/52c6020f-bcfe-437d-93cf-c88d8b77018c-inventory\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-wx2z9\" (UID: \"52c6020f-bcfe-437d-93cf-c88d8b77018c\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-wx2z9" Dec 10 11:21:43 crc kubenswrapper[4780]: I1210 11:21:43.634217 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/52c6020f-bcfe-437d-93cf-c88d8b77018c-repo-setup-combined-ca-bundle\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-wx2z9\" (UID: \"52c6020f-bcfe-437d-93cf-c88d8b77018c\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-wx2z9" Dec 10 11:21:43 crc kubenswrapper[4780]: I1210 11:21:43.634493 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2brfw\" (UniqueName: \"kubernetes.io/projected/52c6020f-bcfe-437d-93cf-c88d8b77018c-kube-api-access-2brfw\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-wx2z9\" (UID: \"52c6020f-bcfe-437d-93cf-c88d8b77018c\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-wx2z9" Dec 10 11:21:43 crc kubenswrapper[4780]: I1210 11:21:43.634524 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/52c6020f-bcfe-437d-93cf-c88d8b77018c-ssh-key\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-wx2z9\" (UID: \"52c6020f-bcfe-437d-93cf-c88d8b77018c\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-wx2z9" Dec 10 11:21:43 crc kubenswrapper[4780]: I1210 11:21:43.737447 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/52c6020f-bcfe-437d-93cf-c88d8b77018c-inventory\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-wx2z9\" (UID: \"52c6020f-bcfe-437d-93cf-c88d8b77018c\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-wx2z9" Dec 10 11:21:43 crc kubenswrapper[4780]: I1210 11:21:43.737557 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/52c6020f-bcfe-437d-93cf-c88d8b77018c-repo-setup-combined-ca-bundle\") pod 
\"repo-setup-edpm-deployment-openstack-edpm-ipam-wx2z9\" (UID: \"52c6020f-bcfe-437d-93cf-c88d8b77018c\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-wx2z9" Dec 10 11:21:43 crc kubenswrapper[4780]: I1210 11:21:43.737714 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2brfw\" (UniqueName: \"kubernetes.io/projected/52c6020f-bcfe-437d-93cf-c88d8b77018c-kube-api-access-2brfw\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-wx2z9\" (UID: \"52c6020f-bcfe-437d-93cf-c88d8b77018c\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-wx2z9" Dec 10 11:21:43 crc kubenswrapper[4780]: I1210 11:21:43.737750 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/52c6020f-bcfe-437d-93cf-c88d8b77018c-ssh-key\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-wx2z9\" (UID: \"52c6020f-bcfe-437d-93cf-c88d8b77018c\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-wx2z9" Dec 10 11:21:43 crc kubenswrapper[4780]: I1210 11:21:43.746490 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/52c6020f-bcfe-437d-93cf-c88d8b77018c-ssh-key\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-wx2z9\" (UID: \"52c6020f-bcfe-437d-93cf-c88d8b77018c\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-wx2z9" Dec 10 11:21:43 crc kubenswrapper[4780]: I1210 11:21:43.747738 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/52c6020f-bcfe-437d-93cf-c88d8b77018c-inventory\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-wx2z9\" (UID: \"52c6020f-bcfe-437d-93cf-c88d8b77018c\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-wx2z9" Dec 10 11:21:43 crc kubenswrapper[4780]: I1210 11:21:43.748865 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/52c6020f-bcfe-437d-93cf-c88d8b77018c-repo-setup-combined-ca-bundle\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-wx2z9\" (UID: \"52c6020f-bcfe-437d-93cf-c88d8b77018c\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-wx2z9" Dec 10 11:21:43 crc kubenswrapper[4780]: I1210 11:21:43.772468 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2brfw\" (UniqueName: \"kubernetes.io/projected/52c6020f-bcfe-437d-93cf-c88d8b77018c-kube-api-access-2brfw\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-wx2z9\" (UID: \"52c6020f-bcfe-437d-93cf-c88d8b77018c\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-wx2z9" Dec 10 11:21:43 crc kubenswrapper[4780]: I1210 11:21:43.852459 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-wx2z9" Dec 10 11:21:44 crc kubenswrapper[4780]: I1210 11:21:44.988224 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-wx2z9"] Dec 10 11:21:45 crc kubenswrapper[4780]: I1210 11:21:45.263732 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-wx2z9" event={"ID":"52c6020f-bcfe-437d-93cf-c88d8b77018c","Type":"ContainerStarted","Data":"444127c6c344e332d8bf7418dc0f98b498f4891776f0cbec0410df2ad8a77ded"} Dec 10 11:21:46 crc kubenswrapper[4780]: E1210 11:21:46.961941 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 11:21:48 crc kubenswrapper[4780]: I1210 11:21:48.321269 4780 generic.go:334] "Generic (PLEG): container finished" podID="f371fb3f-c503-4308-b0fc-1a180c7e131e" containerID="9c2cb3733fc4358c437386420f4d0233d8a30f64d7df055fd3c2f7983d38060b" exitCode=0 Dec 10 11:21:48 crc kubenswrapper[4780]: I1210 11:21:48.321877 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"f371fb3f-c503-4308-b0fc-1a180c7e131e","Type":"ContainerDied","Data":"9c2cb3733fc4358c437386420f4d0233d8a30f64d7df055fd3c2f7983d38060b"} Dec 10 11:21:48 crc kubenswrapper[4780]: I1210 11:21:48.328515 4780 generic.go:334] "Generic (PLEG): container finished" podID="16eb03dd-df0a-4623-a42a-25a086709c69" containerID="2a2e76281d86f67b240c0dc785557cc4a7c475775a996c83e27007a3cefd1bd5" exitCode=0 Dec 10 11:21:48 crc kubenswrapper[4780]: I1210 11:21:48.328594 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"16eb03dd-df0a-4623-a42a-25a086709c69","Type":"ContainerDied","Data":"2a2e76281d86f67b240c0dc785557cc4a7c475775a996c83e27007a3cefd1bd5"} Dec 10 11:21:50 crc kubenswrapper[4780]: I1210 11:21:50.380959 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"f371fb3f-c503-4308-b0fc-1a180c7e131e","Type":"ContainerStarted","Data":"521cd53982182bd04341b7dc024359d3e4193e88676cc2d32972a6ceeac92ba9"} Dec 10 11:21:50 crc kubenswrapper[4780]: I1210 11:21:50.383470 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-server-0" Dec 10 11:21:50 crc kubenswrapper[4780]: I1210 11:21:50.385662 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"16eb03dd-df0a-4623-a42a-25a086709c69","Type":"ContainerStarted","Data":"a0f6b33afca8a70aa95d7869458624e167544c45cf618f7eb7b55b84037227b2"} Dec 10 11:21:50 crc kubenswrapper[4780]: I1210 11:21:50.386189 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-cell1-server-0" Dec 10 11:21:50 crc kubenswrapper[4780]: I1210 11:21:50.423600 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-server-0" podStartSLOduration=45.423564029 podStartE2EDuration="45.423564029s" podCreationTimestamp="2025-12-10 11:21:05 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 11:21:50.410710537 +0000 UTC 
m=+2215.264103980" watchObservedRunningTime="2025-12-10 11:21:50.423564029 +0000 UTC m=+2215.276957472" Dec 10 11:21:50 crc kubenswrapper[4780]: I1210 11:21:50.455211 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-cell1-server-0" podStartSLOduration=44.455181917 podStartE2EDuration="44.455181917s" podCreationTimestamp="2025-12-10 11:21:06 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 11:21:50.443569587 +0000 UTC m=+2215.296963060" watchObservedRunningTime="2025-12-10 11:21:50.455181917 +0000 UTC m=+2215.308575360" Dec 10 11:21:55 crc kubenswrapper[4780]: I1210 11:21:55.192531 4780 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/barbican-operator-controller-manager-7d9dfd778-rnfbd" podUID="6f18f8cf-e493-41bd-92e6-a7714992854d" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.102:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Dec 10 11:21:55 crc kubenswrapper[4780]: I1210 11:21:55.195578 4780 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack-operators/barbican-operator-controller-manager-7d9dfd778-rnfbd" podUID="6f18f8cf-e493-41bd-92e6-a7714992854d" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.102:8081/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Dec 10 11:21:55 crc kubenswrapper[4780]: E1210 11:21:55.986466 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 11:21:57 crc kubenswrapper[4780]: I1210 11:21:57.475606 4780 patch_prober.go:28] interesting pod/machine-config-daemon-xhdr5 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 10 11:21:57 crc kubenswrapper[4780]: I1210 11:21:57.476216 4780 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" podUID="6bf1dca1-b191-4796-b326-baac53e84045" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 10 11:22:02 crc kubenswrapper[4780]: E1210 11:22:02.049716 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 11:22:04 crc kubenswrapper[4780]: E1210 11:22:04.630390 4780 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/openstack-ansibleee-runner:latest" Dec 10 11:22:04 crc kubenswrapper[4780]: E1210 11:22:04.632608 4780 kuberuntime_manager.go:1274] "Unhandled Error" err=< Dec 10 11:22:04 crc kubenswrapper[4780]: container 
&Container{Name:repo-setup-edpm-deployment-openstack-edpm-ipam,Image:quay.io/openstack-k8s-operators/openstack-ansibleee-runner:latest,Command:[],Args:[ansible-runner run /runner -p playbook.yaml -i repo-setup-edpm-deployment-openstack-edpm-ipam],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:ANSIBLE_VERBOSITY,Value:2,ValueFrom:nil,},EnvVar{Name:RUNNER_PLAYBOOK,Value: Dec 10 11:22:04 crc kubenswrapper[4780]: - hosts: all Dec 10 11:22:04 crc kubenswrapper[4780]: strategy: linear Dec 10 11:22:04 crc kubenswrapper[4780]: tasks: Dec 10 11:22:04 crc kubenswrapper[4780]: - name: Enable podified-repos Dec 10 11:22:04 crc kubenswrapper[4780]: become: true Dec 10 11:22:04 crc kubenswrapper[4780]: ansible.builtin.shell: | Dec 10 11:22:04 crc kubenswrapper[4780]: set -euxo pipefail Dec 10 11:22:04 crc kubenswrapper[4780]: pushd /var/tmp Dec 10 11:22:04 crc kubenswrapper[4780]: curl -sL https://github.com/openstack-k8s-operators/repo-setup/archive/refs/heads/main.tar.gz | tar -xz Dec 10 11:22:04 crc kubenswrapper[4780]: pushd repo-setup-main Dec 10 11:22:04 crc kubenswrapper[4780]: python3 -m venv ./venv Dec 10 11:22:04 crc kubenswrapper[4780]: PBR_VERSION=0.0.0 ./venv/bin/pip install ./ Dec 10 11:22:04 crc kubenswrapper[4780]: ./venv/bin/repo-setup current-podified -b antelope Dec 10 11:22:04 crc kubenswrapper[4780]: popd Dec 10 11:22:04 crc kubenswrapper[4780]: rm -rf repo-setup-main Dec 10 11:22:04 crc kubenswrapper[4780]: Dec 10 11:22:04 crc kubenswrapper[4780]: Dec 10 11:22:04 crc kubenswrapper[4780]: ,ValueFrom:nil,},EnvVar{Name:RUNNER_EXTRA_VARS,Value: Dec 10 11:22:04 crc kubenswrapper[4780]: edpm_override_hosts: openstack-edpm-ipam Dec 10 11:22:04 crc kubenswrapper[4780]: edpm_service_type: repo-setup Dec 10 11:22:04 crc kubenswrapper[4780]: Dec 10 11:22:04 crc kubenswrapper[4780]: Dec 10 11:22:04 crc kubenswrapper[4780]: ,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:repo-setup-combined-ca-bundle,ReadOnly:false,MountPath:/var/lib/openstack/cacerts/repo-setup,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:ssh-key,ReadOnly:false,MountPath:/runner/env/ssh_key,SubPath:ssh_key,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:inventory,ReadOnly:false,MountPath:/runner/inventory/hosts,SubPath:inventory,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-2brfw,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{EnvFromSource{Prefix:,ConfigMapRef:&ConfigMapEnvSource{LocalObjectReference:LocalObjectReference{Name:openstack-aee-default-env,},Optional:*true,},SecretRef:nil,},},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod 
repo-setup-edpm-deployment-openstack-edpm-ipam-wx2z9_openstack(52c6020f-bcfe-437d-93cf-c88d8b77018c): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled Dec 10 11:22:04 crc kubenswrapper[4780]: > logger="UnhandledError" Dec 10 11:22:04 crc kubenswrapper[4780]: E1210 11:22:04.634447 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"repo-setup-edpm-deployment-openstack-edpm-ipam\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-wx2z9" podUID="52c6020f-bcfe-437d-93cf-c88d8b77018c" Dec 10 11:22:05 crc kubenswrapper[4780]: E1210 11:22:05.420449 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"repo-setup-edpm-deployment-openstack-edpm-ipam\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/openstack-ansibleee-runner:latest\\\"\"" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-wx2z9" podUID="52c6020f-bcfe-437d-93cf-c88d8b77018c" Dec 10 11:22:06 crc kubenswrapper[4780]: I1210 11:22:06.302351 4780 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-server-0" podUID="f371fb3f-c503-4308-b0fc-1a180c7e131e" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.1.11:5671: connect: connection refused" Dec 10 11:22:06 crc kubenswrapper[4780]: E1210 11:22:06.962801 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 11:22:07 crc kubenswrapper[4780]: I1210 11:22:07.032130 4780 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-cell1-server-0" podUID="16eb03dd-df0a-4623-a42a-25a086709c69" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.1.12:5671: connect: connection refused" Dec 10 11:22:15 crc kubenswrapper[4780]: E1210 11:22:15.060090 4780 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested" Dec 10 11:22:15 crc kubenswrapper[4780]: E1210 11:22:15.061043 4780 kuberuntime_image.go:55] "Failed to pull image" err="initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested" Dec 10 11:22:15 crc kubenswrapper[4780]: E1210 11:22:15.061236 4780 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:heat-db-sync,Image:quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested,Command:[/bin/bash],Args:[-c /usr/bin/heat-manage --config-dir /etc/heat/heat.conf.d db_sync],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:true,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/heat/heat.conf.d/00-default.conf,SubPath:00-default.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:false,MountPath:/etc/heat/heat.conf.d/01-custom.conf,SubPath:01-custom.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/my.cnf,SubPath:my.cnf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-fh6ms,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42418,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:*42418,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod heat-db-sync-nd4t7_openstack(4ba2892c-316e-4819-a33c-d7b2b6803553): ErrImagePull: initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" logger="UnhandledError" Dec 10 11:22:15 crc kubenswrapper[4780]: E1210 11:22:15.062549 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ErrImagePull: \"initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 11:22:16 crc kubenswrapper[4780]: I1210 11:22:16.338664 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-server-0" Dec 10 11:22:17 crc kubenswrapper[4780]: I1210 11:22:17.021253 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-cell1-server-0" Dec 10 11:22:17 crc kubenswrapper[4780]: I1210 11:22:17.822132 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Dec 10 11:22:17 crc kubenswrapper[4780]: E1210 11:22:17.961412 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 11:22:19 crc kubenswrapper[4780]: I1210 11:22:19.455491 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-wx2z9" event={"ID":"52c6020f-bcfe-437d-93cf-c88d8b77018c","Type":"ContainerStarted","Data":"1d46bb21a51e73a300606c1aa10e522ca1305e3e29006bde07db7bf1e224a1df"} Dec 10 11:22:19 crc kubenswrapper[4780]: I1210 11:22:19.490716 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-wx2z9" podStartSLOduration=3.6919867650000002 podStartE2EDuration="36.490679403s" podCreationTimestamp="2025-12-10 11:21:43 +0000 UTC" firstStartedPulling="2025-12-10 11:21:45.018394523 +0000 UTC m=+2209.871787966" lastFinishedPulling="2025-12-10 11:22:17.817087161 +0000 UTC m=+2242.670480604" observedRunningTime="2025-12-10 11:22:19.486429924 +0000 UTC m=+2244.339823387" watchObservedRunningTime="2025-12-10 11:22:19.490679403 +0000 UTC m=+2244.344072846" Dec 10 11:22:27 crc kubenswrapper[4780]: I1210 11:22:27.473242 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-4dxw2"] Dec 10 11:22:27 crc kubenswrapper[4780]: I1210 11:22:27.476137 4780 patch_prober.go:28] interesting pod/machine-config-daemon-xhdr5 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 10 11:22:27 crc kubenswrapper[4780]: I1210 11:22:27.476230 4780 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" podUID="6bf1dca1-b191-4796-b326-baac53e84045" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 10 11:22:27 crc kubenswrapper[4780]: I1210 11:22:27.478013 4780 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" Dec 10 11:22:27 crc kubenswrapper[4780]: I1210 11:22:27.478213 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-4dxw2" Dec 10 11:22:27 crc kubenswrapper[4780]: I1210 11:22:27.479331 4780 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"a4c56cbf13f0b58a88ae470c0b33e021ebda5393c2067f7ccb2e5ac1ebff5108"} pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Dec 10 11:22:27 crc kubenswrapper[4780]: I1210 11:22:27.479412 4780 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" podUID="6bf1dca1-b191-4796-b326-baac53e84045" containerName="machine-config-daemon" containerID="cri-o://a4c56cbf13f0b58a88ae470c0b33e021ebda5393c2067f7ccb2e5ac1ebff5108" gracePeriod=600 Dec 10 11:22:27 crc kubenswrapper[4780]: I1210 11:22:27.711391 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-4dxw2"] Dec 10 11:22:27 crc kubenswrapper[4780]: I1210 11:22:27.774135 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4srhw\" (UniqueName: \"kubernetes.io/projected/a1c7c343-44ef-464b-bd25-e01c78b74fbd-kube-api-access-4srhw\") pod \"certified-operators-4dxw2\" (UID: \"a1c7c343-44ef-464b-bd25-e01c78b74fbd\") " pod="openshift-marketplace/certified-operators-4dxw2" Dec 10 11:22:27 crc kubenswrapper[4780]: I1210 11:22:27.774230 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a1c7c343-44ef-464b-bd25-e01c78b74fbd-catalog-content\") pod \"certified-operators-4dxw2\" (UID: \"a1c7c343-44ef-464b-bd25-e01c78b74fbd\") " pod="openshift-marketplace/certified-operators-4dxw2" Dec 10 11:22:27 crc kubenswrapper[4780]: I1210 11:22:27.774254 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a1c7c343-44ef-464b-bd25-e01c78b74fbd-utilities\") pod \"certified-operators-4dxw2\" (UID: \"a1c7c343-44ef-464b-bd25-e01c78b74fbd\") " pod="openshift-marketplace/certified-operators-4dxw2" Dec 10 11:22:27 crc kubenswrapper[4780]: I1210 11:22:27.848259 4780 generic.go:334] "Generic (PLEG): container finished" podID="6bf1dca1-b191-4796-b326-baac53e84045" containerID="a4c56cbf13f0b58a88ae470c0b33e021ebda5393c2067f7ccb2e5ac1ebff5108" exitCode=0 Dec 10 11:22:27 crc kubenswrapper[4780]: I1210 11:22:27.848322 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" event={"ID":"6bf1dca1-b191-4796-b326-baac53e84045","Type":"ContainerDied","Data":"a4c56cbf13f0b58a88ae470c0b33e021ebda5393c2067f7ccb2e5ac1ebff5108"} Dec 10 11:22:27 crc kubenswrapper[4780]: I1210 11:22:27.848365 4780 scope.go:117] "RemoveContainer" containerID="90f38aa24e74e78a263a82b742bebf91991ff25f2212114904e33abbbd82de16" Dec 10 11:22:27 crc kubenswrapper[4780]: I1210 11:22:27.878402 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a1c7c343-44ef-464b-bd25-e01c78b74fbd-catalog-content\") pod \"certified-operators-4dxw2\" (UID: \"a1c7c343-44ef-464b-bd25-e01c78b74fbd\") " pod="openshift-marketplace/certified-operators-4dxw2" Dec 10 11:22:27 crc kubenswrapper[4780]: I1210 
11:22:27.878464 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a1c7c343-44ef-464b-bd25-e01c78b74fbd-utilities\") pod \"certified-operators-4dxw2\" (UID: \"a1c7c343-44ef-464b-bd25-e01c78b74fbd\") " pod="openshift-marketplace/certified-operators-4dxw2" Dec 10 11:22:27 crc kubenswrapper[4780]: I1210 11:22:27.878749 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4srhw\" (UniqueName: \"kubernetes.io/projected/a1c7c343-44ef-464b-bd25-e01c78b74fbd-kube-api-access-4srhw\") pod \"certified-operators-4dxw2\" (UID: \"a1c7c343-44ef-464b-bd25-e01c78b74fbd\") " pod="openshift-marketplace/certified-operators-4dxw2" Dec 10 11:22:27 crc kubenswrapper[4780]: I1210 11:22:27.879674 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a1c7c343-44ef-464b-bd25-e01c78b74fbd-catalog-content\") pod \"certified-operators-4dxw2\" (UID: \"a1c7c343-44ef-464b-bd25-e01c78b74fbd\") " pod="openshift-marketplace/certified-operators-4dxw2" Dec 10 11:22:27 crc kubenswrapper[4780]: I1210 11:22:27.880155 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a1c7c343-44ef-464b-bd25-e01c78b74fbd-utilities\") pod \"certified-operators-4dxw2\" (UID: \"a1c7c343-44ef-464b-bd25-e01c78b74fbd\") " pod="openshift-marketplace/certified-operators-4dxw2" Dec 10 11:22:27 crc kubenswrapper[4780]: I1210 11:22:27.915537 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4srhw\" (UniqueName: \"kubernetes.io/projected/a1c7c343-44ef-464b-bd25-e01c78b74fbd-kube-api-access-4srhw\") pod \"certified-operators-4dxw2\" (UID: \"a1c7c343-44ef-464b-bd25-e01c78b74fbd\") " pod="openshift-marketplace/certified-operators-4dxw2" Dec 10 11:22:27 crc kubenswrapper[4780]: E1210 11:22:27.961724 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 11:22:28 crc kubenswrapper[4780]: I1210 11:22:28.072870 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-4dxw2" Dec 10 11:22:28 crc kubenswrapper[4780]: I1210 11:22:28.819370 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-4dxw2"] Dec 10 11:22:28 crc kubenswrapper[4780]: I1210 11:22:28.897911 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" event={"ID":"6bf1dca1-b191-4796-b326-baac53e84045","Type":"ContainerStarted","Data":"c10af5b7a9461382e4108cdd2bb73a0e03a7bf28be1decd85891fe9f2dbf6208"} Dec 10 11:22:28 crc kubenswrapper[4780]: I1210 11:22:28.908047 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-4dxw2" event={"ID":"a1c7c343-44ef-464b-bd25-e01c78b74fbd","Type":"ContainerStarted","Data":"5a08a4683012c28a007d58215176798a3f6ee6610384ccc4bc18d1ff2fde1051"} Dec 10 11:22:29 crc kubenswrapper[4780]: I1210 11:22:29.924092 4780 generic.go:334] "Generic (PLEG): container finished" podID="a1c7c343-44ef-464b-bd25-e01c78b74fbd" containerID="a9db8de4eaf496e58bdb8b2370802bcd3656577e8117be370d1990333f56c765" exitCode=0 Dec 10 11:22:29 crc kubenswrapper[4780]: I1210 11:22:29.926770 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-4dxw2" event={"ID":"a1c7c343-44ef-464b-bd25-e01c78b74fbd","Type":"ContainerDied","Data":"a9db8de4eaf496e58bdb8b2370802bcd3656577e8117be370d1990333f56c765"} Dec 10 11:22:30 crc kubenswrapper[4780]: E1210 11:22:30.059738 4780 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested" Dec 10 11:22:30 crc kubenswrapper[4780]: E1210 11:22:30.059843 4780 kuberuntime_image.go:55] "Failed to pull image" err="initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested" Dec 10 11:22:30 crc kubenswrapper[4780]: E1210 11:22:30.060048 4780 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:ceilometer-central-agent,Image:quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n5d9hcfh66bh66bh89h5cdh97h57ch598h68h5b5h689h56chc5h96h58ch687h5dfh5ddh645h68bhcchcdh56ch56fh9fh654hd4h8dhb9h74h59cq,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/var/lib/openstack/bin,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/openstack/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:ceilometer-central-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-gf2w8,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[/usr/bin/python3 /var/lib/openstack/bin/centralhealth.py],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:300,TimeoutSeconds:5,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ceilometer-0_openstack(317b5b7c-bb08-4441-a2ef-8c2d7390ada6): ErrImagePull: initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" logger="UnhandledError" Dec 10 11:22:30 crc kubenswrapper[4780]: E1210 11:22:30.061276 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ErrImagePull: \"initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 11:22:30 crc kubenswrapper[4780]: I1210 11:22:30.946271 4780 generic.go:334] "Generic (PLEG): container finished" podID="52c6020f-bcfe-437d-93cf-c88d8b77018c" containerID="1d46bb21a51e73a300606c1aa10e522ca1305e3e29006bde07db7bf1e224a1df" exitCode=0 Dec 10 11:22:30 crc kubenswrapper[4780]: I1210 11:22:30.947313 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-wx2z9" event={"ID":"52c6020f-bcfe-437d-93cf-c88d8b77018c","Type":"ContainerDied","Data":"1d46bb21a51e73a300606c1aa10e522ca1305e3e29006bde07db7bf1e224a1df"} Dec 10 11:22:30 crc kubenswrapper[4780]: I1210 11:22:30.954829 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-4dxw2" event={"ID":"a1c7c343-44ef-464b-bd25-e01c78b74fbd","Type":"ContainerStarted","Data":"f4ebee6bab56d028f0aee13464d003c7b9c409a3165af9522376fc5418da5e25"} Dec 10 11:22:33 crc kubenswrapper[4780]: I1210 11:22:33.576242 4780 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-wx2z9" Dec 10 11:22:33 crc kubenswrapper[4780]: I1210 11:22:33.705388 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2brfw\" (UniqueName: \"kubernetes.io/projected/52c6020f-bcfe-437d-93cf-c88d8b77018c-kube-api-access-2brfw\") pod \"52c6020f-bcfe-437d-93cf-c88d8b77018c\" (UID: \"52c6020f-bcfe-437d-93cf-c88d8b77018c\") " Dec 10 11:22:33 crc kubenswrapper[4780]: I1210 11:22:33.705575 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/52c6020f-bcfe-437d-93cf-c88d8b77018c-ssh-key\") pod \"52c6020f-bcfe-437d-93cf-c88d8b77018c\" (UID: \"52c6020f-bcfe-437d-93cf-c88d8b77018c\") " Dec 10 11:22:33 crc kubenswrapper[4780]: I1210 11:22:33.706082 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/52c6020f-bcfe-437d-93cf-c88d8b77018c-inventory\") pod \"52c6020f-bcfe-437d-93cf-c88d8b77018c\" (UID: \"52c6020f-bcfe-437d-93cf-c88d8b77018c\") " Dec 10 11:22:33 crc kubenswrapper[4780]: I1210 11:22:33.706160 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/52c6020f-bcfe-437d-93cf-c88d8b77018c-repo-setup-combined-ca-bundle\") pod \"52c6020f-bcfe-437d-93cf-c88d8b77018c\" (UID: \"52c6020f-bcfe-437d-93cf-c88d8b77018c\") " Dec 10 11:22:33 crc kubenswrapper[4780]: I1210 11:22:33.715280 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/52c6020f-bcfe-437d-93cf-c88d8b77018c-repo-setup-combined-ca-bundle" (OuterVolumeSpecName: "repo-setup-combined-ca-bundle") pod "52c6020f-bcfe-437d-93cf-c88d8b77018c" (UID: "52c6020f-bcfe-437d-93cf-c88d8b77018c"). InnerVolumeSpecName "repo-setup-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:22:33 crc kubenswrapper[4780]: I1210 11:22:33.717605 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/52c6020f-bcfe-437d-93cf-c88d8b77018c-kube-api-access-2brfw" (OuterVolumeSpecName: "kube-api-access-2brfw") pod "52c6020f-bcfe-437d-93cf-c88d8b77018c" (UID: "52c6020f-bcfe-437d-93cf-c88d8b77018c"). 
InnerVolumeSpecName "kube-api-access-2brfw". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:22:33 crc kubenswrapper[4780]: I1210 11:22:33.756816 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/52c6020f-bcfe-437d-93cf-c88d8b77018c-inventory" (OuterVolumeSpecName: "inventory") pod "52c6020f-bcfe-437d-93cf-c88d8b77018c" (UID: "52c6020f-bcfe-437d-93cf-c88d8b77018c"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:22:33 crc kubenswrapper[4780]: I1210 11:22:33.765990 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/52c6020f-bcfe-437d-93cf-c88d8b77018c-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "52c6020f-bcfe-437d-93cf-c88d8b77018c" (UID: "52c6020f-bcfe-437d-93cf-c88d8b77018c"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:22:33 crc kubenswrapper[4780]: I1210 11:22:33.810095 4780 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/52c6020f-bcfe-437d-93cf-c88d8b77018c-inventory\") on node \"crc\" DevicePath \"\"" Dec 10 11:22:33 crc kubenswrapper[4780]: I1210 11:22:33.810156 4780 reconciler_common.go:293] "Volume detached for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/52c6020f-bcfe-437d-93cf-c88d8b77018c-repo-setup-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 10 11:22:33 crc kubenswrapper[4780]: I1210 11:22:33.810170 4780 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2brfw\" (UniqueName: \"kubernetes.io/projected/52c6020f-bcfe-437d-93cf-c88d8b77018c-kube-api-access-2brfw\") on node \"crc\" DevicePath \"\"" Dec 10 11:22:33 crc kubenswrapper[4780]: I1210 11:22:33.810183 4780 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/52c6020f-bcfe-437d-93cf-c88d8b77018c-ssh-key\") on node \"crc\" DevicePath \"\"" Dec 10 11:22:34 crc kubenswrapper[4780]: I1210 11:22:34.044290 4780 generic.go:334] "Generic (PLEG): container finished" podID="a1c7c343-44ef-464b-bd25-e01c78b74fbd" containerID="f4ebee6bab56d028f0aee13464d003c7b9c409a3165af9522376fc5418da5e25" exitCode=0 Dec 10 11:22:34 crc kubenswrapper[4780]: I1210 11:22:34.044434 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-4dxw2" event={"ID":"a1c7c343-44ef-464b-bd25-e01c78b74fbd","Type":"ContainerDied","Data":"f4ebee6bab56d028f0aee13464d003c7b9c409a3165af9522376fc5418da5e25"} Dec 10 11:22:34 crc kubenswrapper[4780]: I1210 11:22:34.064245 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-wx2z9" event={"ID":"52c6020f-bcfe-437d-93cf-c88d8b77018c","Type":"ContainerDied","Data":"444127c6c344e332d8bf7418dc0f98b498f4891776f0cbec0410df2ad8a77ded"} Dec 10 11:22:34 crc kubenswrapper[4780]: I1210 11:22:34.064322 4780 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="444127c6c344e332d8bf7418dc0f98b498f4891776f0cbec0410df2ad8a77ded" Dec 10 11:22:34 crc kubenswrapper[4780]: I1210 11:22:34.064419 4780 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-wx2z9" Dec 10 11:22:34 crc kubenswrapper[4780]: I1210 11:22:34.717864 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/redhat-edpm-deployment-openstack-edpm-ipam-zcxsq"] Dec 10 11:22:34 crc kubenswrapper[4780]: E1210 11:22:34.718753 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="52c6020f-bcfe-437d-93cf-c88d8b77018c" containerName="repo-setup-edpm-deployment-openstack-edpm-ipam" Dec 10 11:22:34 crc kubenswrapper[4780]: I1210 11:22:34.719474 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="52c6020f-bcfe-437d-93cf-c88d8b77018c" containerName="repo-setup-edpm-deployment-openstack-edpm-ipam" Dec 10 11:22:34 crc kubenswrapper[4780]: I1210 11:22:34.719843 4780 memory_manager.go:354] "RemoveStaleState removing state" podUID="52c6020f-bcfe-437d-93cf-c88d8b77018c" containerName="repo-setup-edpm-deployment-openstack-edpm-ipam" Dec 10 11:22:34 crc kubenswrapper[4780]: I1210 11:22:34.721150 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-zcxsq" Dec 10 11:22:34 crc kubenswrapper[4780]: I1210 11:22:34.725264 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-p2qrb" Dec 10 11:22:34 crc kubenswrapper[4780]: I1210 11:22:34.725732 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Dec 10 11:22:34 crc kubenswrapper[4780]: I1210 11:22:34.726167 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Dec 10 11:22:34 crc kubenswrapper[4780]: I1210 11:22:34.726534 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Dec 10 11:22:34 crc kubenswrapper[4780]: I1210 11:22:34.769352 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/redhat-edpm-deployment-openstack-edpm-ipam-zcxsq"] Dec 10 11:22:34 crc kubenswrapper[4780]: I1210 11:22:34.840875 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/c9cea268-1292-4512-b22f-891a4c652dd0-ssh-key\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-zcxsq\" (UID: \"c9cea268-1292-4512-b22f-891a4c652dd0\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-zcxsq" Dec 10 11:22:34 crc kubenswrapper[4780]: I1210 11:22:34.841161 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/c9cea268-1292-4512-b22f-891a4c652dd0-inventory\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-zcxsq\" (UID: \"c9cea268-1292-4512-b22f-891a4c652dd0\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-zcxsq" Dec 10 11:22:34 crc kubenswrapper[4780]: I1210 11:22:34.841262 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-76mmj\" (UniqueName: \"kubernetes.io/projected/c9cea268-1292-4512-b22f-891a4c652dd0-kube-api-access-76mmj\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-zcxsq\" (UID: \"c9cea268-1292-4512-b22f-891a4c652dd0\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-zcxsq" Dec 10 11:22:34 crc kubenswrapper[4780]: I1210 11:22:34.948146 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"inventory\" (UniqueName: \"kubernetes.io/secret/c9cea268-1292-4512-b22f-891a4c652dd0-inventory\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-zcxsq\" (UID: \"c9cea268-1292-4512-b22f-891a4c652dd0\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-zcxsq" Dec 10 11:22:34 crc kubenswrapper[4780]: I1210 11:22:34.948297 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-76mmj\" (UniqueName: \"kubernetes.io/projected/c9cea268-1292-4512-b22f-891a4c652dd0-kube-api-access-76mmj\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-zcxsq\" (UID: \"c9cea268-1292-4512-b22f-891a4c652dd0\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-zcxsq" Dec 10 11:22:34 crc kubenswrapper[4780]: I1210 11:22:34.948474 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/c9cea268-1292-4512-b22f-891a4c652dd0-ssh-key\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-zcxsq\" (UID: \"c9cea268-1292-4512-b22f-891a4c652dd0\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-zcxsq" Dec 10 11:22:34 crc kubenswrapper[4780]: I1210 11:22:34.968997 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/c9cea268-1292-4512-b22f-891a4c652dd0-ssh-key\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-zcxsq\" (UID: \"c9cea268-1292-4512-b22f-891a4c652dd0\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-zcxsq" Dec 10 11:22:34 crc kubenswrapper[4780]: I1210 11:22:34.969119 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/c9cea268-1292-4512-b22f-891a4c652dd0-inventory\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-zcxsq\" (UID: \"c9cea268-1292-4512-b22f-891a4c652dd0\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-zcxsq" Dec 10 11:22:34 crc kubenswrapper[4780]: I1210 11:22:34.983571 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-76mmj\" (UniqueName: \"kubernetes.io/projected/c9cea268-1292-4512-b22f-891a4c652dd0-kube-api-access-76mmj\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-zcxsq\" (UID: \"c9cea268-1292-4512-b22f-891a4c652dd0\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-zcxsq" Dec 10 11:22:35 crc kubenswrapper[4780]: I1210 11:22:35.070338 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-zcxsq" Dec 10 11:22:36 crc kubenswrapper[4780]: I1210 11:22:36.110779 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/redhat-edpm-deployment-openstack-edpm-ipam-zcxsq"] Dec 10 11:22:36 crc kubenswrapper[4780]: I1210 11:22:36.115089 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-zcxsq" event={"ID":"c9cea268-1292-4512-b22f-891a4c652dd0","Type":"ContainerStarted","Data":"19dd55eaac4ec997bff170a88abc0c237050553e7146b07942cfec9b0d7a9dc5"} Dec 10 11:22:36 crc kubenswrapper[4780]: I1210 11:22:36.118905 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-4dxw2" event={"ID":"a1c7c343-44ef-464b-bd25-e01c78b74fbd","Type":"ContainerStarted","Data":"13a91f507faef51e367a3f0edf67961817474442736f588a2e5d792d4e5c4255"} Dec 10 11:22:36 crc kubenswrapper[4780]: I1210 11:22:36.153865 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-4dxw2" podStartSLOduration=4.253240537 podStartE2EDuration="9.153833768s" podCreationTimestamp="2025-12-10 11:22:27 +0000 UTC" firstStartedPulling="2025-12-10 11:22:29.929082199 +0000 UTC m=+2254.782475642" lastFinishedPulling="2025-12-10 11:22:34.82967543 +0000 UTC m=+2259.683068873" observedRunningTime="2025-12-10 11:22:36.141414867 +0000 UTC m=+2260.994808310" watchObservedRunningTime="2025-12-10 11:22:36.153833768 +0000 UTC m=+2261.007227211" Dec 10 11:22:37 crc kubenswrapper[4780]: I1210 11:22:37.138896 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-zcxsq" event={"ID":"c9cea268-1292-4512-b22f-891a4c652dd0","Type":"ContainerStarted","Data":"dfaeaa90bdb69c4379110fdba49e6b6f9dce8be474f56fae0164c65f8d7de39f"} Dec 10 11:22:37 crc kubenswrapper[4780]: I1210 11:22:37.168765 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-zcxsq" podStartSLOduration=2.645679266 podStartE2EDuration="3.168721499s" podCreationTimestamp="2025-12-10 11:22:34 +0000 UTC" firstStartedPulling="2025-12-10 11:22:36.088212211 +0000 UTC m=+2260.941605654" lastFinishedPulling="2025-12-10 11:22:36.611254444 +0000 UTC m=+2261.464647887" observedRunningTime="2025-12-10 11:22:37.155838015 +0000 UTC m=+2262.009231458" watchObservedRunningTime="2025-12-10 11:22:37.168721499 +0000 UTC m=+2262.022114942" Dec 10 11:22:38 crc kubenswrapper[4780]: I1210 11:22:38.120499 4780 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-4dxw2" Dec 10 11:22:38 crc kubenswrapper[4780]: I1210 11:22:38.261824 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-4dxw2" Dec 10 11:22:39 crc kubenswrapper[4780]: I1210 11:22:39.231109 4780 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/certified-operators-4dxw2" podUID="a1c7c343-44ef-464b-bd25-e01c78b74fbd" containerName="registry-server" probeResult="failure" output=< Dec 10 11:22:39 crc kubenswrapper[4780]: timeout: failed to connect service ":50051" within 1s Dec 10 11:22:39 crc kubenswrapper[4780]: > Dec 10 11:22:40 crc kubenswrapper[4780]: E1210 11:22:40.964333 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off 
pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 11:22:41 crc kubenswrapper[4780]: I1210 11:22:41.497374 4780 scope.go:117] "RemoveContainer" containerID="f6ace8dfe64718b23c500adcd34c8fe4675d4c79cd3cf8b4e4e9695435e57217" Dec 10 11:22:41 crc kubenswrapper[4780]: I1210 11:22:41.537486 4780 generic.go:334] "Generic (PLEG): container finished" podID="c9cea268-1292-4512-b22f-891a4c652dd0" containerID="dfaeaa90bdb69c4379110fdba49e6b6f9dce8be474f56fae0164c65f8d7de39f" exitCode=0 Dec 10 11:22:41 crc kubenswrapper[4780]: I1210 11:22:41.537554 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-zcxsq" event={"ID":"c9cea268-1292-4512-b22f-891a4c652dd0","Type":"ContainerDied","Data":"dfaeaa90bdb69c4379110fdba49e6b6f9dce8be474f56fae0164c65f8d7de39f"} Dec 10 11:22:41 crc kubenswrapper[4780]: I1210 11:22:41.587670 4780 scope.go:117] "RemoveContainer" containerID="970290ae16f5d622a134ec7215bb08fd80d8d9c966aa67a88af90738884d8672" Dec 10 11:22:41 crc kubenswrapper[4780]: E1210 11:22:41.961089 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 11:22:43 crc kubenswrapper[4780]: I1210 11:22:43.315054 4780 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-zcxsq" Dec 10 11:22:43 crc kubenswrapper[4780]: I1210 11:22:43.426680 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/c9cea268-1292-4512-b22f-891a4c652dd0-inventory\") pod \"c9cea268-1292-4512-b22f-891a4c652dd0\" (UID: \"c9cea268-1292-4512-b22f-891a4c652dd0\") " Dec 10 11:22:43 crc kubenswrapper[4780]: I1210 11:22:43.427190 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/c9cea268-1292-4512-b22f-891a4c652dd0-ssh-key\") pod \"c9cea268-1292-4512-b22f-891a4c652dd0\" (UID: \"c9cea268-1292-4512-b22f-891a4c652dd0\") " Dec 10 11:22:43 crc kubenswrapper[4780]: I1210 11:22:43.427270 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-76mmj\" (UniqueName: \"kubernetes.io/projected/c9cea268-1292-4512-b22f-891a4c652dd0-kube-api-access-76mmj\") pod \"c9cea268-1292-4512-b22f-891a4c652dd0\" (UID: \"c9cea268-1292-4512-b22f-891a4c652dd0\") " Dec 10 11:22:43 crc kubenswrapper[4780]: I1210 11:22:43.436699 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c9cea268-1292-4512-b22f-891a4c652dd0-kube-api-access-76mmj" (OuterVolumeSpecName: "kube-api-access-76mmj") pod "c9cea268-1292-4512-b22f-891a4c652dd0" (UID: "c9cea268-1292-4512-b22f-891a4c652dd0"). InnerVolumeSpecName "kube-api-access-76mmj". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:22:43 crc kubenswrapper[4780]: I1210 11:22:43.471030 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c9cea268-1292-4512-b22f-891a4c652dd0-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "c9cea268-1292-4512-b22f-891a4c652dd0" (UID: "c9cea268-1292-4512-b22f-891a4c652dd0"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:22:43 crc kubenswrapper[4780]: I1210 11:22:43.500727 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c9cea268-1292-4512-b22f-891a4c652dd0-inventory" (OuterVolumeSpecName: "inventory") pod "c9cea268-1292-4512-b22f-891a4c652dd0" (UID: "c9cea268-1292-4512-b22f-891a4c652dd0"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:22:43 crc kubenswrapper[4780]: I1210 11:22:43.533659 4780 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/c9cea268-1292-4512-b22f-891a4c652dd0-ssh-key\") on node \"crc\" DevicePath \"\"" Dec 10 11:22:43 crc kubenswrapper[4780]: I1210 11:22:43.533712 4780 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-76mmj\" (UniqueName: \"kubernetes.io/projected/c9cea268-1292-4512-b22f-891a4c652dd0-kube-api-access-76mmj\") on node \"crc\" DevicePath \"\"" Dec 10 11:22:43 crc kubenswrapper[4780]: I1210 11:22:43.533731 4780 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/c9cea268-1292-4512-b22f-891a4c652dd0-inventory\") on node \"crc\" DevicePath \"\"" Dec 10 11:22:43 crc kubenswrapper[4780]: I1210 11:22:43.569027 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-zcxsq" event={"ID":"c9cea268-1292-4512-b22f-891a4c652dd0","Type":"ContainerDied","Data":"19dd55eaac4ec997bff170a88abc0c237050553e7146b07942cfec9b0d7a9dc5"} Dec 10 11:22:43 crc kubenswrapper[4780]: I1210 11:22:43.569089 4780 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="19dd55eaac4ec997bff170a88abc0c237050553e7146b07942cfec9b0d7a9dc5" Dec 10 11:22:43 crc kubenswrapper[4780]: I1210 11:22:43.569099 4780 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-zcxsq" Dec 10 11:22:43 crc kubenswrapper[4780]: I1210 11:22:43.703935 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-5jtvv"] Dec 10 11:22:43 crc kubenswrapper[4780]: E1210 11:22:43.705431 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c9cea268-1292-4512-b22f-891a4c652dd0" containerName="redhat-edpm-deployment-openstack-edpm-ipam" Dec 10 11:22:43 crc kubenswrapper[4780]: I1210 11:22:43.705470 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="c9cea268-1292-4512-b22f-891a4c652dd0" containerName="redhat-edpm-deployment-openstack-edpm-ipam" Dec 10 11:22:43 crc kubenswrapper[4780]: I1210 11:22:43.706751 4780 memory_manager.go:354] "RemoveStaleState removing state" podUID="c9cea268-1292-4512-b22f-891a4c652dd0" containerName="redhat-edpm-deployment-openstack-edpm-ipam" Dec 10 11:22:43 crc kubenswrapper[4780]: I1210 11:22:43.707986 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-5jtvv" Dec 10 11:22:43 crc kubenswrapper[4780]: I1210 11:22:43.714726 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Dec 10 11:22:43 crc kubenswrapper[4780]: I1210 11:22:43.715202 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Dec 10 11:22:43 crc kubenswrapper[4780]: I1210 11:22:43.717201 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-p2qrb" Dec 10 11:22:43 crc kubenswrapper[4780]: I1210 11:22:43.718255 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Dec 10 11:22:43 crc kubenswrapper[4780]: I1210 11:22:43.757159 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-5jtvv"] Dec 10 11:22:43 crc kubenswrapper[4780]: I1210 11:22:43.899303 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mvdl9\" (UniqueName: \"kubernetes.io/projected/51663ab7-946b-4356-b694-5ba7132781f4-kube-api-access-mvdl9\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-5jtvv\" (UID: \"51663ab7-946b-4356-b694-5ba7132781f4\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-5jtvv" Dec 10 11:22:43 crc kubenswrapper[4780]: I1210 11:22:43.899373 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/51663ab7-946b-4356-b694-5ba7132781f4-bootstrap-combined-ca-bundle\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-5jtvv\" (UID: \"51663ab7-946b-4356-b694-5ba7132781f4\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-5jtvv" Dec 10 11:22:43 crc kubenswrapper[4780]: I1210 11:22:43.900097 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/51663ab7-946b-4356-b694-5ba7132781f4-ssh-key\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-5jtvv\" (UID: \"51663ab7-946b-4356-b694-5ba7132781f4\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-5jtvv" Dec 10 11:22:43 crc kubenswrapper[4780]: I1210 11:22:43.900171 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/51663ab7-946b-4356-b694-5ba7132781f4-inventory\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-5jtvv\" (UID: \"51663ab7-946b-4356-b694-5ba7132781f4\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-5jtvv" Dec 10 11:22:44 crc kubenswrapper[4780]: I1210 11:22:44.004120 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mvdl9\" (UniqueName: \"kubernetes.io/projected/51663ab7-946b-4356-b694-5ba7132781f4-kube-api-access-mvdl9\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-5jtvv\" (UID: \"51663ab7-946b-4356-b694-5ba7132781f4\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-5jtvv" Dec 10 11:22:44 crc kubenswrapper[4780]: I1210 11:22:44.004250 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/51663ab7-946b-4356-b694-5ba7132781f4-bootstrap-combined-ca-bundle\") 
pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-5jtvv\" (UID: \"51663ab7-946b-4356-b694-5ba7132781f4\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-5jtvv" Dec 10 11:22:44 crc kubenswrapper[4780]: I1210 11:22:44.004458 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/51663ab7-946b-4356-b694-5ba7132781f4-ssh-key\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-5jtvv\" (UID: \"51663ab7-946b-4356-b694-5ba7132781f4\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-5jtvv" Dec 10 11:22:44 crc kubenswrapper[4780]: I1210 11:22:44.004493 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/51663ab7-946b-4356-b694-5ba7132781f4-inventory\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-5jtvv\" (UID: \"51663ab7-946b-4356-b694-5ba7132781f4\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-5jtvv" Dec 10 11:22:44 crc kubenswrapper[4780]: I1210 11:22:44.010746 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/51663ab7-946b-4356-b694-5ba7132781f4-inventory\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-5jtvv\" (UID: \"51663ab7-946b-4356-b694-5ba7132781f4\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-5jtvv" Dec 10 11:22:44 crc kubenswrapper[4780]: I1210 11:22:44.010765 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/51663ab7-946b-4356-b694-5ba7132781f4-bootstrap-combined-ca-bundle\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-5jtvv\" (UID: \"51663ab7-946b-4356-b694-5ba7132781f4\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-5jtvv" Dec 10 11:22:44 crc kubenswrapper[4780]: I1210 11:22:44.011787 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/51663ab7-946b-4356-b694-5ba7132781f4-ssh-key\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-5jtvv\" (UID: \"51663ab7-946b-4356-b694-5ba7132781f4\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-5jtvv" Dec 10 11:22:44 crc kubenswrapper[4780]: I1210 11:22:44.028716 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mvdl9\" (UniqueName: \"kubernetes.io/projected/51663ab7-946b-4356-b694-5ba7132781f4-kube-api-access-mvdl9\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-5jtvv\" (UID: \"51663ab7-946b-4356-b694-5ba7132781f4\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-5jtvv" Dec 10 11:22:44 crc kubenswrapper[4780]: I1210 11:22:44.038022 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-5jtvv" Dec 10 11:22:44 crc kubenswrapper[4780]: I1210 11:22:44.791884 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-5jtvv"] Dec 10 11:22:45 crc kubenswrapper[4780]: I1210 11:22:45.613212 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-5jtvv" event={"ID":"51663ab7-946b-4356-b694-5ba7132781f4","Type":"ContainerStarted","Data":"fa98564ba4c3540c237d6e9bc8ed65c5d63155372bff6b5519a35cfaf1e40afc"} Dec 10 11:22:46 crc kubenswrapper[4780]: I1210 11:22:46.628789 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-5jtvv" event={"ID":"51663ab7-946b-4356-b694-5ba7132781f4","Type":"ContainerStarted","Data":"2ea8d02715fa0c0643c8aa8e41af81a506916a8ba477b75124c902b883f1e6b1"} Dec 10 11:22:46 crc kubenswrapper[4780]: I1210 11:22:46.658704 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-5jtvv" podStartSLOduration=3.062498166 podStartE2EDuration="3.658672372s" podCreationTimestamp="2025-12-10 11:22:43 +0000 UTC" firstStartedPulling="2025-12-10 11:22:44.806570753 +0000 UTC m=+2269.659964196" lastFinishedPulling="2025-12-10 11:22:45.402744959 +0000 UTC m=+2270.256138402" observedRunningTime="2025-12-10 11:22:46.647559337 +0000 UTC m=+2271.500952790" watchObservedRunningTime="2025-12-10 11:22:46.658672372 +0000 UTC m=+2271.512065815" Dec 10 11:22:48 crc kubenswrapper[4780]: I1210 11:22:48.142267 4780 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-4dxw2" Dec 10 11:22:48 crc kubenswrapper[4780]: I1210 11:22:48.221545 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-4dxw2" Dec 10 11:22:48 crc kubenswrapper[4780]: I1210 11:22:48.405394 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-4dxw2"] Dec 10 11:22:49 crc kubenswrapper[4780]: I1210 11:22:49.970308 4780 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-4dxw2" podUID="a1c7c343-44ef-464b-bd25-e01c78b74fbd" containerName="registry-server" containerID="cri-o://13a91f507faef51e367a3f0edf67961817474442736f588a2e5d792d4e5c4255" gracePeriod=2 Dec 10 11:22:50 crc kubenswrapper[4780]: I1210 11:22:50.640775 4780 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-4dxw2" Dec 10 11:22:50 crc kubenswrapper[4780]: I1210 11:22:50.814801 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a1c7c343-44ef-464b-bd25-e01c78b74fbd-catalog-content\") pod \"a1c7c343-44ef-464b-bd25-e01c78b74fbd\" (UID: \"a1c7c343-44ef-464b-bd25-e01c78b74fbd\") " Dec 10 11:22:50 crc kubenswrapper[4780]: I1210 11:22:50.815129 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4srhw\" (UniqueName: \"kubernetes.io/projected/a1c7c343-44ef-464b-bd25-e01c78b74fbd-kube-api-access-4srhw\") pod \"a1c7c343-44ef-464b-bd25-e01c78b74fbd\" (UID: \"a1c7c343-44ef-464b-bd25-e01c78b74fbd\") " Dec 10 11:22:50 crc kubenswrapper[4780]: I1210 11:22:50.815250 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a1c7c343-44ef-464b-bd25-e01c78b74fbd-utilities\") pod \"a1c7c343-44ef-464b-bd25-e01c78b74fbd\" (UID: \"a1c7c343-44ef-464b-bd25-e01c78b74fbd\") " Dec 10 11:22:50 crc kubenswrapper[4780]: I1210 11:22:50.816628 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a1c7c343-44ef-464b-bd25-e01c78b74fbd-utilities" (OuterVolumeSpecName: "utilities") pod "a1c7c343-44ef-464b-bd25-e01c78b74fbd" (UID: "a1c7c343-44ef-464b-bd25-e01c78b74fbd"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 11:22:50 crc kubenswrapper[4780]: I1210 11:22:50.830086 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a1c7c343-44ef-464b-bd25-e01c78b74fbd-kube-api-access-4srhw" (OuterVolumeSpecName: "kube-api-access-4srhw") pod "a1c7c343-44ef-464b-bd25-e01c78b74fbd" (UID: "a1c7c343-44ef-464b-bd25-e01c78b74fbd"). InnerVolumeSpecName "kube-api-access-4srhw". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:22:50 crc kubenswrapper[4780]: I1210 11:22:50.879541 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a1c7c343-44ef-464b-bd25-e01c78b74fbd-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "a1c7c343-44ef-464b-bd25-e01c78b74fbd" (UID: "a1c7c343-44ef-464b-bd25-e01c78b74fbd"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 11:22:50 crc kubenswrapper[4780]: I1210 11:22:50.920342 4780 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4srhw\" (UniqueName: \"kubernetes.io/projected/a1c7c343-44ef-464b-bd25-e01c78b74fbd-kube-api-access-4srhw\") on node \"crc\" DevicePath \"\"" Dec 10 11:22:50 crc kubenswrapper[4780]: I1210 11:22:50.920411 4780 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a1c7c343-44ef-464b-bd25-e01c78b74fbd-utilities\") on node \"crc\" DevicePath \"\"" Dec 10 11:22:50 crc kubenswrapper[4780]: I1210 11:22:50.920434 4780 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a1c7c343-44ef-464b-bd25-e01c78b74fbd-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 10 11:22:51 crc kubenswrapper[4780]: I1210 11:22:51.007527 4780 generic.go:334] "Generic (PLEG): container finished" podID="a1c7c343-44ef-464b-bd25-e01c78b74fbd" containerID="13a91f507faef51e367a3f0edf67961817474442736f588a2e5d792d4e5c4255" exitCode=0 Dec 10 11:22:51 crc kubenswrapper[4780]: I1210 11:22:51.007600 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-4dxw2" event={"ID":"a1c7c343-44ef-464b-bd25-e01c78b74fbd","Type":"ContainerDied","Data":"13a91f507faef51e367a3f0edf67961817474442736f588a2e5d792d4e5c4255"} Dec 10 11:22:51 crc kubenswrapper[4780]: I1210 11:22:51.007642 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-4dxw2" event={"ID":"a1c7c343-44ef-464b-bd25-e01c78b74fbd","Type":"ContainerDied","Data":"5a08a4683012c28a007d58215176798a3f6ee6610384ccc4bc18d1ff2fde1051"} Dec 10 11:22:51 crc kubenswrapper[4780]: I1210 11:22:51.007695 4780 scope.go:117] "RemoveContainer" containerID="13a91f507faef51e367a3f0edf67961817474442736f588a2e5d792d4e5c4255" Dec 10 11:22:51 crc kubenswrapper[4780]: I1210 11:22:51.007761 4780 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-4dxw2" Dec 10 11:22:51 crc kubenswrapper[4780]: I1210 11:22:51.042939 4780 scope.go:117] "RemoveContainer" containerID="f4ebee6bab56d028f0aee13464d003c7b9c409a3165af9522376fc5418da5e25" Dec 10 11:22:51 crc kubenswrapper[4780]: I1210 11:22:51.074958 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-4dxw2"] Dec 10 11:22:51 crc kubenswrapper[4780]: I1210 11:22:51.092309 4780 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-4dxw2"] Dec 10 11:22:51 crc kubenswrapper[4780]: I1210 11:22:51.107292 4780 scope.go:117] "RemoveContainer" containerID="a9db8de4eaf496e58bdb8b2370802bcd3656577e8117be370d1990333f56c765" Dec 10 11:22:51 crc kubenswrapper[4780]: I1210 11:22:51.146477 4780 scope.go:117] "RemoveContainer" containerID="13a91f507faef51e367a3f0edf67961817474442736f588a2e5d792d4e5c4255" Dec 10 11:22:51 crc kubenswrapper[4780]: E1210 11:22:51.152543 4780 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"13a91f507faef51e367a3f0edf67961817474442736f588a2e5d792d4e5c4255\": container with ID starting with 13a91f507faef51e367a3f0edf67961817474442736f588a2e5d792d4e5c4255 not found: ID does not exist" containerID="13a91f507faef51e367a3f0edf67961817474442736f588a2e5d792d4e5c4255" Dec 10 11:22:51 crc kubenswrapper[4780]: I1210 11:22:51.152650 4780 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"13a91f507faef51e367a3f0edf67961817474442736f588a2e5d792d4e5c4255"} err="failed to get container status \"13a91f507faef51e367a3f0edf67961817474442736f588a2e5d792d4e5c4255\": rpc error: code = NotFound desc = could not find container \"13a91f507faef51e367a3f0edf67961817474442736f588a2e5d792d4e5c4255\": container with ID starting with 13a91f507faef51e367a3f0edf67961817474442736f588a2e5d792d4e5c4255 not found: ID does not exist" Dec 10 11:22:51 crc kubenswrapper[4780]: I1210 11:22:51.152700 4780 scope.go:117] "RemoveContainer" containerID="f4ebee6bab56d028f0aee13464d003c7b9c409a3165af9522376fc5418da5e25" Dec 10 11:22:51 crc kubenswrapper[4780]: E1210 11:22:51.153562 4780 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f4ebee6bab56d028f0aee13464d003c7b9c409a3165af9522376fc5418da5e25\": container with ID starting with f4ebee6bab56d028f0aee13464d003c7b9c409a3165af9522376fc5418da5e25 not found: ID does not exist" containerID="f4ebee6bab56d028f0aee13464d003c7b9c409a3165af9522376fc5418da5e25" Dec 10 11:22:51 crc kubenswrapper[4780]: I1210 11:22:51.153644 4780 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f4ebee6bab56d028f0aee13464d003c7b9c409a3165af9522376fc5418da5e25"} err="failed to get container status \"f4ebee6bab56d028f0aee13464d003c7b9c409a3165af9522376fc5418da5e25\": rpc error: code = NotFound desc = could not find container \"f4ebee6bab56d028f0aee13464d003c7b9c409a3165af9522376fc5418da5e25\": container with ID starting with f4ebee6bab56d028f0aee13464d003c7b9c409a3165af9522376fc5418da5e25 not found: ID does not exist" Dec 10 11:22:51 crc kubenswrapper[4780]: I1210 11:22:51.153690 4780 scope.go:117] "RemoveContainer" containerID="a9db8de4eaf496e58bdb8b2370802bcd3656577e8117be370d1990333f56c765" Dec 10 11:22:51 crc kubenswrapper[4780]: E1210 11:22:51.154249 4780 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"a9db8de4eaf496e58bdb8b2370802bcd3656577e8117be370d1990333f56c765\": container with ID starting with a9db8de4eaf496e58bdb8b2370802bcd3656577e8117be370d1990333f56c765 not found: ID does not exist" containerID="a9db8de4eaf496e58bdb8b2370802bcd3656577e8117be370d1990333f56c765" Dec 10 11:22:51 crc kubenswrapper[4780]: I1210 11:22:51.154315 4780 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a9db8de4eaf496e58bdb8b2370802bcd3656577e8117be370d1990333f56c765"} err="failed to get container status \"a9db8de4eaf496e58bdb8b2370802bcd3656577e8117be370d1990333f56c765\": rpc error: code = NotFound desc = could not find container \"a9db8de4eaf496e58bdb8b2370802bcd3656577e8117be370d1990333f56c765\": container with ID starting with a9db8de4eaf496e58bdb8b2370802bcd3656577e8117be370d1990333f56c765 not found: ID does not exist" Dec 10 11:22:51 crc kubenswrapper[4780]: I1210 11:22:51.980840 4780 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a1c7c343-44ef-464b-bd25-e01c78b74fbd" path="/var/lib/kubelet/pods/a1c7c343-44ef-464b-bd25-e01c78b74fbd/volumes" Dec 10 11:22:52 crc kubenswrapper[4780]: E1210 11:22:52.962224 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 11:22:55 crc kubenswrapper[4780]: E1210 11:22:55.981587 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 11:23:05 crc kubenswrapper[4780]: E1210 11:23:05.976996 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 11:23:08 crc kubenswrapper[4780]: E1210 11:23:08.965870 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 11:23:19 crc kubenswrapper[4780]: E1210 11:23:19.963117 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 11:23:21 crc kubenswrapper[4780]: E1210 11:23:21.962140 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" 
pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 11:23:25 crc kubenswrapper[4780]: I1210 11:23:25.072876 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-ab68-account-create-update-ss7hk"] Dec 10 11:23:25 crc kubenswrapper[4780]: I1210 11:23:25.092651 4780 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-ab68-account-create-update-ss7hk"] Dec 10 11:23:25 crc kubenswrapper[4780]: I1210 11:23:25.980612 4780 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bd881333-f3fb-4c4d-b31e-f755f9c1271d" path="/var/lib/kubelet/pods/bd881333-f3fb-4c4d-b31e-f755f9c1271d/volumes" Dec 10 11:23:26 crc kubenswrapper[4780]: I1210 11:23:26.058407 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-db-create-sms69"] Dec 10 11:23:26 crc kubenswrapper[4780]: I1210 11:23:26.074339 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-db-create-vwf8h"] Dec 10 11:23:26 crc kubenswrapper[4780]: I1210 11:23:26.093270 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-1476-account-create-update-wszkq"] Dec 10 11:23:26 crc kubenswrapper[4780]: I1210 11:23:26.107108 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-db-create-sws85"] Dec 10 11:23:26 crc kubenswrapper[4780]: I1210 11:23:26.119620 4780 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-db-create-vwf8h"] Dec 10 11:23:26 crc kubenswrapper[4780]: I1210 11:23:26.151534 4780 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-db-create-sms69"] Dec 10 11:23:26 crc kubenswrapper[4780]: I1210 11:23:26.177437 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/mysqld-exporter-openstack-db-create-bfhjg"] Dec 10 11:23:26 crc kubenswrapper[4780]: I1210 11:23:26.196261 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-2e1e-account-create-update-xshf7"] Dec 10 11:23:26 crc kubenswrapper[4780]: I1210 11:23:26.214868 4780 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-1476-account-create-update-wszkq"] Dec 10 11:23:26 crc kubenswrapper[4780]: I1210 11:23:26.231780 4780 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-db-create-sws85"] Dec 10 11:23:26 crc kubenswrapper[4780]: I1210 11:23:26.250001 4780 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/mysqld-exporter-openstack-db-create-bfhjg"] Dec 10 11:23:26 crc kubenswrapper[4780]: I1210 11:23:26.266212 4780 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-2e1e-account-create-update-xshf7"] Dec 10 11:23:27 crc kubenswrapper[4780]: I1210 11:23:27.041539 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/mysqld-exporter-3721-account-create-update-gbldb"] Dec 10 11:23:27 crc kubenswrapper[4780]: I1210 11:23:27.055108 4780 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/mysqld-exporter-3721-account-create-update-gbldb"] Dec 10 11:23:28 crc kubenswrapper[4780]: I1210 11:23:28.547124 4780 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="002c5229-a237-4f6b-a323-f28d0eb09124" path="/var/lib/kubelet/pods/002c5229-a237-4f6b-a323-f28d0eb09124/volumes" Dec 10 11:23:28 crc kubenswrapper[4780]: I1210 11:23:28.560906 4780 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1d18f911-380d-4e2d-915b-18621871d0f5" 
path="/var/lib/kubelet/pods/1d18f911-380d-4e2d-915b-18621871d0f5/volumes" Dec 10 11:23:28 crc kubenswrapper[4780]: I1210 11:23:28.568453 4780 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5a999d24-37c5-4026-9b4a-31e3642077e1" path="/var/lib/kubelet/pods/5a999d24-37c5-4026-9b4a-31e3642077e1/volumes" Dec 10 11:23:28 crc kubenswrapper[4780]: I1210 11:23:28.570405 4780 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8bc9d7bc-3890-4523-b805-9cd4c167fd9b" path="/var/lib/kubelet/pods/8bc9d7bc-3890-4523-b805-9cd4c167fd9b/volumes" Dec 10 11:23:28 crc kubenswrapper[4780]: I1210 11:23:28.576882 4780 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b65fc796-3b7d-44e5-98eb-898c371a7174" path="/var/lib/kubelet/pods/b65fc796-3b7d-44e5-98eb-898c371a7174/volumes" Dec 10 11:23:28 crc kubenswrapper[4780]: I1210 11:23:28.578438 4780 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c6e99715-0d2d-4998-899e-58c68d7db78a" path="/var/lib/kubelet/pods/c6e99715-0d2d-4998-899e-58c68d7db78a/volumes" Dec 10 11:23:28 crc kubenswrapper[4780]: I1210 11:23:28.579577 4780 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="df258243-e086-4015-829c-01101a52b26e" path="/var/lib/kubelet/pods/df258243-e086-4015-829c-01101a52b26e/volumes" Dec 10 11:23:32 crc kubenswrapper[4780]: E1210 11:23:32.963344 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 11:23:36 crc kubenswrapper[4780]: E1210 11:23:36.407216 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 11:23:37 crc kubenswrapper[4780]: I1210 11:23:37.061232 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/mysqld-exporter-69b5-account-create-update-4q87k"] Dec 10 11:23:37 crc kubenswrapper[4780]: I1210 11:23:37.076387 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/mysqld-exporter-openstack-cell1-db-create-r5ntq"] Dec 10 11:23:37 crc kubenswrapper[4780]: I1210 11:23:37.093570 4780 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/mysqld-exporter-69b5-account-create-update-4q87k"] Dec 10 11:23:37 crc kubenswrapper[4780]: I1210 11:23:37.111026 4780 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/mysqld-exporter-openstack-cell1-db-create-r5ntq"] Dec 10 11:23:37 crc kubenswrapper[4780]: I1210 11:23:37.978345 4780 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3701bf21-11ac-4fea-8e61-f5b837dcf713" path="/var/lib/kubelet/pods/3701bf21-11ac-4fea-8e61-f5b837dcf713/volumes" Dec 10 11:23:37 crc kubenswrapper[4780]: I1210 11:23:37.979752 4780 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5e0f03ab-2e1d-406a-a4a0-c699b5a68e90" path="/var/lib/kubelet/pods/5e0f03ab-2e1d-406a-a4a0-c699b5a68e90/volumes" Dec 10 11:23:41 crc kubenswrapper[4780]: I1210 11:23:41.768888 4780 scope.go:117] "RemoveContainer" containerID="626c0c093995ce580fcfbc3504e137478edbffdc5bd41f95d51d4b9c88916e78" Dec 10 
11:23:41 crc kubenswrapper[4780]: I1210 11:23:41.808359 4780 scope.go:117] "RemoveContainer" containerID="c24671d4d85b16e517f5138e655fffe93008cbcb9c8383aba20be513e9ec5e8c" Dec 10 11:23:41 crc kubenswrapper[4780]: I1210 11:23:41.894232 4780 scope.go:117] "RemoveContainer" containerID="07ea6848dda70e7980c76502a9b5d5b203e25ca1a5790393a548aae5e22f3c96" Dec 10 11:23:41 crc kubenswrapper[4780]: I1210 11:23:41.958386 4780 scope.go:117] "RemoveContainer" containerID="8e46cfb55ce036b037d8dcf5b5ee9af30c938e54396d6effc1e3c728949b5a25" Dec 10 11:23:42 crc kubenswrapper[4780]: I1210 11:23:42.030329 4780 scope.go:117] "RemoveContainer" containerID="a710a8ec6e7b61ce3c285a19fab368a1c68a89886775139086243d00f6cb3af4" Dec 10 11:23:42 crc kubenswrapper[4780]: I1210 11:23:42.102694 4780 scope.go:117] "RemoveContainer" containerID="3cca7f3649b3134d461ac278be985fe5a5bc49056f19b7c92815a3ec10122df8" Dec 10 11:23:42 crc kubenswrapper[4780]: I1210 11:23:42.180609 4780 scope.go:117] "RemoveContainer" containerID="f4770b342ae8db996830be5ee078da9ada45b1ed1159abcfa4aa9cbe2660bb95" Dec 10 11:23:42 crc kubenswrapper[4780]: I1210 11:23:42.213032 4780 scope.go:117] "RemoveContainer" containerID="0e95fdb4ab1d59be8fef5445bfa536b18870a7d7e7d938a8b087169317427afa" Dec 10 11:23:42 crc kubenswrapper[4780]: I1210 11:23:42.243397 4780 scope.go:117] "RemoveContainer" containerID="53fa60f7a7b2ec54ab179f0eb28668d7df18f1b54aaf119b9c48ae469fc9867f" Dec 10 11:23:42 crc kubenswrapper[4780]: I1210 11:23:42.286978 4780 scope.go:117] "RemoveContainer" containerID="90a35e60ec99a8c5e69f1eb51dc947804ced21f35ecd00644bcd940c803e0ebc" Dec 10 11:23:46 crc kubenswrapper[4780]: E1210 11:23:46.104596 4780 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested" Dec 10 11:23:46 crc kubenswrapper[4780]: E1210 11:23:46.105447 4780 kuberuntime_image.go:55] "Failed to pull image" err="initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested" Dec 10 11:23:46 crc kubenswrapper[4780]: E1210 11:23:46.105656 4780 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:heat-db-sync,Image:quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested,Command:[/bin/bash],Args:[-c /usr/bin/heat-manage --config-dir /etc/heat/heat.conf.d db_sync],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:true,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/heat/heat.conf.d/00-default.conf,SubPath:00-default.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:false,MountPath:/etc/heat/heat.conf.d/01-custom.conf,SubPath:01-custom.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/my.cnf,SubPath:my.cnf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-fh6ms,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42418,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:*42418,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod heat-db-sync-nd4t7_openstack(4ba2892c-316e-4819-a33c-d7b2b6803553): ErrImagePull: initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" logger="UnhandledError" Dec 10 11:23:46 crc kubenswrapper[4780]: E1210 11:23:46.106961 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ErrImagePull: \"initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 11:23:47 crc kubenswrapper[4780]: E1210 11:23:47.963027 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 11:23:56 crc kubenswrapper[4780]: E1210 11:23:56.965941 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 11:23:59 crc kubenswrapper[4780]: I1210 11:23:59.963688 4780 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Dec 10 11:24:00 crc kubenswrapper[4780]: E1210 11:24:00.095887 4780 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested" Dec 10 11:24:00 crc kubenswrapper[4780]: E1210 11:24:00.095992 4780 kuberuntime_image.go:55] "Failed to pull image" err="initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested" Dec 10 11:24:00 crc kubenswrapper[4780]: E1210 11:24:00.096243 4780 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:ceilometer-central-agent,Image:quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n5d9hcfh66bh66bh89h5cdh97h57ch598h68h5b5h689h56chc5h96h58ch687h5dfh5ddh645h68bhcchcdh56ch56fh9fh654hd4h8dhb9h74h59cq,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/var/lib/openstack/bin,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/openstack/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:ceilometer-central-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-gf2w8,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[/usr/bin/python3 /var/lib/openstack/bin/centralhealth.py],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:300,TimeoutSeconds:5,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ceilometer-0_openstack(317b5b7c-bb08-4441-a2ef-8c2d7390ada6): ErrImagePull: initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" logger="UnhandledError" Dec 10 11:24:00 crc kubenswrapper[4780]: E1210 11:24:00.097903 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ErrImagePull: \"initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 11:24:09 crc kubenswrapper[4780]: E1210 11:24:09.962214 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 11:24:10 crc kubenswrapper[4780]: E1210 11:24:10.963743 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 11:24:13 crc kubenswrapper[4780]: I1210 11:24:13.057542 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-db-create-k47wv"] Dec 10 11:24:13 crc kubenswrapper[4780]: I1210 11:24:13.071541 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-b224-account-create-update-klk96"] Dec 10 11:24:13 crc kubenswrapper[4780]: I1210 11:24:13.088016 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-db-create-g2bln"] Dec 10 11:24:13 crc kubenswrapper[4780]: I1210 11:24:13.102223 4780 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-db-create-g2bln"] Dec 10 11:24:13 crc kubenswrapper[4780]: I1210 11:24:13.120377 4780 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-b224-account-create-update-klk96"] Dec 10 11:24:13 crc kubenswrapper[4780]: I1210 11:24:13.134682 4780 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-db-create-k47wv"] Dec 10 11:24:13 crc kubenswrapper[4780]: I1210 11:24:13.975710 4780 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2acffbad-6bc8-4c72-b800-c94b25aeb1bc" path="/var/lib/kubelet/pods/2acffbad-6bc8-4c72-b800-c94b25aeb1bc/volumes" Dec 10 11:24:13 crc kubenswrapper[4780]: I1210 11:24:13.976834 4780 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cce757bb-1e72-440d-97e9-63b08f5aa63b" path="/var/lib/kubelet/pods/cce757bb-1e72-440d-97e9-63b08f5aa63b/volumes" Dec 10 11:24:13 crc kubenswrapper[4780]: I1210 11:24:13.977820 4780 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cd7baf7c-6124-4c2f-ade4-302171c1e93f" path="/var/lib/kubelet/pods/cd7baf7c-6124-4c2f-ade4-302171c1e93f/volumes" Dec 10 11:24:14 crc kubenswrapper[4780]: I1210 11:24:14.049137 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-b379-account-create-update-7m27l"] Dec 10 11:24:14 crc kubenswrapper[4780]: I1210 11:24:14.070449 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-db-create-mnfzx"] Dec 10 11:24:14 crc kubenswrapper[4780]: I1210 11:24:14.085666 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-db-create-dbhzs"] Dec 10 11:24:14 crc kubenswrapper[4780]: I1210 11:24:14.098275 4780 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/heat-b379-account-create-update-7m27l"] Dec 10 11:24:14 crc kubenswrapper[4780]: I1210 11:24:14.139044 4780 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/heat-db-create-mnfzx"] Dec 10 11:24:14 crc kubenswrapper[4780]: I1210 11:24:14.155618 4780 
kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-db-create-dbhzs"] Dec 10 11:24:15 crc kubenswrapper[4780]: I1210 11:24:15.978658 4780 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="33cacd55-bfbd-437d-9a42-0a883b479efa" path="/var/lib/kubelet/pods/33cacd55-bfbd-437d-9a42-0a883b479efa/volumes" Dec 10 11:24:15 crc kubenswrapper[4780]: I1210 11:24:15.979555 4780 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4d7da3d7-458f-4cd4-8a91-432c7395bdcc" path="/var/lib/kubelet/pods/4d7da3d7-458f-4cd4-8a91-432c7395bdcc/volumes" Dec 10 11:24:15 crc kubenswrapper[4780]: I1210 11:24:15.981230 4780 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="773f068b-e55b-4207-b91b-3fe664bfaec5" path="/var/lib/kubelet/pods/773f068b-e55b-4207-b91b-3fe664bfaec5/volumes" Dec 10 11:24:16 crc kubenswrapper[4780]: I1210 11:24:16.048044 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-e8d6-account-create-update-75x74"] Dec 10 11:24:16 crc kubenswrapper[4780]: I1210 11:24:16.060818 4780 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-e8d6-account-create-update-75x74"] Dec 10 11:24:16 crc kubenswrapper[4780]: I1210 11:24:16.072822 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-5b53-account-create-update-rczqg"] Dec 10 11:24:16 crc kubenswrapper[4780]: I1210 11:24:16.086830 4780 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-5b53-account-create-update-rczqg"] Dec 10 11:24:17 crc kubenswrapper[4780]: I1210 11:24:17.979203 4780 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="57e27a9d-d862-41f8-b10d-b28fb268f91c" path="/var/lib/kubelet/pods/57e27a9d-d862-41f8-b10d-b28fb268f91c/volumes" Dec 10 11:24:17 crc kubenswrapper[4780]: I1210 11:24:17.981513 4780 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b734f2d8-19e5-4d25-882b-0f3a468dcde7" path="/var/lib/kubelet/pods/b734f2d8-19e5-4d25-882b-0f3a468dcde7/volumes" Dec 10 11:24:22 crc kubenswrapper[4780]: I1210 11:24:22.057036 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-kjllg"] Dec 10 11:24:22 crc kubenswrapper[4780]: E1210 11:24:22.058611 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a1c7c343-44ef-464b-bd25-e01c78b74fbd" containerName="extract-content" Dec 10 11:24:22 crc kubenswrapper[4780]: I1210 11:24:22.058635 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="a1c7c343-44ef-464b-bd25-e01c78b74fbd" containerName="extract-content" Dec 10 11:24:22 crc kubenswrapper[4780]: E1210 11:24:22.058660 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a1c7c343-44ef-464b-bd25-e01c78b74fbd" containerName="registry-server" Dec 10 11:24:22 crc kubenswrapper[4780]: I1210 11:24:22.058667 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="a1c7c343-44ef-464b-bd25-e01c78b74fbd" containerName="registry-server" Dec 10 11:24:22 crc kubenswrapper[4780]: E1210 11:24:22.058711 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a1c7c343-44ef-464b-bd25-e01c78b74fbd" containerName="extract-utilities" Dec 10 11:24:22 crc kubenswrapper[4780]: I1210 11:24:22.058719 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="a1c7c343-44ef-464b-bd25-e01c78b74fbd" containerName="extract-utilities" Dec 10 11:24:22 crc kubenswrapper[4780]: I1210 11:24:22.059122 4780 memory_manager.go:354] "RemoveStaleState removing state" 
podUID="a1c7c343-44ef-464b-bd25-e01c78b74fbd" containerName="registry-server" Dec 10 11:24:22 crc kubenswrapper[4780]: I1210 11:24:22.061260 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-kjllg" Dec 10 11:24:22 crc kubenswrapper[4780]: I1210 11:24:22.075184 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-kjllg"] Dec 10 11:24:22 crc kubenswrapper[4780]: I1210 11:24:22.125219 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4b423690-c3d3-4205-ab70-990085ef5810-utilities\") pod \"redhat-marketplace-kjllg\" (UID: \"4b423690-c3d3-4205-ab70-990085ef5810\") " pod="openshift-marketplace/redhat-marketplace-kjllg" Dec 10 11:24:22 crc kubenswrapper[4780]: I1210 11:24:22.125777 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4b423690-c3d3-4205-ab70-990085ef5810-catalog-content\") pod \"redhat-marketplace-kjllg\" (UID: \"4b423690-c3d3-4205-ab70-990085ef5810\") " pod="openshift-marketplace/redhat-marketplace-kjllg" Dec 10 11:24:22 crc kubenswrapper[4780]: I1210 11:24:22.125852 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f6nw2\" (UniqueName: \"kubernetes.io/projected/4b423690-c3d3-4205-ab70-990085ef5810-kube-api-access-f6nw2\") pod \"redhat-marketplace-kjllg\" (UID: \"4b423690-c3d3-4205-ab70-990085ef5810\") " pod="openshift-marketplace/redhat-marketplace-kjllg" Dec 10 11:24:22 crc kubenswrapper[4780]: I1210 11:24:22.231251 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4b423690-c3d3-4205-ab70-990085ef5810-catalog-content\") pod \"redhat-marketplace-kjllg\" (UID: \"4b423690-c3d3-4205-ab70-990085ef5810\") " pod="openshift-marketplace/redhat-marketplace-kjllg" Dec 10 11:24:22 crc kubenswrapper[4780]: I1210 11:24:22.231401 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-f6nw2\" (UniqueName: \"kubernetes.io/projected/4b423690-c3d3-4205-ab70-990085ef5810-kube-api-access-f6nw2\") pod \"redhat-marketplace-kjllg\" (UID: \"4b423690-c3d3-4205-ab70-990085ef5810\") " pod="openshift-marketplace/redhat-marketplace-kjllg" Dec 10 11:24:22 crc kubenswrapper[4780]: I1210 11:24:22.231734 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4b423690-c3d3-4205-ab70-990085ef5810-utilities\") pod \"redhat-marketplace-kjllg\" (UID: \"4b423690-c3d3-4205-ab70-990085ef5810\") " pod="openshift-marketplace/redhat-marketplace-kjllg" Dec 10 11:24:22 crc kubenswrapper[4780]: I1210 11:24:22.232663 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4b423690-c3d3-4205-ab70-990085ef5810-catalog-content\") pod \"redhat-marketplace-kjllg\" (UID: \"4b423690-c3d3-4205-ab70-990085ef5810\") " pod="openshift-marketplace/redhat-marketplace-kjllg" Dec 10 11:24:22 crc kubenswrapper[4780]: I1210 11:24:22.232865 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4b423690-c3d3-4205-ab70-990085ef5810-utilities\") pod \"redhat-marketplace-kjllg\" (UID: 
\"4b423690-c3d3-4205-ab70-990085ef5810\") " pod="openshift-marketplace/redhat-marketplace-kjllg" Dec 10 11:24:22 crc kubenswrapper[4780]: I1210 11:24:22.258155 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-f6nw2\" (UniqueName: \"kubernetes.io/projected/4b423690-c3d3-4205-ab70-990085ef5810-kube-api-access-f6nw2\") pod \"redhat-marketplace-kjllg\" (UID: \"4b423690-c3d3-4205-ab70-990085ef5810\") " pod="openshift-marketplace/redhat-marketplace-kjllg" Dec 10 11:24:22 crc kubenswrapper[4780]: I1210 11:24:22.395963 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-kjllg" Dec 10 11:24:23 crc kubenswrapper[4780]: E1210 11:24:23.437171 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 11:24:23 crc kubenswrapper[4780]: E1210 11:24:23.437360 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 11:24:23 crc kubenswrapper[4780]: I1210 11:24:23.482815 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-kjllg"] Dec 10 11:24:24 crc kubenswrapper[4780]: I1210 11:24:24.458850 4780 generic.go:334] "Generic (PLEG): container finished" podID="4b423690-c3d3-4205-ab70-990085ef5810" containerID="6fd147e50958a61fa61abed42a615d4e904fccfcfbbc57e4384aeac900116456" exitCode=0 Dec 10 11:24:24 crc kubenswrapper[4780]: I1210 11:24:24.459079 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-kjllg" event={"ID":"4b423690-c3d3-4205-ab70-990085ef5810","Type":"ContainerDied","Data":"6fd147e50958a61fa61abed42a615d4e904fccfcfbbc57e4384aeac900116456"} Dec 10 11:24:24 crc kubenswrapper[4780]: I1210 11:24:24.459407 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-kjllg" event={"ID":"4b423690-c3d3-4205-ab70-990085ef5810","Type":"ContainerStarted","Data":"a80360d4cd3eeb1727861ffe3cd4c0f48b8c18e4e291d41a4b4b488405e05ebd"} Dec 10 11:24:26 crc kubenswrapper[4780]: I1210 11:24:26.497033 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-kjllg" event={"ID":"4b423690-c3d3-4205-ab70-990085ef5810","Type":"ContainerStarted","Data":"aa89476d55e5b87ebbbd5ca50cfcf60509dbe2460aaa312a15b489cf949341f6"} Dec 10 11:24:27 crc kubenswrapper[4780]: I1210 11:24:27.514681 4780 generic.go:334] "Generic (PLEG): container finished" podID="4b423690-c3d3-4205-ab70-990085ef5810" containerID="aa89476d55e5b87ebbbd5ca50cfcf60509dbe2460aaa312a15b489cf949341f6" exitCode=0 Dec 10 11:24:27 crc kubenswrapper[4780]: I1210 11:24:27.514771 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-kjllg" event={"ID":"4b423690-c3d3-4205-ab70-990085ef5810","Type":"ContainerDied","Data":"aa89476d55e5b87ebbbd5ca50cfcf60509dbe2460aaa312a15b489cf949341f6"} Dec 10 11:24:28 crc kubenswrapper[4780]: I1210 11:24:28.532505 4780 kubelet.go:2453] 
"SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-kjllg" event={"ID":"4b423690-c3d3-4205-ab70-990085ef5810","Type":"ContainerStarted","Data":"e8f3a91ac188e81cf74f2db0544b12b42a986e44a8e9b3eee4f602d74e57a42d"} Dec 10 11:24:28 crc kubenswrapper[4780]: I1210 11:24:28.565353 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-kjllg" podStartSLOduration=3.012848231 podStartE2EDuration="6.565317804s" podCreationTimestamp="2025-12-10 11:24:22 +0000 UTC" firstStartedPulling="2025-12-10 11:24:24.461812163 +0000 UTC m=+2369.315205606" lastFinishedPulling="2025-12-10 11:24:28.014281736 +0000 UTC m=+2372.867675179" observedRunningTime="2025-12-10 11:24:28.551194291 +0000 UTC m=+2373.404587744" watchObservedRunningTime="2025-12-10 11:24:28.565317804 +0000 UTC m=+2373.418711257" Dec 10 11:24:32 crc kubenswrapper[4780]: I1210 11:24:32.397805 4780 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-kjllg" Dec 10 11:24:32 crc kubenswrapper[4780]: I1210 11:24:32.403112 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-kjllg" Dec 10 11:24:32 crc kubenswrapper[4780]: I1210 11:24:32.488123 4780 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-kjllg" Dec 10 11:24:33 crc kubenswrapper[4780]: I1210 11:24:33.674135 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-kjllg" Dec 10 11:24:33 crc kubenswrapper[4780]: I1210 11:24:33.735991 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-kjllg"] Dec 10 11:24:33 crc kubenswrapper[4780]: E1210 11:24:33.963542 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 11:24:34 crc kubenswrapper[4780]: E1210 11:24:34.961580 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 11:24:35 crc kubenswrapper[4780]: I1210 11:24:35.637988 4780 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-kjllg" podUID="4b423690-c3d3-4205-ab70-990085ef5810" containerName="registry-server" containerID="cri-o://e8f3a91ac188e81cf74f2db0544b12b42a986e44a8e9b3eee4f602d74e57a42d" gracePeriod=2 Dec 10 11:24:36 crc kubenswrapper[4780]: I1210 11:24:36.323080 4780 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-kjllg" Dec 10 11:24:36 crc kubenswrapper[4780]: I1210 11:24:36.435272 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4b423690-c3d3-4205-ab70-990085ef5810-utilities\") pod \"4b423690-c3d3-4205-ab70-990085ef5810\" (UID: \"4b423690-c3d3-4205-ab70-990085ef5810\") " Dec 10 11:24:36 crc kubenswrapper[4780]: I1210 11:24:36.435775 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4b423690-c3d3-4205-ab70-990085ef5810-catalog-content\") pod \"4b423690-c3d3-4205-ab70-990085ef5810\" (UID: \"4b423690-c3d3-4205-ab70-990085ef5810\") " Dec 10 11:24:36 crc kubenswrapper[4780]: I1210 11:24:36.435852 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-f6nw2\" (UniqueName: \"kubernetes.io/projected/4b423690-c3d3-4205-ab70-990085ef5810-kube-api-access-f6nw2\") pod \"4b423690-c3d3-4205-ab70-990085ef5810\" (UID: \"4b423690-c3d3-4205-ab70-990085ef5810\") " Dec 10 11:24:36 crc kubenswrapper[4780]: I1210 11:24:36.438958 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4b423690-c3d3-4205-ab70-990085ef5810-utilities" (OuterVolumeSpecName: "utilities") pod "4b423690-c3d3-4205-ab70-990085ef5810" (UID: "4b423690-c3d3-4205-ab70-990085ef5810"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 11:24:36 crc kubenswrapper[4780]: I1210 11:24:36.447628 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4b423690-c3d3-4205-ab70-990085ef5810-kube-api-access-f6nw2" (OuterVolumeSpecName: "kube-api-access-f6nw2") pod "4b423690-c3d3-4205-ab70-990085ef5810" (UID: "4b423690-c3d3-4205-ab70-990085ef5810"). InnerVolumeSpecName "kube-api-access-f6nw2". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:24:36 crc kubenswrapper[4780]: I1210 11:24:36.464728 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4b423690-c3d3-4205-ab70-990085ef5810-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "4b423690-c3d3-4205-ab70-990085ef5810" (UID: "4b423690-c3d3-4205-ab70-990085ef5810"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 11:24:36 crc kubenswrapper[4780]: I1210 11:24:36.540928 4780 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4b423690-c3d3-4205-ab70-990085ef5810-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 10 11:24:36 crc kubenswrapper[4780]: I1210 11:24:36.540979 4780 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-f6nw2\" (UniqueName: \"kubernetes.io/projected/4b423690-c3d3-4205-ab70-990085ef5810-kube-api-access-f6nw2\") on node \"crc\" DevicePath \"\"" Dec 10 11:24:36 crc kubenswrapper[4780]: I1210 11:24:36.540996 4780 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4b423690-c3d3-4205-ab70-990085ef5810-utilities\") on node \"crc\" DevicePath \"\"" Dec 10 11:24:36 crc kubenswrapper[4780]: I1210 11:24:36.660852 4780 generic.go:334] "Generic (PLEG): container finished" podID="4b423690-c3d3-4205-ab70-990085ef5810" containerID="e8f3a91ac188e81cf74f2db0544b12b42a986e44a8e9b3eee4f602d74e57a42d" exitCode=0 Dec 10 11:24:36 crc kubenswrapper[4780]: I1210 11:24:36.660951 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-kjllg" event={"ID":"4b423690-c3d3-4205-ab70-990085ef5810","Type":"ContainerDied","Data":"e8f3a91ac188e81cf74f2db0544b12b42a986e44a8e9b3eee4f602d74e57a42d"} Dec 10 11:24:36 crc kubenswrapper[4780]: I1210 11:24:36.660998 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-kjllg" event={"ID":"4b423690-c3d3-4205-ab70-990085ef5810","Type":"ContainerDied","Data":"a80360d4cd3eeb1727861ffe3cd4c0f48b8c18e4e291d41a4b4b488405e05ebd"} Dec 10 11:24:36 crc kubenswrapper[4780]: I1210 11:24:36.661030 4780 scope.go:117] "RemoveContainer" containerID="e8f3a91ac188e81cf74f2db0544b12b42a986e44a8e9b3eee4f602d74e57a42d" Dec 10 11:24:36 crc kubenswrapper[4780]: I1210 11:24:36.661056 4780 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-kjllg" Dec 10 11:24:36 crc kubenswrapper[4780]: I1210 11:24:36.703674 4780 scope.go:117] "RemoveContainer" containerID="aa89476d55e5b87ebbbd5ca50cfcf60509dbe2460aaa312a15b489cf949341f6" Dec 10 11:24:36 crc kubenswrapper[4780]: I1210 11:24:36.717719 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-kjllg"] Dec 10 11:24:36 crc kubenswrapper[4780]: I1210 11:24:36.731605 4780 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-kjllg"] Dec 10 11:24:36 crc kubenswrapper[4780]: I1210 11:24:36.749867 4780 scope.go:117] "RemoveContainer" containerID="6fd147e50958a61fa61abed42a615d4e904fccfcfbbc57e4384aeac900116456" Dec 10 11:24:36 crc kubenswrapper[4780]: I1210 11:24:36.800347 4780 scope.go:117] "RemoveContainer" containerID="e8f3a91ac188e81cf74f2db0544b12b42a986e44a8e9b3eee4f602d74e57a42d" Dec 10 11:24:36 crc kubenswrapper[4780]: E1210 11:24:36.801565 4780 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e8f3a91ac188e81cf74f2db0544b12b42a986e44a8e9b3eee4f602d74e57a42d\": container with ID starting with e8f3a91ac188e81cf74f2db0544b12b42a986e44a8e9b3eee4f602d74e57a42d not found: ID does not exist" containerID="e8f3a91ac188e81cf74f2db0544b12b42a986e44a8e9b3eee4f602d74e57a42d" Dec 10 11:24:36 crc kubenswrapper[4780]: I1210 11:24:36.801649 4780 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e8f3a91ac188e81cf74f2db0544b12b42a986e44a8e9b3eee4f602d74e57a42d"} err="failed to get container status \"e8f3a91ac188e81cf74f2db0544b12b42a986e44a8e9b3eee4f602d74e57a42d\": rpc error: code = NotFound desc = could not find container \"e8f3a91ac188e81cf74f2db0544b12b42a986e44a8e9b3eee4f602d74e57a42d\": container with ID starting with e8f3a91ac188e81cf74f2db0544b12b42a986e44a8e9b3eee4f602d74e57a42d not found: ID does not exist" Dec 10 11:24:36 crc kubenswrapper[4780]: I1210 11:24:36.801690 4780 scope.go:117] "RemoveContainer" containerID="aa89476d55e5b87ebbbd5ca50cfcf60509dbe2460aaa312a15b489cf949341f6" Dec 10 11:24:36 crc kubenswrapper[4780]: E1210 11:24:36.802430 4780 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"aa89476d55e5b87ebbbd5ca50cfcf60509dbe2460aaa312a15b489cf949341f6\": container with ID starting with aa89476d55e5b87ebbbd5ca50cfcf60509dbe2460aaa312a15b489cf949341f6 not found: ID does not exist" containerID="aa89476d55e5b87ebbbd5ca50cfcf60509dbe2460aaa312a15b489cf949341f6" Dec 10 11:24:36 crc kubenswrapper[4780]: I1210 11:24:36.802462 4780 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"aa89476d55e5b87ebbbd5ca50cfcf60509dbe2460aaa312a15b489cf949341f6"} err="failed to get container status \"aa89476d55e5b87ebbbd5ca50cfcf60509dbe2460aaa312a15b489cf949341f6\": rpc error: code = NotFound desc = could not find container \"aa89476d55e5b87ebbbd5ca50cfcf60509dbe2460aaa312a15b489cf949341f6\": container with ID starting with aa89476d55e5b87ebbbd5ca50cfcf60509dbe2460aaa312a15b489cf949341f6 not found: ID does not exist" Dec 10 11:24:36 crc kubenswrapper[4780]: I1210 11:24:36.802480 4780 scope.go:117] "RemoveContainer" containerID="6fd147e50958a61fa61abed42a615d4e904fccfcfbbc57e4384aeac900116456" Dec 10 11:24:36 crc kubenswrapper[4780]: E1210 11:24:36.803021 4780 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"6fd147e50958a61fa61abed42a615d4e904fccfcfbbc57e4384aeac900116456\": container with ID starting with 6fd147e50958a61fa61abed42a615d4e904fccfcfbbc57e4384aeac900116456 not found: ID does not exist" containerID="6fd147e50958a61fa61abed42a615d4e904fccfcfbbc57e4384aeac900116456" Dec 10 11:24:36 crc kubenswrapper[4780]: I1210 11:24:36.803054 4780 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6fd147e50958a61fa61abed42a615d4e904fccfcfbbc57e4384aeac900116456"} err="failed to get container status \"6fd147e50958a61fa61abed42a615d4e904fccfcfbbc57e4384aeac900116456\": rpc error: code = NotFound desc = could not find container \"6fd147e50958a61fa61abed42a615d4e904fccfcfbbc57e4384aeac900116456\": container with ID starting with 6fd147e50958a61fa61abed42a615d4e904fccfcfbbc57e4384aeac900116456 not found: ID does not exist" Dec 10 11:24:37 crc kubenswrapper[4780]: I1210 11:24:37.068743 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-db-sync-td4vw"] Dec 10 11:24:37 crc kubenswrapper[4780]: I1210 11:24:37.083020 4780 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-db-sync-td4vw"] Dec 10 11:24:37 crc kubenswrapper[4780]: I1210 11:24:37.973878 4780 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4b423690-c3d3-4205-ab70-990085ef5810" path="/var/lib/kubelet/pods/4b423690-c3d3-4205-ab70-990085ef5810/volumes" Dec 10 11:24:38 crc kubenswrapper[4780]: I1210 11:24:37.975463 4780 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d94adae5-67fa-4707-9139-8bd4537a7e77" path="/var/lib/kubelet/pods/d94adae5-67fa-4707-9139-8bd4537a7e77/volumes" Dec 10 11:24:41 crc kubenswrapper[4780]: I1210 11:24:41.288586 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-db-sync-m9l8h"] Dec 10 11:24:41 crc kubenswrapper[4780]: I1210 11:24:41.306278 4780 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-db-sync-m9l8h"] Dec 10 11:24:41 crc kubenswrapper[4780]: I1210 11:24:41.977297 4780 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="626bc022-de20-4c32-ad5b-bd22a54340ce" path="/var/lib/kubelet/pods/626bc022-de20-4c32-ad5b-bd22a54340ce/volumes" Dec 10 11:24:42 crc kubenswrapper[4780]: I1210 11:24:42.621755 4780 scope.go:117] "RemoveContainer" containerID="7bc4e3bae0bf3baf0d6683cdd1adb229b90ea53d949500e9891ac05e2323d784" Dec 10 11:24:42 crc kubenswrapper[4780]: I1210 11:24:42.685747 4780 scope.go:117] "RemoveContainer" containerID="4317dfe2f5cc63cd1b8ee6d404454d8bc94a82afc79a7a33c8e583f41a216306" Dec 10 11:24:42 crc kubenswrapper[4780]: I1210 11:24:42.727679 4780 scope.go:117] "RemoveContainer" containerID="27ce8d4d83dab1897d1108d83fbe0df7c06ef7cf970b5cb8416b7c1cd68fdf4d" Dec 10 11:24:42 crc kubenswrapper[4780]: I1210 11:24:42.792285 4780 scope.go:117] "RemoveContainer" containerID="c0507c16b5ffcf039df86cb1cef6f8cb84cd34ce350c52b957e2bdc58d9d3487" Dec 10 11:24:42 crc kubenswrapper[4780]: I1210 11:24:42.865337 4780 scope.go:117] "RemoveContainer" containerID="c3498a5c7e74020e0637f139a6238376da4b144ea8ae931f302b2b46fb07c135" Dec 10 11:24:42 crc kubenswrapper[4780]: I1210 11:24:42.920401 4780 scope.go:117] "RemoveContainer" containerID="261a7961c51677ddc56262ede3057a53c0782d8ce07324f9493deb6671633f93" Dec 10 11:24:42 crc kubenswrapper[4780]: I1210 11:24:42.982777 4780 scope.go:117] "RemoveContainer" 
containerID="55dd627b492eb039320b792bcaa7636eaa93d533d9ca07479d6db5284a53cb0a" Dec 10 11:24:43 crc kubenswrapper[4780]: I1210 11:24:43.015762 4780 scope.go:117] "RemoveContainer" containerID="1c2623bba33440e660cbfa0e9390743929e58f0f5a740cec224c847161ea0be1" Dec 10 11:24:43 crc kubenswrapper[4780]: I1210 11:24:43.046254 4780 scope.go:117] "RemoveContainer" containerID="80a2100200c8fa235445633119dd010380c3a35c72924ce23b90fa974d894877" Dec 10 11:24:43 crc kubenswrapper[4780]: I1210 11:24:43.076454 4780 scope.go:117] "RemoveContainer" containerID="fc8064f2fffd4a43901b5f8b7568dcfc910c5ad47bc6953e2059698b789f2907" Dec 10 11:24:47 crc kubenswrapper[4780]: E1210 11:24:47.962411 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 11:24:49 crc kubenswrapper[4780]: E1210 11:24:49.362065 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 11:24:57 crc kubenswrapper[4780]: I1210 11:24:57.475473 4780 patch_prober.go:28] interesting pod/machine-config-daemon-xhdr5 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 10 11:24:57 crc kubenswrapper[4780]: I1210 11:24:57.476410 4780 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" podUID="6bf1dca1-b191-4796-b326-baac53e84045" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 10 11:25:01 crc kubenswrapper[4780]: E1210 11:25:01.964438 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 11:25:02 crc kubenswrapper[4780]: E1210 11:25:02.962178 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 11:25:13 crc kubenswrapper[4780]: E1210 11:25:13.963064 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 11:25:14 crc kubenswrapper[4780]: E1210 11:25:14.962157 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for 
\"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 11:25:27 crc kubenswrapper[4780]: I1210 11:25:27.476561 4780 patch_prober.go:28] interesting pod/machine-config-daemon-xhdr5 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 10 11:25:27 crc kubenswrapper[4780]: I1210 11:25:27.477498 4780 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" podUID="6bf1dca1-b191-4796-b326-baac53e84045" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 10 11:25:28 crc kubenswrapper[4780]: E1210 11:25:28.962413 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 11:25:28 crc kubenswrapper[4780]: E1210 11:25:28.962687 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 11:25:40 crc kubenswrapper[4780]: E1210 11:25:40.963228 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 11:25:41 crc kubenswrapper[4780]: E1210 11:25:41.962350 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 11:25:43 crc kubenswrapper[4780]: I1210 11:25:43.616559 4780 scope.go:117] "RemoveContainer" containerID="04c6453086c9006225a89468ef8172eeb34cc49a69eb5b5d8d50eb37105d630a" Dec 10 11:25:43 crc kubenswrapper[4780]: I1210 11:25:43.660638 4780 scope.go:117] "RemoveContainer" containerID="695dec36217ee2ffa30feaa495b93820a10b03452cbb95b902ac64304276af8d" Dec 10 11:25:50 crc kubenswrapper[4780]: I1210 11:25:50.100519 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-db-sync-hzmqc"] Dec 10 11:25:50 crc kubenswrapper[4780]: I1210 11:25:50.117366 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-bootstrap-x57qm"] Dec 10 11:25:50 crc kubenswrapper[4780]: I1210 11:25:50.134109 4780 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-db-sync-hzmqc"] Dec 10 11:25:50 crc kubenswrapper[4780]: I1210 11:25:50.146844 4780 kubelet.go:2431] "SyncLoop REMOVE" source="api" 
pods=["openstack/keystone-bootstrap-x57qm"] Dec 10 11:25:51 crc kubenswrapper[4780]: I1210 11:25:51.975151 4780 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="61593545-0480-4729-b6d1-ba4089e68f7a" path="/var/lib/kubelet/pods/61593545-0480-4729-b6d1-ba4089e68f7a/volumes" Dec 10 11:25:51 crc kubenswrapper[4780]: I1210 11:25:51.977575 4780 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="79f8dd0a-b900-4a3b-a2ab-262861daedc6" path="/var/lib/kubelet/pods/79f8dd0a-b900-4a3b-a2ab-262861daedc6/volumes" Dec 10 11:25:53 crc kubenswrapper[4780]: E1210 11:25:53.962443 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 11:25:55 crc kubenswrapper[4780]: E1210 11:25:55.973036 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 11:25:57 crc kubenswrapper[4780]: I1210 11:25:57.476357 4780 patch_prober.go:28] interesting pod/machine-config-daemon-xhdr5 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 10 11:25:57 crc kubenswrapper[4780]: I1210 11:25:57.476958 4780 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" podUID="6bf1dca1-b191-4796-b326-baac53e84045" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 10 11:25:57 crc kubenswrapper[4780]: I1210 11:25:57.477041 4780 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" Dec 10 11:25:57 crc kubenswrapper[4780]: I1210 11:25:57.478600 4780 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"c10af5b7a9461382e4108cdd2bb73a0e03a7bf28be1decd85891fe9f2dbf6208"} pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Dec 10 11:25:57 crc kubenswrapper[4780]: I1210 11:25:57.478726 4780 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" podUID="6bf1dca1-b191-4796-b326-baac53e84045" containerName="machine-config-daemon" containerID="cri-o://c10af5b7a9461382e4108cdd2bb73a0e03a7bf28be1decd85891fe9f2dbf6208" gracePeriod=600 Dec 10 11:25:58 crc kubenswrapper[4780]: E1210 11:25:58.774484 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xhdr5_openshift-machine-config-operator(6bf1dca1-b191-4796-b326-baac53e84045)\"" 
pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" podUID="6bf1dca1-b191-4796-b326-baac53e84045" Dec 10 11:25:59 crc kubenswrapper[4780]: I1210 11:25:59.186216 4780 generic.go:334] "Generic (PLEG): container finished" podID="6bf1dca1-b191-4796-b326-baac53e84045" containerID="c10af5b7a9461382e4108cdd2bb73a0e03a7bf28be1decd85891fe9f2dbf6208" exitCode=0 Dec 10 11:25:59 crc kubenswrapper[4780]: I1210 11:25:59.186319 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" event={"ID":"6bf1dca1-b191-4796-b326-baac53e84045","Type":"ContainerDied","Data":"c10af5b7a9461382e4108cdd2bb73a0e03a7bf28be1decd85891fe9f2dbf6208"} Dec 10 11:25:59 crc kubenswrapper[4780]: I1210 11:25:59.186411 4780 scope.go:117] "RemoveContainer" containerID="a4c56cbf13f0b58a88ae470c0b33e021ebda5393c2067f7ccb2e5ac1ebff5108" Dec 10 11:25:59 crc kubenswrapper[4780]: I1210 11:25:59.191210 4780 scope.go:117] "RemoveContainer" containerID="c10af5b7a9461382e4108cdd2bb73a0e03a7bf28be1decd85891fe9f2dbf6208" Dec 10 11:25:59 crc kubenswrapper[4780]: E1210 11:25:59.191857 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xhdr5_openshift-machine-config-operator(6bf1dca1-b191-4796-b326-baac53e84045)\"" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" podUID="6bf1dca1-b191-4796-b326-baac53e84045" Dec 10 11:26:05 crc kubenswrapper[4780]: I1210 11:26:05.288138 4780 generic.go:334] "Generic (PLEG): container finished" podID="51663ab7-946b-4356-b694-5ba7132781f4" containerID="2ea8d02715fa0c0643c8aa8e41af81a506916a8ba477b75124c902b883f1e6b1" exitCode=0 Dec 10 11:26:05 crc kubenswrapper[4780]: I1210 11:26:05.288285 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-5jtvv" event={"ID":"51663ab7-946b-4356-b694-5ba7132781f4","Type":"ContainerDied","Data":"2ea8d02715fa0c0643c8aa8e41af81a506916a8ba477b75124c902b883f1e6b1"} Dec 10 11:26:06 crc kubenswrapper[4780]: I1210 11:26:06.938228 4780 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-5jtvv" Dec 10 11:26:07 crc kubenswrapper[4780]: I1210 11:26:07.083230 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/51663ab7-946b-4356-b694-5ba7132781f4-bootstrap-combined-ca-bundle\") pod \"51663ab7-946b-4356-b694-5ba7132781f4\" (UID: \"51663ab7-946b-4356-b694-5ba7132781f4\") " Dec 10 11:26:07 crc kubenswrapper[4780]: I1210 11:26:07.083496 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/51663ab7-946b-4356-b694-5ba7132781f4-inventory\") pod \"51663ab7-946b-4356-b694-5ba7132781f4\" (UID: \"51663ab7-946b-4356-b694-5ba7132781f4\") " Dec 10 11:26:07 crc kubenswrapper[4780]: I1210 11:26:07.083827 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mvdl9\" (UniqueName: \"kubernetes.io/projected/51663ab7-946b-4356-b694-5ba7132781f4-kube-api-access-mvdl9\") pod \"51663ab7-946b-4356-b694-5ba7132781f4\" (UID: \"51663ab7-946b-4356-b694-5ba7132781f4\") " Dec 10 11:26:07 crc kubenswrapper[4780]: I1210 11:26:07.083868 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/51663ab7-946b-4356-b694-5ba7132781f4-ssh-key\") pod \"51663ab7-946b-4356-b694-5ba7132781f4\" (UID: \"51663ab7-946b-4356-b694-5ba7132781f4\") " Dec 10 11:26:07 crc kubenswrapper[4780]: I1210 11:26:07.092188 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/51663ab7-946b-4356-b694-5ba7132781f4-kube-api-access-mvdl9" (OuterVolumeSpecName: "kube-api-access-mvdl9") pod "51663ab7-946b-4356-b694-5ba7132781f4" (UID: "51663ab7-946b-4356-b694-5ba7132781f4"). InnerVolumeSpecName "kube-api-access-mvdl9". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:26:07 crc kubenswrapper[4780]: I1210 11:26:07.092610 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/51663ab7-946b-4356-b694-5ba7132781f4-bootstrap-combined-ca-bundle" (OuterVolumeSpecName: "bootstrap-combined-ca-bundle") pod "51663ab7-946b-4356-b694-5ba7132781f4" (UID: "51663ab7-946b-4356-b694-5ba7132781f4"). InnerVolumeSpecName "bootstrap-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:26:07 crc kubenswrapper[4780]: I1210 11:26:07.128425 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/51663ab7-946b-4356-b694-5ba7132781f4-inventory" (OuterVolumeSpecName: "inventory") pod "51663ab7-946b-4356-b694-5ba7132781f4" (UID: "51663ab7-946b-4356-b694-5ba7132781f4"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:26:07 crc kubenswrapper[4780]: I1210 11:26:07.130344 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/51663ab7-946b-4356-b694-5ba7132781f4-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "51663ab7-946b-4356-b694-5ba7132781f4" (UID: "51663ab7-946b-4356-b694-5ba7132781f4"). InnerVolumeSpecName "ssh-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:26:07 crc kubenswrapper[4780]: I1210 11:26:07.198891 4780 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mvdl9\" (UniqueName: \"kubernetes.io/projected/51663ab7-946b-4356-b694-5ba7132781f4-kube-api-access-mvdl9\") on node \"crc\" DevicePath \"\"" Dec 10 11:26:07 crc kubenswrapper[4780]: I1210 11:26:07.198967 4780 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/51663ab7-946b-4356-b694-5ba7132781f4-ssh-key\") on node \"crc\" DevicePath \"\"" Dec 10 11:26:07 crc kubenswrapper[4780]: I1210 11:26:07.198987 4780 reconciler_common.go:293] "Volume detached for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/51663ab7-946b-4356-b694-5ba7132781f4-bootstrap-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 10 11:26:07 crc kubenswrapper[4780]: I1210 11:26:07.199000 4780 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/51663ab7-946b-4356-b694-5ba7132781f4-inventory\") on node \"crc\" DevicePath \"\"" Dec 10 11:26:07 crc kubenswrapper[4780]: I1210 11:26:07.319901 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-5jtvv" event={"ID":"51663ab7-946b-4356-b694-5ba7132781f4","Type":"ContainerDied","Data":"fa98564ba4c3540c237d6e9bc8ed65c5d63155372bff6b5519a35cfaf1e40afc"} Dec 10 11:26:07 crc kubenswrapper[4780]: I1210 11:26:07.320020 4780 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-5jtvv" Dec 10 11:26:07 crc kubenswrapper[4780]: I1210 11:26:07.320271 4780 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="fa98564ba4c3540c237d6e9bc8ed65c5d63155372bff6b5519a35cfaf1e40afc" Dec 10 11:26:07 crc kubenswrapper[4780]: I1210 11:26:07.481312 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/download-cache-edpm-deployment-openstack-edpm-ipam-wmlrk"] Dec 10 11:26:07 crc kubenswrapper[4780]: E1210 11:26:07.482081 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4b423690-c3d3-4205-ab70-990085ef5810" containerName="extract-content" Dec 10 11:26:07 crc kubenswrapper[4780]: I1210 11:26:07.482106 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="4b423690-c3d3-4205-ab70-990085ef5810" containerName="extract-content" Dec 10 11:26:07 crc kubenswrapper[4780]: E1210 11:26:07.482150 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4b423690-c3d3-4205-ab70-990085ef5810" containerName="registry-server" Dec 10 11:26:07 crc kubenswrapper[4780]: I1210 11:26:07.482162 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="4b423690-c3d3-4205-ab70-990085ef5810" containerName="registry-server" Dec 10 11:26:07 crc kubenswrapper[4780]: E1210 11:26:07.482191 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="51663ab7-946b-4356-b694-5ba7132781f4" containerName="bootstrap-edpm-deployment-openstack-edpm-ipam" Dec 10 11:26:07 crc kubenswrapper[4780]: I1210 11:26:07.482204 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="51663ab7-946b-4356-b694-5ba7132781f4" containerName="bootstrap-edpm-deployment-openstack-edpm-ipam" Dec 10 11:26:07 crc kubenswrapper[4780]: E1210 11:26:07.482222 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4b423690-c3d3-4205-ab70-990085ef5810" containerName="extract-utilities" Dec 10 
11:26:07 crc kubenswrapper[4780]: I1210 11:26:07.482230 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="4b423690-c3d3-4205-ab70-990085ef5810" containerName="extract-utilities" Dec 10 11:26:07 crc kubenswrapper[4780]: I1210 11:26:07.482639 4780 memory_manager.go:354] "RemoveStaleState removing state" podUID="51663ab7-946b-4356-b694-5ba7132781f4" containerName="bootstrap-edpm-deployment-openstack-edpm-ipam" Dec 10 11:26:07 crc kubenswrapper[4780]: I1210 11:26:07.482664 4780 memory_manager.go:354] "RemoveStaleState removing state" podUID="4b423690-c3d3-4205-ab70-990085ef5810" containerName="registry-server" Dec 10 11:26:07 crc kubenswrapper[4780]: I1210 11:26:07.483905 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-wmlrk" Dec 10 11:26:07 crc kubenswrapper[4780]: I1210 11:26:07.493270 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Dec 10 11:26:07 crc kubenswrapper[4780]: I1210 11:26:07.493324 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Dec 10 11:26:07 crc kubenswrapper[4780]: I1210 11:26:07.494184 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-p2qrb" Dec 10 11:26:07 crc kubenswrapper[4780]: I1210 11:26:07.494544 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Dec 10 11:26:07 crc kubenswrapper[4780]: I1210 11:26:07.547347 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/download-cache-edpm-deployment-openstack-edpm-ipam-wmlrk"] Dec 10 11:26:07 crc kubenswrapper[4780]: I1210 11:26:07.616436 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/eaf4555e-3d94-4509-b81c-2de2321cff58-ssh-key\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-wmlrk\" (UID: \"eaf4555e-3d94-4509-b81c-2de2321cff58\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-wmlrk" Dec 10 11:26:07 crc kubenswrapper[4780]: I1210 11:26:07.616716 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4jsls\" (UniqueName: \"kubernetes.io/projected/eaf4555e-3d94-4509-b81c-2de2321cff58-kube-api-access-4jsls\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-wmlrk\" (UID: \"eaf4555e-3d94-4509-b81c-2de2321cff58\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-wmlrk" Dec 10 11:26:07 crc kubenswrapper[4780]: I1210 11:26:07.617164 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/eaf4555e-3d94-4509-b81c-2de2321cff58-inventory\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-wmlrk\" (UID: \"eaf4555e-3d94-4509-b81c-2de2321cff58\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-wmlrk" Dec 10 11:26:07 crc kubenswrapper[4780]: I1210 11:26:07.720420 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/eaf4555e-3d94-4509-b81c-2de2321cff58-ssh-key\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-wmlrk\" (UID: \"eaf4555e-3d94-4509-b81c-2de2321cff58\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-wmlrk" Dec 
10 11:26:07 crc kubenswrapper[4780]: I1210 11:26:07.720624 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4jsls\" (UniqueName: \"kubernetes.io/projected/eaf4555e-3d94-4509-b81c-2de2321cff58-kube-api-access-4jsls\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-wmlrk\" (UID: \"eaf4555e-3d94-4509-b81c-2de2321cff58\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-wmlrk" Dec 10 11:26:07 crc kubenswrapper[4780]: I1210 11:26:07.720801 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/eaf4555e-3d94-4509-b81c-2de2321cff58-inventory\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-wmlrk\" (UID: \"eaf4555e-3d94-4509-b81c-2de2321cff58\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-wmlrk" Dec 10 11:26:07 crc kubenswrapper[4780]: I1210 11:26:07.728450 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/eaf4555e-3d94-4509-b81c-2de2321cff58-inventory\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-wmlrk\" (UID: \"eaf4555e-3d94-4509-b81c-2de2321cff58\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-wmlrk" Dec 10 11:26:07 crc kubenswrapper[4780]: I1210 11:26:07.732746 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/eaf4555e-3d94-4509-b81c-2de2321cff58-ssh-key\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-wmlrk\" (UID: \"eaf4555e-3d94-4509-b81c-2de2321cff58\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-wmlrk" Dec 10 11:26:07 crc kubenswrapper[4780]: I1210 11:26:07.750499 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4jsls\" (UniqueName: \"kubernetes.io/projected/eaf4555e-3d94-4509-b81c-2de2321cff58-kube-api-access-4jsls\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-wmlrk\" (UID: \"eaf4555e-3d94-4509-b81c-2de2321cff58\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-wmlrk" Dec 10 11:26:07 crc kubenswrapper[4780]: I1210 11:26:07.819798 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-wmlrk" Dec 10 11:26:07 crc kubenswrapper[4780]: E1210 11:26:07.963796 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 11:26:08 crc kubenswrapper[4780]: I1210 11:26:08.526557 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/download-cache-edpm-deployment-openstack-edpm-ipam-wmlrk"] Dec 10 11:26:08 crc kubenswrapper[4780]: E1210 11:26:08.960812 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 11:26:09 crc kubenswrapper[4780]: I1210 11:26:09.356547 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-wmlrk" event={"ID":"eaf4555e-3d94-4509-b81c-2de2321cff58","Type":"ContainerStarted","Data":"9f99dbf6f4b834a001a1934c8f4c031bf32d90d2b2a57df5d95970490034fcab"} Dec 10 11:26:11 crc kubenswrapper[4780]: I1210 11:26:11.081188 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-db-sync-hn59m"] Dec 10 11:26:11 crc kubenswrapper[4780]: I1210 11:26:11.098632 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-db-sync-cwb2h"] Dec 10 11:26:11 crc kubenswrapper[4780]: I1210 11:26:11.113264 4780 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-db-sync-hn59m"] Dec 10 11:26:11 crc kubenswrapper[4780]: I1210 11:26:11.123846 4780 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-db-sync-cwb2h"] Dec 10 11:26:11 crc kubenswrapper[4780]: I1210 11:26:11.959642 4780 scope.go:117] "RemoveContainer" containerID="c10af5b7a9461382e4108cdd2bb73a0e03a7bf28be1decd85891fe9f2dbf6208" Dec 10 11:26:11 crc kubenswrapper[4780]: E1210 11:26:11.960839 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xhdr5_openshift-machine-config-operator(6bf1dca1-b191-4796-b326-baac53e84045)\"" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" podUID="6bf1dca1-b191-4796-b326-baac53e84045" Dec 10 11:26:11 crc kubenswrapper[4780]: I1210 11:26:11.981424 4780 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0613207e-c071-4295-a536-f037ee6fe446" path="/var/lib/kubelet/pods/0613207e-c071-4295-a536-f037ee6fe446/volumes" Dec 10 11:26:11 crc kubenswrapper[4780]: I1210 11:26:11.982729 4780 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1af5d3ff-e54a-4a1a-89f4-b105fe6dbbd9" path="/var/lib/kubelet/pods/1af5d3ff-e54a-4a1a-89f4-b105fe6dbbd9/volumes" Dec 10 11:26:12 crc kubenswrapper[4780]: I1210 11:26:12.412835 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-wmlrk" 
event={"ID":"eaf4555e-3d94-4509-b81c-2de2321cff58","Type":"ContainerStarted","Data":"d446744984d0168ee1d4e57ca4f6b6fe1b4b75c9ac6e1c9e9e8ddc9a86a05069"} Dec 10 11:26:12 crc kubenswrapper[4780]: I1210 11:26:12.447321 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-wmlrk" podStartSLOduration=2.9712079769999997 podStartE2EDuration="5.447291544s" podCreationTimestamp="2025-12-10 11:26:07 +0000 UTC" firstStartedPulling="2025-12-10 11:26:08.537629329 +0000 UTC m=+2473.391022772" lastFinishedPulling="2025-12-10 11:26:11.013712896 +0000 UTC m=+2475.867106339" observedRunningTime="2025-12-10 11:26:12.437369711 +0000 UTC m=+2477.290763154" watchObservedRunningTime="2025-12-10 11:26:12.447291544 +0000 UTC m=+2477.300684997" Dec 10 11:26:13 crc kubenswrapper[4780]: I1210 11:26:13.984491 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-8lnvf"] Dec 10 11:26:13 crc kubenswrapper[4780]: I1210 11:26:13.989477 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8lnvf" Dec 10 11:26:13 crc kubenswrapper[4780]: I1210 11:26:13.998794 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-8lnvf"] Dec 10 11:26:14 crc kubenswrapper[4780]: I1210 11:26:14.021093 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qvr4c\" (UniqueName: \"kubernetes.io/projected/6465726d-2c55-4619-8cf1-3e4e28e05580-kube-api-access-qvr4c\") pod \"community-operators-8lnvf\" (UID: \"6465726d-2c55-4619-8cf1-3e4e28e05580\") " pod="openshift-marketplace/community-operators-8lnvf" Dec 10 11:26:14 crc kubenswrapper[4780]: I1210 11:26:14.022599 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6465726d-2c55-4619-8cf1-3e4e28e05580-utilities\") pod \"community-operators-8lnvf\" (UID: \"6465726d-2c55-4619-8cf1-3e4e28e05580\") " pod="openshift-marketplace/community-operators-8lnvf" Dec 10 11:26:14 crc kubenswrapper[4780]: I1210 11:26:14.022776 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6465726d-2c55-4619-8cf1-3e4e28e05580-catalog-content\") pod \"community-operators-8lnvf\" (UID: \"6465726d-2c55-4619-8cf1-3e4e28e05580\") " pod="openshift-marketplace/community-operators-8lnvf" Dec 10 11:26:14 crc kubenswrapper[4780]: I1210 11:26:14.127483 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6465726d-2c55-4619-8cf1-3e4e28e05580-utilities\") pod \"community-operators-8lnvf\" (UID: \"6465726d-2c55-4619-8cf1-3e4e28e05580\") " pod="openshift-marketplace/community-operators-8lnvf" Dec 10 11:26:14 crc kubenswrapper[4780]: I1210 11:26:14.127686 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6465726d-2c55-4619-8cf1-3e4e28e05580-catalog-content\") pod \"community-operators-8lnvf\" (UID: \"6465726d-2c55-4619-8cf1-3e4e28e05580\") " pod="openshift-marketplace/community-operators-8lnvf" Dec 10 11:26:14 crc kubenswrapper[4780]: I1210 11:26:14.127972 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qvr4c\" 
(UniqueName: \"kubernetes.io/projected/6465726d-2c55-4619-8cf1-3e4e28e05580-kube-api-access-qvr4c\") pod \"community-operators-8lnvf\" (UID: \"6465726d-2c55-4619-8cf1-3e4e28e05580\") " pod="openshift-marketplace/community-operators-8lnvf" Dec 10 11:26:14 crc kubenswrapper[4780]: I1210 11:26:14.128551 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6465726d-2c55-4619-8cf1-3e4e28e05580-utilities\") pod \"community-operators-8lnvf\" (UID: \"6465726d-2c55-4619-8cf1-3e4e28e05580\") " pod="openshift-marketplace/community-operators-8lnvf" Dec 10 11:26:14 crc kubenswrapper[4780]: I1210 11:26:14.128802 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6465726d-2c55-4619-8cf1-3e4e28e05580-catalog-content\") pod \"community-operators-8lnvf\" (UID: \"6465726d-2c55-4619-8cf1-3e4e28e05580\") " pod="openshift-marketplace/community-operators-8lnvf" Dec 10 11:26:14 crc kubenswrapper[4780]: I1210 11:26:14.156277 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qvr4c\" (UniqueName: \"kubernetes.io/projected/6465726d-2c55-4619-8cf1-3e4e28e05580-kube-api-access-qvr4c\") pod \"community-operators-8lnvf\" (UID: \"6465726d-2c55-4619-8cf1-3e4e28e05580\") " pod="openshift-marketplace/community-operators-8lnvf" Dec 10 11:26:14 crc kubenswrapper[4780]: I1210 11:26:14.328946 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8lnvf" Dec 10 11:26:15 crc kubenswrapper[4780]: I1210 11:26:15.064575 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-8lnvf"] Dec 10 11:26:16 crc kubenswrapper[4780]: W1210 11:26:16.104254 4780 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod6465726d_2c55_4619_8cf1_3e4e28e05580.slice/crio-5487bbcadbd044e53f4ea6c5de5075d6946fb7800c0c7aebcf916b99e4ab4038 WatchSource:0}: Error finding container 5487bbcadbd044e53f4ea6c5de5075d6946fb7800c0c7aebcf916b99e4ab4038: Status 404 returned error can't find the container with id 5487bbcadbd044e53f4ea6c5de5075d6946fb7800c0c7aebcf916b99e4ab4038 Dec 10 11:26:16 crc kubenswrapper[4780]: I1210 11:26:16.517507 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-8lnvf" event={"ID":"6465726d-2c55-4619-8cf1-3e4e28e05580","Type":"ContainerStarted","Data":"5487bbcadbd044e53f4ea6c5de5075d6946fb7800c0c7aebcf916b99e4ab4038"} Dec 10 11:26:17 crc kubenswrapper[4780]: E1210 11:26:17.271163 4780 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod6465726d_2c55_4619_8cf1_3e4e28e05580.slice/crio-conmon-c34c7d0fecda90e37c32d9d615bfc211d8b69706abaa7ee9dbdaad22afc1db70.scope\": RecentStats: unable to find data in memory cache]" Dec 10 11:26:17 crc kubenswrapper[4780]: I1210 11:26:17.535343 4780 generic.go:334] "Generic (PLEG): container finished" podID="6465726d-2c55-4619-8cf1-3e4e28e05580" containerID="c34c7d0fecda90e37c32d9d615bfc211d8b69706abaa7ee9dbdaad22afc1db70" exitCode=0 Dec 10 11:26:17 crc kubenswrapper[4780]: I1210 11:26:17.535431 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-8lnvf" 
event={"ID":"6465726d-2c55-4619-8cf1-3e4e28e05580","Type":"ContainerDied","Data":"c34c7d0fecda90e37c32d9d615bfc211d8b69706abaa7ee9dbdaad22afc1db70"} Dec 10 11:26:19 crc kubenswrapper[4780]: I1210 11:26:19.569171 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-8lnvf" event={"ID":"6465726d-2c55-4619-8cf1-3e4e28e05580","Type":"ContainerStarted","Data":"dd9afff69cfd070df9740a4c18d48ce3cbfb230844d4cf9675078aeb0980009a"} Dec 10 11:26:19 crc kubenswrapper[4780]: E1210 11:26:19.962094 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 11:26:21 crc kubenswrapper[4780]: I1210 11:26:21.620305 4780 generic.go:334] "Generic (PLEG): container finished" podID="6465726d-2c55-4619-8cf1-3e4e28e05580" containerID="dd9afff69cfd070df9740a4c18d48ce3cbfb230844d4cf9675078aeb0980009a" exitCode=0 Dec 10 11:26:21 crc kubenswrapper[4780]: I1210 11:26:21.620698 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-8lnvf" event={"ID":"6465726d-2c55-4619-8cf1-3e4e28e05580","Type":"ContainerDied","Data":"dd9afff69cfd070df9740a4c18d48ce3cbfb230844d4cf9675078aeb0980009a"} Dec 10 11:26:21 crc kubenswrapper[4780]: E1210 11:26:21.961769 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 11:26:22 crc kubenswrapper[4780]: I1210 11:26:22.960313 4780 scope.go:117] "RemoveContainer" containerID="c10af5b7a9461382e4108cdd2bb73a0e03a7bf28be1decd85891fe9f2dbf6208" Dec 10 11:26:22 crc kubenswrapper[4780]: E1210 11:26:22.962327 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xhdr5_openshift-machine-config-operator(6bf1dca1-b191-4796-b326-baac53e84045)\"" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" podUID="6bf1dca1-b191-4796-b326-baac53e84045" Dec 10 11:26:24 crc kubenswrapper[4780]: I1210 11:26:24.043313 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-db-sync-wqs8s"] Dec 10 11:26:24 crc kubenswrapper[4780]: I1210 11:26:24.057732 4780 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-db-sync-wqs8s"] Dec 10 11:26:24 crc kubenswrapper[4780]: I1210 11:26:24.666321 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-8lnvf" event={"ID":"6465726d-2c55-4619-8cf1-3e4e28e05580","Type":"ContainerStarted","Data":"4d3e76b18613f0aed504dc6d72d1e3e40aefa4b55748808d2e55bd00027b73bc"} Dec 10 11:26:25 crc kubenswrapper[4780]: I1210 11:26:25.714181 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-8lnvf" podStartSLOduration=6.073181931 podStartE2EDuration="12.714154707s" podCreationTimestamp="2025-12-10 11:26:13 +0000 UTC" firstStartedPulling="2025-12-10 11:26:17.539225088 +0000 UTC m=+2482.392618531" 
lastFinishedPulling="2025-12-10 11:26:24.180197864 +0000 UTC m=+2489.033591307" observedRunningTime="2025-12-10 11:26:25.705536357 +0000 UTC m=+2490.558929810" watchObservedRunningTime="2025-12-10 11:26:25.714154707 +0000 UTC m=+2490.567548150" Dec 10 11:26:25 crc kubenswrapper[4780]: I1210 11:26:25.977869 4780 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f2a06360-9c37-4ae4-8148-73c37d2be5a4" path="/var/lib/kubelet/pods/f2a06360-9c37-4ae4-8148-73c37d2be5a4/volumes" Dec 10 11:26:33 crc kubenswrapper[4780]: E1210 11:26:33.963428 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 11:26:34 crc kubenswrapper[4780]: I1210 11:26:34.331189 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-8lnvf" Dec 10 11:26:34 crc kubenswrapper[4780]: I1210 11:26:34.331272 4780 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-8lnvf" Dec 10 11:26:34 crc kubenswrapper[4780]: I1210 11:26:34.410642 4780 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-8lnvf" Dec 10 11:26:35 crc kubenswrapper[4780]: I1210 11:26:35.077531 4780 scope.go:117] "RemoveContainer" containerID="c10af5b7a9461382e4108cdd2bb73a0e03a7bf28be1decd85891fe9f2dbf6208" Dec 10 11:26:35 crc kubenswrapper[4780]: E1210 11:26:35.078019 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xhdr5_openshift-machine-config-operator(6bf1dca1-b191-4796-b326-baac53e84045)\"" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" podUID="6bf1dca1-b191-4796-b326-baac53e84045" Dec 10 11:26:35 crc kubenswrapper[4780]: I1210 11:26:35.111716 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-8lnvf" Dec 10 11:26:35 crc kubenswrapper[4780]: E1210 11:26:35.212373 4780 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested" Dec 10 11:26:35 crc kubenswrapper[4780]: E1210 11:26:35.212501 4780 kuberuntime_image.go:55] "Failed to pull image" err="initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested" Dec 10 11:26:35 crc kubenswrapper[4780]: E1210 11:26:35.212821 4780 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:heat-db-sync,Image:quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested,Command:[/bin/bash],Args:[-c /usr/bin/heat-manage --config-dir /etc/heat/heat.conf.d db_sync],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:true,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/heat/heat.conf.d/00-default.conf,SubPath:00-default.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:false,MountPath:/etc/heat/heat.conf.d/01-custom.conf,SubPath:01-custom.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/my.cnf,SubPath:my.cnf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-fh6ms,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42418,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:*42418,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod heat-db-sync-nd4t7_openstack(4ba2892c-316e-4819-a33c-d7b2b6803553): ErrImagePull: initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" logger="UnhandledError" Dec 10 11:26:35 crc kubenswrapper[4780]: E1210 11:26:35.214278 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ErrImagePull: \"initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 11:26:35 crc kubenswrapper[4780]: I1210 11:26:35.215351 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-8lnvf"] Dec 10 11:26:36 crc kubenswrapper[4780]: I1210 11:26:36.843848 4780 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-8lnvf" podUID="6465726d-2c55-4619-8cf1-3e4e28e05580" containerName="registry-server" containerID="cri-o://4d3e76b18613f0aed504dc6d72d1e3e40aefa4b55748808d2e55bd00027b73bc" gracePeriod=2 Dec 10 11:26:37 crc kubenswrapper[4780]: I1210 11:26:37.864362 4780 generic.go:334] "Generic (PLEG): container finished" podID="6465726d-2c55-4619-8cf1-3e4e28e05580" containerID="4d3e76b18613f0aed504dc6d72d1e3e40aefa4b55748808d2e55bd00027b73bc" exitCode=0 Dec 10 11:26:37 crc kubenswrapper[4780]: I1210 11:26:37.864512 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-8lnvf" event={"ID":"6465726d-2c55-4619-8cf1-3e4e28e05580","Type":"ContainerDied","Data":"4d3e76b18613f0aed504dc6d72d1e3e40aefa4b55748808d2e55bd00027b73bc"} Dec 10 11:26:38 crc kubenswrapper[4780]: I1210 11:26:38.637877 4780 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8lnvf" Dec 10 11:26:38 crc kubenswrapper[4780]: I1210 11:26:38.695237 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qvr4c\" (UniqueName: \"kubernetes.io/projected/6465726d-2c55-4619-8cf1-3e4e28e05580-kube-api-access-qvr4c\") pod \"6465726d-2c55-4619-8cf1-3e4e28e05580\" (UID: \"6465726d-2c55-4619-8cf1-3e4e28e05580\") " Dec 10 11:26:38 crc kubenswrapper[4780]: I1210 11:26:38.695343 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6465726d-2c55-4619-8cf1-3e4e28e05580-catalog-content\") pod \"6465726d-2c55-4619-8cf1-3e4e28e05580\" (UID: \"6465726d-2c55-4619-8cf1-3e4e28e05580\") " Dec 10 11:26:38 crc kubenswrapper[4780]: I1210 11:26:38.695666 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6465726d-2c55-4619-8cf1-3e4e28e05580-utilities\") pod \"6465726d-2c55-4619-8cf1-3e4e28e05580\" (UID: \"6465726d-2c55-4619-8cf1-3e4e28e05580\") " Dec 10 11:26:38 crc kubenswrapper[4780]: I1210 11:26:38.697588 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6465726d-2c55-4619-8cf1-3e4e28e05580-utilities" (OuterVolumeSpecName: "utilities") pod "6465726d-2c55-4619-8cf1-3e4e28e05580" (UID: "6465726d-2c55-4619-8cf1-3e4e28e05580"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 11:26:38 crc kubenswrapper[4780]: I1210 11:26:38.708321 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6465726d-2c55-4619-8cf1-3e4e28e05580-kube-api-access-qvr4c" (OuterVolumeSpecName: "kube-api-access-qvr4c") pod "6465726d-2c55-4619-8cf1-3e4e28e05580" (UID: "6465726d-2c55-4619-8cf1-3e4e28e05580"). InnerVolumeSpecName "kube-api-access-qvr4c". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:26:38 crc kubenswrapper[4780]: I1210 11:26:38.772513 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6465726d-2c55-4619-8cf1-3e4e28e05580-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "6465726d-2c55-4619-8cf1-3e4e28e05580" (UID: "6465726d-2c55-4619-8cf1-3e4e28e05580"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 11:26:38 crc kubenswrapper[4780]: I1210 11:26:38.798551 4780 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6465726d-2c55-4619-8cf1-3e4e28e05580-utilities\") on node \"crc\" DevicePath \"\"" Dec 10 11:26:38 crc kubenswrapper[4780]: I1210 11:26:38.798612 4780 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qvr4c\" (UniqueName: \"kubernetes.io/projected/6465726d-2c55-4619-8cf1-3e4e28e05580-kube-api-access-qvr4c\") on node \"crc\" DevicePath \"\"" Dec 10 11:26:38 crc kubenswrapper[4780]: I1210 11:26:38.798629 4780 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6465726d-2c55-4619-8cf1-3e4e28e05580-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 10 11:26:38 crc kubenswrapper[4780]: I1210 11:26:38.884351 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-8lnvf" event={"ID":"6465726d-2c55-4619-8cf1-3e4e28e05580","Type":"ContainerDied","Data":"5487bbcadbd044e53f4ea6c5de5075d6946fb7800c0c7aebcf916b99e4ab4038"} Dec 10 11:26:38 crc kubenswrapper[4780]: I1210 11:26:38.884434 4780 scope.go:117] "RemoveContainer" containerID="4d3e76b18613f0aed504dc6d72d1e3e40aefa4b55748808d2e55bd00027b73bc" Dec 10 11:26:38 crc kubenswrapper[4780]: I1210 11:26:38.884452 4780 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-8lnvf" Dec 10 11:26:38 crc kubenswrapper[4780]: I1210 11:26:38.921451 4780 scope.go:117] "RemoveContainer" containerID="dd9afff69cfd070df9740a4c18d48ce3cbfb230844d4cf9675078aeb0980009a" Dec 10 11:26:38 crc kubenswrapper[4780]: I1210 11:26:38.933668 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-8lnvf"] Dec 10 11:26:38 crc kubenswrapper[4780]: I1210 11:26:38.950777 4780 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-8lnvf"] Dec 10 11:26:38 crc kubenswrapper[4780]: I1210 11:26:38.960811 4780 scope.go:117] "RemoveContainer" containerID="c34c7d0fecda90e37c32d9d615bfc211d8b69706abaa7ee9dbdaad22afc1db70" Dec 10 11:26:39 crc kubenswrapper[4780]: I1210 11:26:39.976589 4780 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6465726d-2c55-4619-8cf1-3e4e28e05580" path="/var/lib/kubelet/pods/6465726d-2c55-4619-8cf1-3e4e28e05580/volumes" Dec 10 11:26:43 crc kubenswrapper[4780]: I1210 11:26:43.756827 4780 scope.go:117] "RemoveContainer" containerID="b46304c13c5a74684f5610ec4bd7241e0821086de5aef6ae57a8dc81ad1d8397" Dec 10 11:26:43 crc kubenswrapper[4780]: I1210 11:26:43.807582 4780 scope.go:117] "RemoveContainer" containerID="3d15fd52a10848d5989279b9af385fb02e6adedff6cbb19540bcc05ea3be56ee" Dec 10 11:26:43 crc kubenswrapper[4780]: I1210 11:26:43.885164 4780 scope.go:117] "RemoveContainer" containerID="3502d3dd4df4120fbd326913b61950fb7220222a5f3c9dd175ae7dbd20b0f414" Dec 10 11:26:43 crc kubenswrapper[4780]: I1210 11:26:43.968311 4780 scope.go:117] "RemoveContainer" containerID="48344f280bdf19a5857712446887c552fa4f17973c1275cd703846f6e6ceb87f" Dec 10 11:26:44 crc kubenswrapper[4780]: I1210 11:26:44.020606 4780 scope.go:117] "RemoveContainer" containerID="1d5fb6b09dace4e96b104c1de5d8b4873d0327254dc4f21ad87b3a00f55bdb0d" Dec 10 11:26:44 crc kubenswrapper[4780]: I1210 11:26:44.085726 4780 scope.go:117] "RemoveContainer" containerID="a903948cf1b99b4467725000fe94ec620a61470054b3a7bcbacc76574cc99abc" Dec 10 11:26:44 crc kubenswrapper[4780]: I1210 11:26:44.143869 4780 scope.go:117] "RemoveContainer" containerID="91373aa1f93d376da96852e2ff5f1f90415bb446df36cd4f8fc66e8e029eb6d8" Dec 10 11:26:47 crc kubenswrapper[4780]: I1210 11:26:47.959587 4780 scope.go:117] "RemoveContainer" containerID="c10af5b7a9461382e4108cdd2bb73a0e03a7bf28be1decd85891fe9f2dbf6208" Dec 10 11:26:47 crc kubenswrapper[4780]: E1210 11:26:47.965183 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xhdr5_openshift-machine-config-operator(6bf1dca1-b191-4796-b326-baac53e84045)\"" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" podUID="6bf1dca1-b191-4796-b326-baac53e84045" Dec 10 11:26:48 crc kubenswrapper[4780]: E1210 11:26:48.123335 4780 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested" Dec 10 11:26:48 crc kubenswrapper[4780]: E1210 11:26:48.123413 4780 kuberuntime_image.go:55] "Failed to pull image" err="initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested" Dec 10 11:26:48 crc kubenswrapper[4780]: E1210 11:26:48.123825 4780 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:ceilometer-central-agent,Image:quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n5d9hcfh66bh66bh89h5cdh97h57ch598h68h5b5h689h56chc5h96h58ch687h5dfh5ddh645h68bhcchcdh56ch56fh9fh654hd4h8dhb9h74h59cq,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/var/lib/openstack/bin,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/openstack/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:ceilometer-central-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-gf2w8,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[/usr/bin/python3 /var/lib/openstack/bin/centralhealth.py],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:300,TimeoutSeconds:5,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ceilometer-0_openstack(317b5b7c-bb08-4441-a2ef-8c2d7390ada6): ErrImagePull: initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine" logger="UnhandledError" Dec 10 11:26:48 crc kubenswrapper[4780]: E1210 11:26:48.125026 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ErrImagePull: \"initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 11:26:48 crc kubenswrapper[4780]: E1210 11:26:48.961388 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 11:26:58 crc kubenswrapper[4780]: E1210 11:26:58.962983 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 11:27:00 crc kubenswrapper[4780]: I1210 11:27:00.960940 4780 scope.go:117] "RemoveContainer" containerID="c10af5b7a9461382e4108cdd2bb73a0e03a7bf28be1decd85891fe9f2dbf6208" Dec 10 11:27:00 crc kubenswrapper[4780]: E1210 11:27:00.961837 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xhdr5_openshift-machine-config-operator(6bf1dca1-b191-4796-b326-baac53e84045)\"" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" podUID="6bf1dca1-b191-4796-b326-baac53e84045" Dec 10 11:27:01 crc kubenswrapper[4780]: E1210 11:27:01.964398 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 11:27:09 crc kubenswrapper[4780]: I1210 11:27:09.060620 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-db-create-wdmbj"] Dec 10 11:27:09 crc kubenswrapper[4780]: I1210 11:27:09.073825 4780 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-db-create-wdmbj"] Dec 10 11:27:09 crc kubenswrapper[4780]: I1210 11:27:09.982209 4780 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a58ed76c-f42e-41ca-8e79-3b656701cdea" path="/var/lib/kubelet/pods/a58ed76c-f42e-41ca-8e79-3b656701cdea/volumes" Dec 10 11:27:12 crc kubenswrapper[4780]: I1210 11:27:12.045934 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-db-create-kjc6l"] Dec 10 11:27:12 crc kubenswrapper[4780]: I1210 11:27:12.060887 4780 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-db-create-kjc6l"] Dec 10 11:27:12 crc kubenswrapper[4780]: I1210 11:27:12.959599 4780 scope.go:117] "RemoveContainer" 
containerID="c10af5b7a9461382e4108cdd2bb73a0e03a7bf28be1decd85891fe9f2dbf6208" Dec 10 11:27:12 crc kubenswrapper[4780]: E1210 11:27:12.960173 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xhdr5_openshift-machine-config-operator(6bf1dca1-b191-4796-b326-baac53e84045)\"" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" podUID="6bf1dca1-b191-4796-b326-baac53e84045" Dec 10 11:27:12 crc kubenswrapper[4780]: E1210 11:27:12.961759 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 11:27:13 crc kubenswrapper[4780]: E1210 11:27:13.960905 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 11:27:13 crc kubenswrapper[4780]: I1210 11:27:13.976528 4780 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f8ddd440-8c9e-4d62-8efa-b5c8dd6bc990" path="/var/lib/kubelet/pods/f8ddd440-8c9e-4d62-8efa-b5c8dd6bc990/volumes" Dec 10 11:27:14 crc kubenswrapper[4780]: I1210 11:27:14.050208 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-9ebb-account-create-update-nb7g9"] Dec 10 11:27:14 crc kubenswrapper[4780]: I1210 11:27:14.066953 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-db-create-tw2jn"] Dec 10 11:27:14 crc kubenswrapper[4780]: I1210 11:27:14.080151 4780 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-9ebb-account-create-update-nb7g9"] Dec 10 11:27:14 crc kubenswrapper[4780]: I1210 11:27:14.092200 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-2c72-account-create-update-22zbp"] Dec 10 11:27:14 crc kubenswrapper[4780]: I1210 11:27:14.106954 4780 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-db-create-tw2jn"] Dec 10 11:27:14 crc kubenswrapper[4780]: I1210 11:27:14.123470 4780 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-2c72-account-create-update-22zbp"] Dec 10 11:27:14 crc kubenswrapper[4780]: I1210 11:27:14.138258 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-122d-account-create-update-tj4vk"] Dec 10 11:27:14 crc kubenswrapper[4780]: I1210 11:27:14.151050 4780 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-122d-account-create-update-tj4vk"] Dec 10 11:27:15 crc kubenswrapper[4780]: I1210 11:27:15.982498 4780 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="00fa7636-13e7-49b7-8ce0-dac88eab63d7" path="/var/lib/kubelet/pods/00fa7636-13e7-49b7-8ce0-dac88eab63d7/volumes" Dec 10 11:27:15 crc kubenswrapper[4780]: I1210 11:27:15.984962 4780 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="87bfb484-2111-41a3-99d5-52e8db80f098" path="/var/lib/kubelet/pods/87bfb484-2111-41a3-99d5-52e8db80f098/volumes" Dec 10 11:27:15 crc 
kubenswrapper[4780]: I1210 11:27:15.985992 4780 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b1f2fac2-0797-448c-b4d8-98ebd4eff159" path="/var/lib/kubelet/pods/b1f2fac2-0797-448c-b4d8-98ebd4eff159/volumes" Dec 10 11:27:15 crc kubenswrapper[4780]: I1210 11:27:15.987207 4780 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d9a943a1-6945-4117-a123-5c96d85b4e77" path="/var/lib/kubelet/pods/d9a943a1-6945-4117-a123-5c96d85b4e77/volumes" Dec 10 11:27:24 crc kubenswrapper[4780]: E1210 11:27:24.962715 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 11:27:26 crc kubenswrapper[4780]: I1210 11:27:26.220222 4780 scope.go:117] "RemoveContainer" containerID="c10af5b7a9461382e4108cdd2bb73a0e03a7bf28be1decd85891fe9f2dbf6208" Dec 10 11:27:26 crc kubenswrapper[4780]: E1210 11:27:26.221113 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xhdr5_openshift-machine-config-operator(6bf1dca1-b191-4796-b326-baac53e84045)\"" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" podUID="6bf1dca1-b191-4796-b326-baac53e84045" Dec 10 11:27:27 crc kubenswrapper[4780]: E1210 11:27:27.964174 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 11:27:36 crc kubenswrapper[4780]: I1210 11:27:36.959804 4780 scope.go:117] "RemoveContainer" containerID="c10af5b7a9461382e4108cdd2bb73a0e03a7bf28be1decd85891fe9f2dbf6208" Dec 10 11:27:36 crc kubenswrapper[4780]: E1210 11:27:36.961039 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xhdr5_openshift-machine-config-operator(6bf1dca1-b191-4796-b326-baac53e84045)\"" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" podUID="6bf1dca1-b191-4796-b326-baac53e84045" Dec 10 11:27:39 crc kubenswrapper[4780]: E1210 11:27:39.962746 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 11:27:39 crc kubenswrapper[4780]: E1210 11:27:39.962937 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 11:27:44 crc kubenswrapper[4780]: I1210 11:27:44.660901 4780 scope.go:117] "RemoveContainer" 
containerID="969f13fc29150a1fb6e7ff4c3c5474894ef36f1dba90993637cecbad493ae9d4" Dec 10 11:27:44 crc kubenswrapper[4780]: I1210 11:27:44.724727 4780 scope.go:117] "RemoveContainer" containerID="c9d38332c37fbfbdad98cfaa7496e6ae5334f6bcf682e91dc45b45fa4343e0d6" Dec 10 11:27:44 crc kubenswrapper[4780]: I1210 11:27:44.915502 4780 scope.go:117] "RemoveContainer" containerID="8a05503156564e80d3285b9e0e559aa401ec58508cb35e2e5195b78c239dab14" Dec 10 11:27:44 crc kubenswrapper[4780]: I1210 11:27:44.978209 4780 scope.go:117] "RemoveContainer" containerID="048524975a1a80f19a60acb0a5ce2cd364087a155f6c9082cc77617ae2fabac8" Dec 10 11:27:45 crc kubenswrapper[4780]: I1210 11:27:45.042339 4780 scope.go:117] "RemoveContainer" containerID="acf7303f3fefe241e01fdccd9fcc36846ef4a752ef9cd94acaa10a5bc3e563c5" Dec 10 11:27:45 crc kubenswrapper[4780]: I1210 11:27:45.137300 4780 scope.go:117] "RemoveContainer" containerID="194a2f6ed27dc0727e61c4c42fdfaefc27f2efebac0daed63bba695fb2d09a33" Dec 10 11:27:48 crc kubenswrapper[4780]: I1210 11:27:48.965064 4780 scope.go:117] "RemoveContainer" containerID="c10af5b7a9461382e4108cdd2bb73a0e03a7bf28be1decd85891fe9f2dbf6208" Dec 10 11:27:48 crc kubenswrapper[4780]: E1210 11:27:48.990946 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xhdr5_openshift-machine-config-operator(6bf1dca1-b191-4796-b326-baac53e84045)\"" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" podUID="6bf1dca1-b191-4796-b326-baac53e84045" Dec 10 11:27:52 crc kubenswrapper[4780]: E1210 11:27:52.962679 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 11:27:54 crc kubenswrapper[4780]: E1210 11:27:54.964347 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 11:28:03 crc kubenswrapper[4780]: I1210 11:28:03.959974 4780 scope.go:117] "RemoveContainer" containerID="c10af5b7a9461382e4108cdd2bb73a0e03a7bf28be1decd85891fe9f2dbf6208" Dec 10 11:28:03 crc kubenswrapper[4780]: E1210 11:28:03.961611 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xhdr5_openshift-machine-config-operator(6bf1dca1-b191-4796-b326-baac53e84045)\"" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" podUID="6bf1dca1-b191-4796-b326-baac53e84045" Dec 10 11:28:05 crc kubenswrapper[4780]: E1210 11:28:05.163446 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 11:28:06 crc 
kubenswrapper[4780]: E1210 11:28:06.963188 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 11:28:14 crc kubenswrapper[4780]: I1210 11:28:14.959532 4780 scope.go:117] "RemoveContainer" containerID="c10af5b7a9461382e4108cdd2bb73a0e03a7bf28be1decd85891fe9f2dbf6208" Dec 10 11:28:14 crc kubenswrapper[4780]: E1210 11:28:14.960872 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xhdr5_openshift-machine-config-operator(6bf1dca1-b191-4796-b326-baac53e84045)\"" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" podUID="6bf1dca1-b191-4796-b326-baac53e84045" Dec 10 11:28:18 crc kubenswrapper[4780]: E1210 11:28:18.962156 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 11:28:22 crc kubenswrapper[4780]: E1210 11:28:22.203679 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 11:28:28 crc kubenswrapper[4780]: I1210 11:28:28.960042 4780 scope.go:117] "RemoveContainer" containerID="c10af5b7a9461382e4108cdd2bb73a0e03a7bf28be1decd85891fe9f2dbf6208" Dec 10 11:28:28 crc kubenswrapper[4780]: E1210 11:28:28.961616 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xhdr5_openshift-machine-config-operator(6bf1dca1-b191-4796-b326-baac53e84045)\"" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" podUID="6bf1dca1-b191-4796-b326-baac53e84045" Dec 10 11:28:30 crc kubenswrapper[4780]: E1210 11:28:30.964297 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 11:28:34 crc kubenswrapper[4780]: E1210 11:28:34.964565 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 11:28:40 crc kubenswrapper[4780]: I1210 11:28:40.959810 4780 scope.go:117] "RemoveContainer" containerID="c10af5b7a9461382e4108cdd2bb73a0e03a7bf28be1decd85891fe9f2dbf6208" Dec 10 11:28:40 crc 
kubenswrapper[4780]: E1210 11:28:40.961272 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xhdr5_openshift-machine-config-operator(6bf1dca1-b191-4796-b326-baac53e84045)\"" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" podUID="6bf1dca1-b191-4796-b326-baac53e84045" Dec 10 11:28:42 crc kubenswrapper[4780]: I1210 11:28:42.060642 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-slw88"] Dec 10 11:28:42 crc kubenswrapper[4780]: I1210 11:28:42.077786 4780 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-slw88"] Dec 10 11:28:43 crc kubenswrapper[4780]: I1210 11:28:43.983096 4780 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a94e2b82-6087-4791-8f15-e1ca9e25028b" path="/var/lib/kubelet/pods/a94e2b82-6087-4791-8f15-e1ca9e25028b/volumes" Dec 10 11:28:44 crc kubenswrapper[4780]: E1210 11:28:44.963730 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 11:28:45 crc kubenswrapper[4780]: I1210 11:28:45.401404 4780 scope.go:117] "RemoveContainer" containerID="c36d091e25c263274562bbc15d2b6e015d2fa99fce1a12ab74953cb7b4d4452b" Dec 10 11:28:45 crc kubenswrapper[4780]: E1210 11:28:45.971659 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 11:28:51 crc kubenswrapper[4780]: I1210 11:28:51.960641 4780 scope.go:117] "RemoveContainer" containerID="c10af5b7a9461382e4108cdd2bb73a0e03a7bf28be1decd85891fe9f2dbf6208" Dec 10 11:28:51 crc kubenswrapper[4780]: E1210 11:28:51.962043 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xhdr5_openshift-machine-config-operator(6bf1dca1-b191-4796-b326-baac53e84045)\"" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" podUID="6bf1dca1-b191-4796-b326-baac53e84045" Dec 10 11:28:57 crc kubenswrapper[4780]: E1210 11:28:57.963415 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 11:28:58 crc kubenswrapper[4780]: E1210 11:28:58.963480 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 11:28:59 crc 
kubenswrapper[4780]: I1210 11:28:59.054532 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/aodh-3287-account-create-update-plmqg"] Dec 10 11:28:59 crc kubenswrapper[4780]: I1210 11:28:59.072300 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/aodh-db-create-ccvcx"] Dec 10 11:28:59 crc kubenswrapper[4780]: I1210 11:28:59.109516 4780 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/aodh-3287-account-create-update-plmqg"] Dec 10 11:28:59 crc kubenswrapper[4780]: I1210 11:28:59.125588 4780 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/aodh-db-create-ccvcx"] Dec 10 11:29:00 crc kubenswrapper[4780]: I1210 11:29:00.001251 4780 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="81f2ad5f-8eec-47e0-9958-ec5f0be03fae" path="/var/lib/kubelet/pods/81f2ad5f-8eec-47e0-9958-ec5f0be03fae/volumes" Dec 10 11:29:00 crc kubenswrapper[4780]: I1210 11:29:00.003500 4780 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c52cc2d2-45a5-4e32-8320-db28a4a5b5c6" path="/var/lib/kubelet/pods/c52cc2d2-45a5-4e32-8320-db28a4a5b5c6/volumes" Dec 10 11:29:05 crc kubenswrapper[4780]: I1210 11:29:05.970109 4780 scope.go:117] "RemoveContainer" containerID="c10af5b7a9461382e4108cdd2bb73a0e03a7bf28be1decd85891fe9f2dbf6208" Dec 10 11:29:05 crc kubenswrapper[4780]: E1210 11:29:05.971428 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xhdr5_openshift-machine-config-operator(6bf1dca1-b191-4796-b326-baac53e84045)\"" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" podUID="6bf1dca1-b191-4796-b326-baac53e84045" Dec 10 11:29:10 crc kubenswrapper[4780]: E1210 11:29:10.962870 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 11:29:11 crc kubenswrapper[4780]: E1210 11:29:11.961433 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 11:29:19 crc kubenswrapper[4780]: I1210 11:29:19.960759 4780 scope.go:117] "RemoveContainer" containerID="c10af5b7a9461382e4108cdd2bb73a0e03a7bf28be1decd85891fe9f2dbf6208" Dec 10 11:29:19 crc kubenswrapper[4780]: E1210 11:29:19.963430 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xhdr5_openshift-machine-config-operator(6bf1dca1-b191-4796-b326-baac53e84045)\"" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" podUID="6bf1dca1-b191-4796-b326-baac53e84045" Dec 10 11:29:21 crc kubenswrapper[4780]: I1210 11:29:21.059056 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-cell-mapping-ff8ld"] Dec 10 11:29:21 crc kubenswrapper[4780]: I1210 11:29:21.073616 4780 kubelet.go:2431] "SyncLoop REMOVE" 
source="api" pods=["openstack/nova-cell0-cell-mapping-ff8ld"] Dec 10 11:29:21 crc kubenswrapper[4780]: I1210 11:29:21.975867 4780 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="75ef71d6-20b7-40b6-83c3-b3ee314c827f" path="/var/lib/kubelet/pods/75ef71d6-20b7-40b6-83c3-b3ee314c827f/volumes" Dec 10 11:29:22 crc kubenswrapper[4780]: E1210 11:29:22.961875 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 11:29:23 crc kubenswrapper[4780]: I1210 11:29:23.054227 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-97bxk"] Dec 10 11:29:23 crc kubenswrapper[4780]: I1210 11:29:23.066782 4780 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-97bxk"] Dec 10 11:29:23 crc kubenswrapper[4780]: I1210 11:29:23.980991 4780 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2fd04a14-0a8f-4491-a00c-3fb008e736ce" path="/var/lib/kubelet/pods/2fd04a14-0a8f-4491-a00c-3fb008e736ce/volumes" Dec 10 11:29:24 crc kubenswrapper[4780]: E1210 11:29:24.963758 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 11:29:27 crc kubenswrapper[4780]: I1210 11:29:27.055523 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/aodh-db-sync-xd5bf"] Dec 10 11:29:27 crc kubenswrapper[4780]: I1210 11:29:27.080396 4780 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/aodh-db-sync-xd5bf"] Dec 10 11:29:27 crc kubenswrapper[4780]: I1210 11:29:27.980655 4780 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a624d997-bd02-460a-9dd0-d636be0d70ef" path="/var/lib/kubelet/pods/a624d997-bd02-460a-9dd0-d636be0d70ef/volumes" Dec 10 11:29:32 crc kubenswrapper[4780]: I1210 11:29:32.959557 4780 scope.go:117] "RemoveContainer" containerID="c10af5b7a9461382e4108cdd2bb73a0e03a7bf28be1decd85891fe9f2dbf6208" Dec 10 11:29:32 crc kubenswrapper[4780]: E1210 11:29:32.960708 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xhdr5_openshift-machine-config-operator(6bf1dca1-b191-4796-b326-baac53e84045)\"" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" podUID="6bf1dca1-b191-4796-b326-baac53e84045" Dec 10 11:29:33 crc kubenswrapper[4780]: E1210 11:29:33.963262 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 11:29:38 crc kubenswrapper[4780]: E1210 11:29:38.964388 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off 
pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 11:29:43 crc kubenswrapper[4780]: I1210 11:29:43.961814 4780 scope.go:117] "RemoveContainer" containerID="c10af5b7a9461382e4108cdd2bb73a0e03a7bf28be1decd85891fe9f2dbf6208" Dec 10 11:29:43 crc kubenswrapper[4780]: E1210 11:29:43.962698 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xhdr5_openshift-machine-config-operator(6bf1dca1-b191-4796-b326-baac53e84045)\"" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" podUID="6bf1dca1-b191-4796-b326-baac53e84045" Dec 10 11:29:45 crc kubenswrapper[4780]: I1210 11:29:45.530866 4780 scope.go:117] "RemoveContainer" containerID="7e72e29420e67e29c52213e47dffb654f1ca1b241d782ea5a72f1a769baf73c6" Dec 10 11:29:45 crc kubenswrapper[4780]: I1210 11:29:45.654348 4780 scope.go:117] "RemoveContainer" containerID="2c05254e53813076e4d628f0768d3f4cda15091643bc5ceafb945d1d46236498" Dec 10 11:29:45 crc kubenswrapper[4780]: I1210 11:29:45.712159 4780 scope.go:117] "RemoveContainer" containerID="b3a4aab9254d7d3152f1da5803b25aab61c300861a05794a305988f9ed012c34" Dec 10 11:29:45 crc kubenswrapper[4780]: I1210 11:29:45.778261 4780 scope.go:117] "RemoveContainer" containerID="a200422138b0eb32a34b61428752536bc17b969f3a2813a5dd548f16f980de37" Dec 10 11:29:45 crc kubenswrapper[4780]: I1210 11:29:45.815398 4780 scope.go:117] "RemoveContainer" containerID="7b2a6371dc15b316aada82df6419122cdb9ee3ef98f065d428c64908c5deaa48" Dec 10 11:29:45 crc kubenswrapper[4780]: E1210 11:29:45.977731 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 11:29:50 crc kubenswrapper[4780]: E1210 11:29:50.963292 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 11:29:54 crc kubenswrapper[4780]: I1210 11:29:54.960449 4780 scope.go:117] "RemoveContainer" containerID="c10af5b7a9461382e4108cdd2bb73a0e03a7bf28be1decd85891fe9f2dbf6208" Dec 10 11:29:54 crc kubenswrapper[4780]: E1210 11:29:54.961507 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xhdr5_openshift-machine-config-operator(6bf1dca1-b191-4796-b326-baac53e84045)\"" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" podUID="6bf1dca1-b191-4796-b326-baac53e84045" Dec 10 11:29:57 crc kubenswrapper[4780]: E1210 11:29:57.962048 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" 
pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 11:30:00 crc kubenswrapper[4780]: I1210 11:30:00.167746 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29422770-4chsm"] Dec 10 11:30:00 crc kubenswrapper[4780]: E1210 11:30:00.169382 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6465726d-2c55-4619-8cf1-3e4e28e05580" containerName="registry-server" Dec 10 11:30:00 crc kubenswrapper[4780]: I1210 11:30:00.169896 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="6465726d-2c55-4619-8cf1-3e4e28e05580" containerName="registry-server" Dec 10 11:30:00 crc kubenswrapper[4780]: E1210 11:30:00.170007 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6465726d-2c55-4619-8cf1-3e4e28e05580" containerName="extract-content" Dec 10 11:30:00 crc kubenswrapper[4780]: I1210 11:30:00.170019 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="6465726d-2c55-4619-8cf1-3e4e28e05580" containerName="extract-content" Dec 10 11:30:00 crc kubenswrapper[4780]: E1210 11:30:00.170044 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6465726d-2c55-4619-8cf1-3e4e28e05580" containerName="extract-utilities" Dec 10 11:30:00 crc kubenswrapper[4780]: I1210 11:30:00.170055 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="6465726d-2c55-4619-8cf1-3e4e28e05580" containerName="extract-utilities" Dec 10 11:30:00 crc kubenswrapper[4780]: I1210 11:30:00.170484 4780 memory_manager.go:354] "RemoveStaleState removing state" podUID="6465726d-2c55-4619-8cf1-3e4e28e05580" containerName="registry-server" Dec 10 11:30:00 crc kubenswrapper[4780]: I1210 11:30:00.171902 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29422770-4chsm" Dec 10 11:30:00 crc kubenswrapper[4780]: I1210 11:30:00.177908 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Dec 10 11:30:00 crc kubenswrapper[4780]: I1210 11:30:00.178443 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Dec 10 11:30:00 crc kubenswrapper[4780]: I1210 11:30:00.179315 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29422770-4chsm"] Dec 10 11:30:00 crc kubenswrapper[4780]: I1210 11:30:00.240037 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/d6dfd0f3-3c4c-450d-917f-51212e8c6809-config-volume\") pod \"collect-profiles-29422770-4chsm\" (UID: \"d6dfd0f3-3c4c-450d-917f-51212e8c6809\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29422770-4chsm" Dec 10 11:30:00 crc kubenswrapper[4780]: I1210 11:30:00.240550 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kwwq9\" (UniqueName: \"kubernetes.io/projected/d6dfd0f3-3c4c-450d-917f-51212e8c6809-kube-api-access-kwwq9\") pod \"collect-profiles-29422770-4chsm\" (UID: \"d6dfd0f3-3c4c-450d-917f-51212e8c6809\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29422770-4chsm" Dec 10 11:30:00 crc kubenswrapper[4780]: I1210 11:30:00.240680 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/d6dfd0f3-3c4c-450d-917f-51212e8c6809-secret-volume\") pod \"collect-profiles-29422770-4chsm\" (UID: \"d6dfd0f3-3c4c-450d-917f-51212e8c6809\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29422770-4chsm" Dec 10 11:30:00 crc kubenswrapper[4780]: I1210 11:30:00.344760 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/d6dfd0f3-3c4c-450d-917f-51212e8c6809-config-volume\") pod \"collect-profiles-29422770-4chsm\" (UID: \"d6dfd0f3-3c4c-450d-917f-51212e8c6809\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29422770-4chsm" Dec 10 11:30:00 crc kubenswrapper[4780]: I1210 11:30:00.345055 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kwwq9\" (UniqueName: \"kubernetes.io/projected/d6dfd0f3-3c4c-450d-917f-51212e8c6809-kube-api-access-kwwq9\") pod \"collect-profiles-29422770-4chsm\" (UID: \"d6dfd0f3-3c4c-450d-917f-51212e8c6809\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29422770-4chsm" Dec 10 11:30:00 crc kubenswrapper[4780]: I1210 11:30:00.345113 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/d6dfd0f3-3c4c-450d-917f-51212e8c6809-secret-volume\") pod \"collect-profiles-29422770-4chsm\" (UID: \"d6dfd0f3-3c4c-450d-917f-51212e8c6809\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29422770-4chsm" Dec 10 11:30:00 crc kubenswrapper[4780]: I1210 11:30:00.347124 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/d6dfd0f3-3c4c-450d-917f-51212e8c6809-config-volume\") pod 
\"collect-profiles-29422770-4chsm\" (UID: \"d6dfd0f3-3c4c-450d-917f-51212e8c6809\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29422770-4chsm" Dec 10 11:30:00 crc kubenswrapper[4780]: I1210 11:30:00.357460 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/d6dfd0f3-3c4c-450d-917f-51212e8c6809-secret-volume\") pod \"collect-profiles-29422770-4chsm\" (UID: \"d6dfd0f3-3c4c-450d-917f-51212e8c6809\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29422770-4chsm" Dec 10 11:30:00 crc kubenswrapper[4780]: I1210 11:30:00.370018 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kwwq9\" (UniqueName: \"kubernetes.io/projected/d6dfd0f3-3c4c-450d-917f-51212e8c6809-kube-api-access-kwwq9\") pod \"collect-profiles-29422770-4chsm\" (UID: \"d6dfd0f3-3c4c-450d-917f-51212e8c6809\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29422770-4chsm" Dec 10 11:30:00 crc kubenswrapper[4780]: I1210 11:30:00.539105 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29422770-4chsm" Dec 10 11:30:01 crc kubenswrapper[4780]: I1210 11:30:01.180016 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29422770-4chsm"] Dec 10 11:30:01 crc kubenswrapper[4780]: I1210 11:30:01.205054 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29422770-4chsm" event={"ID":"d6dfd0f3-3c4c-450d-917f-51212e8c6809","Type":"ContainerStarted","Data":"84f6c3b39f748ca2ddd057db28f6e49d8c163adb584efd02e391ce2b1bb66703"} Dec 10 11:30:01 crc kubenswrapper[4780]: E1210 11:30:01.961986 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 11:30:02 crc kubenswrapper[4780]: I1210 11:30:02.219222 4780 generic.go:334] "Generic (PLEG): container finished" podID="d6dfd0f3-3c4c-450d-917f-51212e8c6809" containerID="489a3ff254e426f632724274689fa2866e1cb92ce421ecb30dc54aa3b1fbe22b" exitCode=0 Dec 10 11:30:02 crc kubenswrapper[4780]: I1210 11:30:02.219304 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29422770-4chsm" event={"ID":"d6dfd0f3-3c4c-450d-917f-51212e8c6809","Type":"ContainerDied","Data":"489a3ff254e426f632724274689fa2866e1cb92ce421ecb30dc54aa3b1fbe22b"} Dec 10 11:30:03 crc kubenswrapper[4780]: I1210 11:30:03.734075 4780 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29422770-4chsm" Dec 10 11:30:03 crc kubenswrapper[4780]: I1210 11:30:03.770706 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/d6dfd0f3-3c4c-450d-917f-51212e8c6809-secret-volume\") pod \"d6dfd0f3-3c4c-450d-917f-51212e8c6809\" (UID: \"d6dfd0f3-3c4c-450d-917f-51212e8c6809\") " Dec 10 11:30:03 crc kubenswrapper[4780]: I1210 11:30:03.770868 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/d6dfd0f3-3c4c-450d-917f-51212e8c6809-config-volume\") pod \"d6dfd0f3-3c4c-450d-917f-51212e8c6809\" (UID: \"d6dfd0f3-3c4c-450d-917f-51212e8c6809\") " Dec 10 11:30:03 crc kubenswrapper[4780]: I1210 11:30:03.770947 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kwwq9\" (UniqueName: \"kubernetes.io/projected/d6dfd0f3-3c4c-450d-917f-51212e8c6809-kube-api-access-kwwq9\") pod \"d6dfd0f3-3c4c-450d-917f-51212e8c6809\" (UID: \"d6dfd0f3-3c4c-450d-917f-51212e8c6809\") " Dec 10 11:30:03 crc kubenswrapper[4780]: I1210 11:30:03.773166 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d6dfd0f3-3c4c-450d-917f-51212e8c6809-config-volume" (OuterVolumeSpecName: "config-volume") pod "d6dfd0f3-3c4c-450d-917f-51212e8c6809" (UID: "d6dfd0f3-3c4c-450d-917f-51212e8c6809"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 11:30:03 crc kubenswrapper[4780]: I1210 11:30:03.788169 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d6dfd0f3-3c4c-450d-917f-51212e8c6809-kube-api-access-kwwq9" (OuterVolumeSpecName: "kube-api-access-kwwq9") pod "d6dfd0f3-3c4c-450d-917f-51212e8c6809" (UID: "d6dfd0f3-3c4c-450d-917f-51212e8c6809"). InnerVolumeSpecName "kube-api-access-kwwq9". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:30:03 crc kubenswrapper[4780]: I1210 11:30:03.792425 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d6dfd0f3-3c4c-450d-917f-51212e8c6809-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "d6dfd0f3-3c4c-450d-917f-51212e8c6809" (UID: "d6dfd0f3-3c4c-450d-917f-51212e8c6809"). InnerVolumeSpecName "secret-volume". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:30:03 crc kubenswrapper[4780]: I1210 11:30:03.876533 4780 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/d6dfd0f3-3c4c-450d-917f-51212e8c6809-secret-volume\") on node \"crc\" DevicePath \"\"" Dec 10 11:30:03 crc kubenswrapper[4780]: I1210 11:30:03.876621 4780 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/d6dfd0f3-3c4c-450d-917f-51212e8c6809-config-volume\") on node \"crc\" DevicePath \"\"" Dec 10 11:30:03 crc kubenswrapper[4780]: I1210 11:30:03.876645 4780 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kwwq9\" (UniqueName: \"kubernetes.io/projected/d6dfd0f3-3c4c-450d-917f-51212e8c6809-kube-api-access-kwwq9\") on node \"crc\" DevicePath \"\"" Dec 10 11:30:04 crc kubenswrapper[4780]: I1210 11:30:04.259152 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29422770-4chsm" event={"ID":"d6dfd0f3-3c4c-450d-917f-51212e8c6809","Type":"ContainerDied","Data":"84f6c3b39f748ca2ddd057db28f6e49d8c163adb584efd02e391ce2b1bb66703"} Dec 10 11:30:04 crc kubenswrapper[4780]: I1210 11:30:04.259757 4780 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="84f6c3b39f748ca2ddd057db28f6e49d8c163adb584efd02e391ce2b1bb66703" Dec 10 11:30:04 crc kubenswrapper[4780]: I1210 11:30:04.259253 4780 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29422770-4chsm" Dec 10 11:30:04 crc kubenswrapper[4780]: I1210 11:30:04.839912 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29422725-lvhr4"] Dec 10 11:30:04 crc kubenswrapper[4780]: I1210 11:30:04.853302 4780 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29422725-lvhr4"] Dec 10 11:30:05 crc kubenswrapper[4780]: I1210 11:30:05.986826 4780 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="536a1b3e-4cda-4ed7-985a-595c13968356" path="/var/lib/kubelet/pods/536a1b3e-4cda-4ed7-985a-595c13968356/volumes" Dec 10 11:30:07 crc kubenswrapper[4780]: I1210 11:30:07.970218 4780 scope.go:117] "RemoveContainer" containerID="c10af5b7a9461382e4108cdd2bb73a0e03a7bf28be1decd85891fe9f2dbf6208" Dec 10 11:30:07 crc kubenswrapper[4780]: E1210 11:30:07.971171 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xhdr5_openshift-machine-config-operator(6bf1dca1-b191-4796-b326-baac53e84045)\"" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" podUID="6bf1dca1-b191-4796-b326-baac53e84045" Dec 10 11:30:09 crc kubenswrapper[4780]: I1210 11:30:09.041911 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-cell-mapping-qkv7r"] Dec 10 11:30:09 crc kubenswrapper[4780]: I1210 11:30:09.056021 4780 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-cell-mapping-qkv7r"] Dec 10 11:30:09 crc kubenswrapper[4780]: E1210 11:30:09.962877 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image 
\\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 11:30:09 crc kubenswrapper[4780]: I1210 11:30:09.975996 4780 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d296ea05-7391-4cd2-a807-1168a1547cb6" path="/var/lib/kubelet/pods/d296ea05-7391-4cd2-a807-1168a1547cb6/volumes" Dec 10 11:30:13 crc kubenswrapper[4780]: E1210 11:30:13.964111 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 11:30:19 crc kubenswrapper[4780]: I1210 11:30:19.961017 4780 scope.go:117] "RemoveContainer" containerID="c10af5b7a9461382e4108cdd2bb73a0e03a7bf28be1decd85891fe9f2dbf6208" Dec 10 11:30:19 crc kubenswrapper[4780]: E1210 11:30:19.963823 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xhdr5_openshift-machine-config-operator(6bf1dca1-b191-4796-b326-baac53e84045)\"" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" podUID="6bf1dca1-b191-4796-b326-baac53e84045" Dec 10 11:30:20 crc kubenswrapper[4780]: E1210 11:30:20.963565 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 11:30:24 crc kubenswrapper[4780]: E1210 11:30:24.962026 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 11:30:31 crc kubenswrapper[4780]: I1210 11:30:31.959087 4780 scope.go:117] "RemoveContainer" containerID="c10af5b7a9461382e4108cdd2bb73a0e03a7bf28be1decd85891fe9f2dbf6208" Dec 10 11:30:31 crc kubenswrapper[4780]: E1210 11:30:31.960138 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xhdr5_openshift-machine-config-operator(6bf1dca1-b191-4796-b326-baac53e84045)\"" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" podUID="6bf1dca1-b191-4796-b326-baac53e84045" Dec 10 11:30:31 crc kubenswrapper[4780]: E1210 11:30:31.964594 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 11:30:36 crc kubenswrapper[4780]: E1210 11:30:36.962479 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for 
\"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 11:30:46 crc kubenswrapper[4780]: I1210 11:30:46.009857 4780 scope.go:117] "RemoveContainer" containerID="58e831f01e8b2dac91b4455f4311f9c942493c697cffcabc861b705c151f9418" Dec 10 11:30:46 crc kubenswrapper[4780]: I1210 11:30:46.112776 4780 scope.go:117] "RemoveContainer" containerID="9a6dcf7d26cfc3d7057fe07bbb5f952e4b4522fe78fd34e23be5869efd50de11" Dec 10 11:30:46 crc kubenswrapper[4780]: I1210 11:30:46.960219 4780 scope.go:117] "RemoveContainer" containerID="c10af5b7a9461382e4108cdd2bb73a0e03a7bf28be1decd85891fe9f2dbf6208" Dec 10 11:30:46 crc kubenswrapper[4780]: E1210 11:30:46.960900 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xhdr5_openshift-machine-config-operator(6bf1dca1-b191-4796-b326-baac53e84045)\"" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" podUID="6bf1dca1-b191-4796-b326-baac53e84045" Dec 10 11:30:46 crc kubenswrapper[4780]: E1210 11:30:46.962634 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 11:30:51 crc kubenswrapper[4780]: E1210 11:30:51.962978 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 11:30:57 crc kubenswrapper[4780]: I1210 11:30:57.961420 4780 scope.go:117] "RemoveContainer" containerID="c10af5b7a9461382e4108cdd2bb73a0e03a7bf28be1decd85891fe9f2dbf6208" Dec 10 11:30:57 crc kubenswrapper[4780]: E1210 11:30:57.962895 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xhdr5_openshift-machine-config-operator(6bf1dca1-b191-4796-b326-baac53e84045)\"" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" podUID="6bf1dca1-b191-4796-b326-baac53e84045" Dec 10 11:30:57 crc kubenswrapper[4780]: E1210 11:30:57.963366 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 11:31:04 crc kubenswrapper[4780]: E1210 11:31:04.962605 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" 
podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 11:31:09 crc kubenswrapper[4780]: I1210 11:31:09.962292 4780 scope.go:117] "RemoveContainer" containerID="c10af5b7a9461382e4108cdd2bb73a0e03a7bf28be1decd85891fe9f2dbf6208" Dec 10 11:31:10 crc kubenswrapper[4780]: E1210 11:31:10.004215 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 11:31:11 crc kubenswrapper[4780]: I1210 11:31:11.805219 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" event={"ID":"6bf1dca1-b191-4796-b326-baac53e84045","Type":"ContainerStarted","Data":"4894a3439f4512784fa82a7e629e0e227ca42119000a7b39d3567af26b9dd9a7"} Dec 10 11:31:16 crc kubenswrapper[4780]: E1210 11:31:16.963663 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 11:31:23 crc kubenswrapper[4780]: E1210 11:31:23.962186 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 11:31:26 crc kubenswrapper[4780]: I1210 11:31:26.374002 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-5txdr"] Dec 10 11:31:26 crc kubenswrapper[4780]: E1210 11:31:26.375303 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d6dfd0f3-3c4c-450d-917f-51212e8c6809" containerName="collect-profiles" Dec 10 11:31:26 crc kubenswrapper[4780]: I1210 11:31:26.375329 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="d6dfd0f3-3c4c-450d-917f-51212e8c6809" containerName="collect-profiles" Dec 10 11:31:26 crc kubenswrapper[4780]: I1210 11:31:26.375764 4780 memory_manager.go:354] "RemoveStaleState removing state" podUID="d6dfd0f3-3c4c-450d-917f-51212e8c6809" containerName="collect-profiles" Dec 10 11:31:26 crc kubenswrapper[4780]: I1210 11:31:26.379941 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-5txdr" Dec 10 11:31:26 crc kubenswrapper[4780]: I1210 11:31:26.399104 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-5txdr"] Dec 10 11:31:26 crc kubenswrapper[4780]: I1210 11:31:26.548152 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lprhc\" (UniqueName: \"kubernetes.io/projected/b4a0368b-1c17-4f79-8917-6b568f686669-kube-api-access-lprhc\") pod \"redhat-operators-5txdr\" (UID: \"b4a0368b-1c17-4f79-8917-6b568f686669\") " pod="openshift-marketplace/redhat-operators-5txdr" Dec 10 11:31:26 crc kubenswrapper[4780]: I1210 11:31:26.559195 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b4a0368b-1c17-4f79-8917-6b568f686669-utilities\") pod \"redhat-operators-5txdr\" (UID: \"b4a0368b-1c17-4f79-8917-6b568f686669\") " pod="openshift-marketplace/redhat-operators-5txdr" Dec 10 11:31:26 crc kubenswrapper[4780]: I1210 11:31:26.559615 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b4a0368b-1c17-4f79-8917-6b568f686669-catalog-content\") pod \"redhat-operators-5txdr\" (UID: \"b4a0368b-1c17-4f79-8917-6b568f686669\") " pod="openshift-marketplace/redhat-operators-5txdr" Dec 10 11:31:26 crc kubenswrapper[4780]: I1210 11:31:26.668111 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lprhc\" (UniqueName: \"kubernetes.io/projected/b4a0368b-1c17-4f79-8917-6b568f686669-kube-api-access-lprhc\") pod \"redhat-operators-5txdr\" (UID: \"b4a0368b-1c17-4f79-8917-6b568f686669\") " pod="openshift-marketplace/redhat-operators-5txdr" Dec 10 11:31:26 crc kubenswrapper[4780]: I1210 11:31:26.668226 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b4a0368b-1c17-4f79-8917-6b568f686669-utilities\") pod \"redhat-operators-5txdr\" (UID: \"b4a0368b-1c17-4f79-8917-6b568f686669\") " pod="openshift-marketplace/redhat-operators-5txdr" Dec 10 11:31:26 crc kubenswrapper[4780]: I1210 11:31:26.668337 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b4a0368b-1c17-4f79-8917-6b568f686669-catalog-content\") pod \"redhat-operators-5txdr\" (UID: \"b4a0368b-1c17-4f79-8917-6b568f686669\") " pod="openshift-marketplace/redhat-operators-5txdr" Dec 10 11:31:26 crc kubenswrapper[4780]: I1210 11:31:26.669684 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b4a0368b-1c17-4f79-8917-6b568f686669-utilities\") pod \"redhat-operators-5txdr\" (UID: \"b4a0368b-1c17-4f79-8917-6b568f686669\") " pod="openshift-marketplace/redhat-operators-5txdr" Dec 10 11:31:26 crc kubenswrapper[4780]: I1210 11:31:26.671712 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b4a0368b-1c17-4f79-8917-6b568f686669-catalog-content\") pod \"redhat-operators-5txdr\" (UID: \"b4a0368b-1c17-4f79-8917-6b568f686669\") " pod="openshift-marketplace/redhat-operators-5txdr" Dec 10 11:31:26 crc kubenswrapper[4780]: I1210 11:31:26.716292 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-lprhc\" (UniqueName: \"kubernetes.io/projected/b4a0368b-1c17-4f79-8917-6b568f686669-kube-api-access-lprhc\") pod \"redhat-operators-5txdr\" (UID: \"b4a0368b-1c17-4f79-8917-6b568f686669\") " pod="openshift-marketplace/redhat-operators-5txdr" Dec 10 11:31:26 crc kubenswrapper[4780]: I1210 11:31:26.728467 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-5txdr" Dec 10 11:31:27 crc kubenswrapper[4780]: I1210 11:31:27.661827 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-5txdr"] Dec 10 11:31:28 crc kubenswrapper[4780]: I1210 11:31:28.174855 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-5txdr" event={"ID":"b4a0368b-1c17-4f79-8917-6b568f686669","Type":"ContainerStarted","Data":"11f728081cfb866c9c5e1a557f519e8d8f375663b879e8f5c8b31cc30d5e506a"} Dec 10 11:31:29 crc kubenswrapper[4780]: I1210 11:31:29.197000 4780 generic.go:334] "Generic (PLEG): container finished" podID="b4a0368b-1c17-4f79-8917-6b568f686669" containerID="11acd66eb8367e62d2cee18a5c01ebd8dae30fc5e41141a1cb969ff460112103" exitCode=0 Dec 10 11:31:29 crc kubenswrapper[4780]: I1210 11:31:29.197128 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-5txdr" event={"ID":"b4a0368b-1c17-4f79-8917-6b568f686669","Type":"ContainerDied","Data":"11acd66eb8367e62d2cee18a5c01ebd8dae30fc5e41141a1cb969ff460112103"} Dec 10 11:31:29 crc kubenswrapper[4780]: I1210 11:31:29.200790 4780 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Dec 10 11:31:30 crc kubenswrapper[4780]: I1210 11:31:30.217391 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-5txdr" event={"ID":"b4a0368b-1c17-4f79-8917-6b568f686669","Type":"ContainerStarted","Data":"bf145d04fb0e96c819012dd48031cc10fc9c6b1ceb26e4e1de3fa4d7819cf012"} Dec 10 11:31:30 crc kubenswrapper[4780]: E1210 11:31:30.964292 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 11:31:36 crc kubenswrapper[4780]: I1210 11:31:36.545703 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-5txdr" event={"ID":"b4a0368b-1c17-4f79-8917-6b568f686669","Type":"ContainerDied","Data":"bf145d04fb0e96c819012dd48031cc10fc9c6b1ceb26e4e1de3fa4d7819cf012"} Dec 10 11:31:36 crc kubenswrapper[4780]: I1210 11:31:36.545645 4780 generic.go:334] "Generic (PLEG): container finished" podID="b4a0368b-1c17-4f79-8917-6b568f686669" containerID="bf145d04fb0e96c819012dd48031cc10fc9c6b1ceb26e4e1de3fa4d7819cf012" exitCode=0 Dec 10 11:31:38 crc kubenswrapper[4780]: E1210 11:31:38.091671 4780 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested" Dec 10 11:31:38 crc kubenswrapper[4780]: E1210 11:31:38.092435 4780 kuberuntime_image.go:55] "Failed to pull image" err="initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested" Dec 10 11:31:38 crc kubenswrapper[4780]: E1210 11:31:38.092805 4780 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:heat-db-sync,Image:quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested,Command:[/bin/bash],Args:[-c /usr/bin/heat-manage --config-dir /etc/heat/heat.conf.d db_sync],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:true,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/heat/heat.conf.d/00-default.conf,SubPath:00-default.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:false,MountPath:/etc/heat/heat.conf.d/01-custom.conf,SubPath:01-custom.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/my.cnf,SubPath:my.cnf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-fh6ms,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42418,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:*42418,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod heat-db-sync-nd4t7_openstack(4ba2892c-316e-4819-a33c-d7b2b6803553): ErrImagePull: initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine" logger="UnhandledError" Dec 10 11:31:38 crc kubenswrapper[4780]: E1210 11:31:38.094005 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ErrImagePull: \"initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 11:31:38 crc kubenswrapper[4780]: I1210 11:31:38.574410 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-5txdr" event={"ID":"b4a0368b-1c17-4f79-8917-6b568f686669","Type":"ContainerStarted","Data":"3c71dc5f91de429c97b7849a165da9fb52f465fbb66c4ab49232df72dcb45719"} Dec 10 11:31:38 crc kubenswrapper[4780]: I1210 11:31:38.618295 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-5txdr" podStartSLOduration=4.170760404 podStartE2EDuration="12.618251759s" podCreationTimestamp="2025-12-10 11:31:26 +0000 UTC" firstStartedPulling="2025-12-10 11:31:29.2004862 +0000 UTC m=+2794.053879643" lastFinishedPulling="2025-12-10 11:31:37.647977555 +0000 UTC m=+2802.501370998" observedRunningTime="2025-12-10 11:31:38.612641776 +0000 UTC m=+2803.466035219" watchObservedRunningTime="2025-12-10 11:31:38.618251759 +0000 UTC m=+2803.471645202" Dec 10 11:31:43 crc kubenswrapper[4780]: E1210 11:31:43.963248 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 11:31:46 crc kubenswrapper[4780]: I1210 11:31:46.729531 4780 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-5txdr" Dec 10 11:31:46 crc kubenswrapper[4780]: I1210 11:31:46.730290 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-5txdr" Dec 10 11:31:47 crc kubenswrapper[4780]: I1210 11:31:47.997848 4780 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-5txdr" podUID="b4a0368b-1c17-4f79-8917-6b568f686669" containerName="registry-server" probeResult="failure" output=< Dec 10 11:31:47 crc kubenswrapper[4780]: timeout: failed to connect service ":50051" within 1s Dec 10 11:31:47 crc kubenswrapper[4780]: > Dec 10 11:31:48 crc kubenswrapper[4780]: E1210 11:31:48.962593 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 11:31:56 crc kubenswrapper[4780]: I1210 11:31:56.792526 4780 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-5txdr" Dec 10 11:31:57 crc kubenswrapper[4780]: I1210 11:31:57.016463 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" 
pod="openshift-marketplace/redhat-operators-5txdr" Dec 10 11:31:57 crc kubenswrapper[4780]: E1210 11:31:57.134191 4780 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested" Dec 10 11:31:57 crc kubenswrapper[4780]: E1210 11:31:57.134308 4780 kuberuntime_image.go:55] "Failed to pull image" err="initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested" Dec 10 11:31:57 crc kubenswrapper[4780]: E1210 11:31:57.134548 4780 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:ceilometer-central-agent,Image:quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n5d9hcfh66bh66bh89h5cdh97h57ch598h68h5b5h689h56chc5h96h58ch687h5dfh5ddh645h68bhcchcdh56ch56fh9fh654hd4h8dhb9h74h59cq,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/var/lib/openstack/bin,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/openstack/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:ceilometer-central-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-gf2w8,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[/usr/bin/python3 
/var/lib/openstack/bin/centralhealth.py],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:300,TimeoutSeconds:5,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ceilometer-0_openstack(317b5b7c-bb08-4441-a2ef-8c2d7390ada6): ErrImagePull: initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" logger="UnhandledError" Dec 10 11:31:57 crc kubenswrapper[4780]: E1210 11:31:57.135998 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ErrImagePull: \"initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 11:31:57 crc kubenswrapper[4780]: I1210 11:31:57.578290 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-5txdr"] Dec 10 11:31:58 crc kubenswrapper[4780]: I1210 11:31:58.995181 4780 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-5txdr" podUID="b4a0368b-1c17-4f79-8917-6b568f686669" containerName="registry-server" containerID="cri-o://3c71dc5f91de429c97b7849a165da9fb52f465fbb66c4ab49232df72dcb45719" gracePeriod=2 Dec 10 11:31:59 crc kubenswrapper[4780]: I1210 11:31:59.738110 4780 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-5txdr" Dec 10 11:31:59 crc kubenswrapper[4780]: I1210 11:31:59.902217 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b4a0368b-1c17-4f79-8917-6b568f686669-utilities\") pod \"b4a0368b-1c17-4f79-8917-6b568f686669\" (UID: \"b4a0368b-1c17-4f79-8917-6b568f686669\") " Dec 10 11:31:59 crc kubenswrapper[4780]: I1210 11:31:59.902356 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b4a0368b-1c17-4f79-8917-6b568f686669-catalog-content\") pod \"b4a0368b-1c17-4f79-8917-6b568f686669\" (UID: \"b4a0368b-1c17-4f79-8917-6b568f686669\") " Dec 10 11:31:59 crc kubenswrapper[4780]: I1210 11:31:59.902818 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lprhc\" (UniqueName: \"kubernetes.io/projected/b4a0368b-1c17-4f79-8917-6b568f686669-kube-api-access-lprhc\") pod \"b4a0368b-1c17-4f79-8917-6b568f686669\" (UID: \"b4a0368b-1c17-4f79-8917-6b568f686669\") " Dec 10 11:31:59 crc kubenswrapper[4780]: I1210 11:31:59.903741 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b4a0368b-1c17-4f79-8917-6b568f686669-utilities" (OuterVolumeSpecName: "utilities") pod "b4a0368b-1c17-4f79-8917-6b568f686669" (UID: "b4a0368b-1c17-4f79-8917-6b568f686669"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 11:31:59 crc kubenswrapper[4780]: I1210 11:31:59.919255 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b4a0368b-1c17-4f79-8917-6b568f686669-kube-api-access-lprhc" (OuterVolumeSpecName: "kube-api-access-lprhc") pod "b4a0368b-1c17-4f79-8917-6b568f686669" (UID: "b4a0368b-1c17-4f79-8917-6b568f686669"). InnerVolumeSpecName "kube-api-access-lprhc". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:32:00 crc kubenswrapper[4780]: I1210 11:32:00.007220 4780 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b4a0368b-1c17-4f79-8917-6b568f686669-utilities\") on node \"crc\" DevicePath \"\"" Dec 10 11:32:00 crc kubenswrapper[4780]: I1210 11:32:00.007268 4780 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lprhc\" (UniqueName: \"kubernetes.io/projected/b4a0368b-1c17-4f79-8917-6b568f686669-kube-api-access-lprhc\") on node \"crc\" DevicePath \"\"" Dec 10 11:32:00 crc kubenswrapper[4780]: I1210 11:32:00.021264 4780 generic.go:334] "Generic (PLEG): container finished" podID="b4a0368b-1c17-4f79-8917-6b568f686669" containerID="3c71dc5f91de429c97b7849a165da9fb52f465fbb66c4ab49232df72dcb45719" exitCode=0 Dec 10 11:32:00 crc kubenswrapper[4780]: I1210 11:32:00.021399 4780 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-5txdr" Dec 10 11:32:00 crc kubenswrapper[4780]: I1210 11:32:00.021447 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-5txdr" event={"ID":"b4a0368b-1c17-4f79-8917-6b568f686669","Type":"ContainerDied","Data":"3c71dc5f91de429c97b7849a165da9fb52f465fbb66c4ab49232df72dcb45719"} Dec 10 11:32:00 crc kubenswrapper[4780]: I1210 11:32:00.021849 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-5txdr" event={"ID":"b4a0368b-1c17-4f79-8917-6b568f686669","Type":"ContainerDied","Data":"11f728081cfb866c9c5e1a557f519e8d8f375663b879e8f5c8b31cc30d5e506a"} Dec 10 11:32:00 crc kubenswrapper[4780]: I1210 11:32:00.021895 4780 scope.go:117] "RemoveContainer" containerID="3c71dc5f91de429c97b7849a165da9fb52f465fbb66c4ab49232df72dcb45719" Dec 10 11:32:00 crc kubenswrapper[4780]: I1210 11:32:00.039064 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b4a0368b-1c17-4f79-8917-6b568f686669-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "b4a0368b-1c17-4f79-8917-6b568f686669" (UID: "b4a0368b-1c17-4f79-8917-6b568f686669"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 11:32:00 crc kubenswrapper[4780]: I1210 11:32:00.059433 4780 scope.go:117] "RemoveContainer" containerID="bf145d04fb0e96c819012dd48031cc10fc9c6b1ceb26e4e1de3fa4d7819cf012" Dec 10 11:32:00 crc kubenswrapper[4780]: I1210 11:32:00.093380 4780 scope.go:117] "RemoveContainer" containerID="11acd66eb8367e62d2cee18a5c01ebd8dae30fc5e41141a1cb969ff460112103" Dec 10 11:32:00 crc kubenswrapper[4780]: I1210 11:32:00.110282 4780 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b4a0368b-1c17-4f79-8917-6b568f686669-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 10 11:32:00 crc kubenswrapper[4780]: I1210 11:32:00.144910 4780 scope.go:117] "RemoveContainer" containerID="3c71dc5f91de429c97b7849a165da9fb52f465fbb66c4ab49232df72dcb45719" Dec 10 11:32:00 crc kubenswrapper[4780]: E1210 11:32:00.145480 4780 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3c71dc5f91de429c97b7849a165da9fb52f465fbb66c4ab49232df72dcb45719\": container with ID starting with 3c71dc5f91de429c97b7849a165da9fb52f465fbb66c4ab49232df72dcb45719 not found: ID does not exist" containerID="3c71dc5f91de429c97b7849a165da9fb52f465fbb66c4ab49232df72dcb45719" Dec 10 11:32:00 crc kubenswrapper[4780]: I1210 11:32:00.145526 4780 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3c71dc5f91de429c97b7849a165da9fb52f465fbb66c4ab49232df72dcb45719"} err="failed to get container status \"3c71dc5f91de429c97b7849a165da9fb52f465fbb66c4ab49232df72dcb45719\": rpc error: code = NotFound desc = could not find container \"3c71dc5f91de429c97b7849a165da9fb52f465fbb66c4ab49232df72dcb45719\": container with ID starting with 3c71dc5f91de429c97b7849a165da9fb52f465fbb66c4ab49232df72dcb45719 not found: ID does not exist" Dec 10 11:32:00 crc kubenswrapper[4780]: I1210 11:32:00.145554 4780 scope.go:117] "RemoveContainer" containerID="bf145d04fb0e96c819012dd48031cc10fc9c6b1ceb26e4e1de3fa4d7819cf012" Dec 10 11:32:00 crc kubenswrapper[4780]: E1210 11:32:00.145938 4780 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not 
find container \"bf145d04fb0e96c819012dd48031cc10fc9c6b1ceb26e4e1de3fa4d7819cf012\": container with ID starting with bf145d04fb0e96c819012dd48031cc10fc9c6b1ceb26e4e1de3fa4d7819cf012 not found: ID does not exist" containerID="bf145d04fb0e96c819012dd48031cc10fc9c6b1ceb26e4e1de3fa4d7819cf012" Dec 10 11:32:00 crc kubenswrapper[4780]: I1210 11:32:00.145993 4780 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bf145d04fb0e96c819012dd48031cc10fc9c6b1ceb26e4e1de3fa4d7819cf012"} err="failed to get container status \"bf145d04fb0e96c819012dd48031cc10fc9c6b1ceb26e4e1de3fa4d7819cf012\": rpc error: code = NotFound desc = could not find container \"bf145d04fb0e96c819012dd48031cc10fc9c6b1ceb26e4e1de3fa4d7819cf012\": container with ID starting with bf145d04fb0e96c819012dd48031cc10fc9c6b1ceb26e4e1de3fa4d7819cf012 not found: ID does not exist" Dec 10 11:32:00 crc kubenswrapper[4780]: I1210 11:32:00.146043 4780 scope.go:117] "RemoveContainer" containerID="11acd66eb8367e62d2cee18a5c01ebd8dae30fc5e41141a1cb969ff460112103" Dec 10 11:32:00 crc kubenswrapper[4780]: E1210 11:32:00.146521 4780 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"11acd66eb8367e62d2cee18a5c01ebd8dae30fc5e41141a1cb969ff460112103\": container with ID starting with 11acd66eb8367e62d2cee18a5c01ebd8dae30fc5e41141a1cb969ff460112103 not found: ID does not exist" containerID="11acd66eb8367e62d2cee18a5c01ebd8dae30fc5e41141a1cb969ff460112103" Dec 10 11:32:00 crc kubenswrapper[4780]: I1210 11:32:00.146593 4780 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"11acd66eb8367e62d2cee18a5c01ebd8dae30fc5e41141a1cb969ff460112103"} err="failed to get container status \"11acd66eb8367e62d2cee18a5c01ebd8dae30fc5e41141a1cb969ff460112103\": rpc error: code = NotFound desc = could not find container \"11acd66eb8367e62d2cee18a5c01ebd8dae30fc5e41141a1cb969ff460112103\": container with ID starting with 11acd66eb8367e62d2cee18a5c01ebd8dae30fc5e41141a1cb969ff460112103 not found: ID does not exist" Dec 10 11:32:00 crc kubenswrapper[4780]: I1210 11:32:00.376340 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-5txdr"] Dec 10 11:32:00 crc kubenswrapper[4780]: I1210 11:32:00.390612 4780 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-5txdr"] Dec 10 11:32:01 crc kubenswrapper[4780]: I1210 11:32:01.978590 4780 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b4a0368b-1c17-4f79-8917-6b568f686669" path="/var/lib/kubelet/pods/b4a0368b-1c17-4f79-8917-6b568f686669/volumes" Dec 10 11:32:03 crc kubenswrapper[4780]: E1210 11:32:03.964502 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 11:32:08 crc kubenswrapper[4780]: E1210 11:32:08.962294 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 11:32:14 crc 
kubenswrapper[4780]: E1210 11:32:14.961347 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 11:32:21 crc kubenswrapper[4780]: E1210 11:32:21.470466 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 11:32:27 crc kubenswrapper[4780]: E1210 11:32:27.963080 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 11:32:31 crc kubenswrapper[4780]: E1210 11:32:31.962908 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 11:32:41 crc kubenswrapper[4780]: E1210 11:32:41.963616 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 11:32:42 crc kubenswrapper[4780]: E1210 11:32:42.964093 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 11:32:53 crc kubenswrapper[4780]: E1210 11:32:53.963341 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 11:32:56 crc kubenswrapper[4780]: I1210 11:32:56.742831 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-25558"] Dec 10 11:32:56 crc kubenswrapper[4780]: E1210 11:32:56.744108 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b4a0368b-1c17-4f79-8917-6b568f686669" containerName="extract-content" Dec 10 11:32:56 crc kubenswrapper[4780]: I1210 11:32:56.744132 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="b4a0368b-1c17-4f79-8917-6b568f686669" containerName="extract-content" Dec 10 11:32:56 crc kubenswrapper[4780]: E1210 11:32:56.744182 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b4a0368b-1c17-4f79-8917-6b568f686669" 
containerName="extract-utilities" Dec 10 11:32:56 crc kubenswrapper[4780]: I1210 11:32:56.744197 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="b4a0368b-1c17-4f79-8917-6b568f686669" containerName="extract-utilities" Dec 10 11:32:56 crc kubenswrapper[4780]: E1210 11:32:56.744232 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b4a0368b-1c17-4f79-8917-6b568f686669" containerName="registry-server" Dec 10 11:32:56 crc kubenswrapper[4780]: I1210 11:32:56.744239 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="b4a0368b-1c17-4f79-8917-6b568f686669" containerName="registry-server" Dec 10 11:32:56 crc kubenswrapper[4780]: I1210 11:32:56.744600 4780 memory_manager.go:354] "RemoveStaleState removing state" podUID="b4a0368b-1c17-4f79-8917-6b568f686669" containerName="registry-server" Dec 10 11:32:56 crc kubenswrapper[4780]: I1210 11:32:56.746994 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-25558" Dec 10 11:32:56 crc kubenswrapper[4780]: I1210 11:32:56.755267 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-25558"] Dec 10 11:32:56 crc kubenswrapper[4780]: I1210 11:32:56.872503 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/46d4e8d6-eb63-46c5-bde4-52dab4f4c935-catalog-content\") pod \"certified-operators-25558\" (UID: \"46d4e8d6-eb63-46c5-bde4-52dab4f4c935\") " pod="openshift-marketplace/certified-operators-25558" Dec 10 11:32:56 crc kubenswrapper[4780]: I1210 11:32:56.872734 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9s8nl\" (UniqueName: \"kubernetes.io/projected/46d4e8d6-eb63-46c5-bde4-52dab4f4c935-kube-api-access-9s8nl\") pod \"certified-operators-25558\" (UID: \"46d4e8d6-eb63-46c5-bde4-52dab4f4c935\") " pod="openshift-marketplace/certified-operators-25558" Dec 10 11:32:56 crc kubenswrapper[4780]: I1210 11:32:56.872838 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/46d4e8d6-eb63-46c5-bde4-52dab4f4c935-utilities\") pod \"certified-operators-25558\" (UID: \"46d4e8d6-eb63-46c5-bde4-52dab4f4c935\") " pod="openshift-marketplace/certified-operators-25558" Dec 10 11:32:56 crc kubenswrapper[4780]: I1210 11:32:56.975274 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9s8nl\" (UniqueName: \"kubernetes.io/projected/46d4e8d6-eb63-46c5-bde4-52dab4f4c935-kube-api-access-9s8nl\") pod \"certified-operators-25558\" (UID: \"46d4e8d6-eb63-46c5-bde4-52dab4f4c935\") " pod="openshift-marketplace/certified-operators-25558" Dec 10 11:32:56 crc kubenswrapper[4780]: I1210 11:32:56.975411 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/46d4e8d6-eb63-46c5-bde4-52dab4f4c935-utilities\") pod \"certified-operators-25558\" (UID: \"46d4e8d6-eb63-46c5-bde4-52dab4f4c935\") " pod="openshift-marketplace/certified-operators-25558" Dec 10 11:32:56 crc kubenswrapper[4780]: I1210 11:32:56.975521 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/46d4e8d6-eb63-46c5-bde4-52dab4f4c935-catalog-content\") pod \"certified-operators-25558\" (UID: 
\"46d4e8d6-eb63-46c5-bde4-52dab4f4c935\") " pod="openshift-marketplace/certified-operators-25558" Dec 10 11:32:56 crc kubenswrapper[4780]: I1210 11:32:56.976074 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/46d4e8d6-eb63-46c5-bde4-52dab4f4c935-utilities\") pod \"certified-operators-25558\" (UID: \"46d4e8d6-eb63-46c5-bde4-52dab4f4c935\") " pod="openshift-marketplace/certified-operators-25558" Dec 10 11:32:56 crc kubenswrapper[4780]: I1210 11:32:56.976243 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/46d4e8d6-eb63-46c5-bde4-52dab4f4c935-catalog-content\") pod \"certified-operators-25558\" (UID: \"46d4e8d6-eb63-46c5-bde4-52dab4f4c935\") " pod="openshift-marketplace/certified-operators-25558" Dec 10 11:32:56 crc kubenswrapper[4780]: I1210 11:32:56.996719 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9s8nl\" (UniqueName: \"kubernetes.io/projected/46d4e8d6-eb63-46c5-bde4-52dab4f4c935-kube-api-access-9s8nl\") pod \"certified-operators-25558\" (UID: \"46d4e8d6-eb63-46c5-bde4-52dab4f4c935\") " pod="openshift-marketplace/certified-operators-25558" Dec 10 11:32:57 crc kubenswrapper[4780]: I1210 11:32:57.077572 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-25558" Dec 10 11:32:57 crc kubenswrapper[4780]: I1210 11:32:57.884936 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-25558"] Dec 10 11:32:57 crc kubenswrapper[4780]: E1210 11:32:57.969984 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 11:32:58 crc kubenswrapper[4780]: I1210 11:32:58.318346 4780 generic.go:334] "Generic (PLEG): container finished" podID="46d4e8d6-eb63-46c5-bde4-52dab4f4c935" containerID="4bf43d94aea5652049046e497d0f3f955df1befd41dddfef055d7c3150ad3f10" exitCode=0 Dec 10 11:32:58 crc kubenswrapper[4780]: I1210 11:32:58.318475 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-25558" event={"ID":"46d4e8d6-eb63-46c5-bde4-52dab4f4c935","Type":"ContainerDied","Data":"4bf43d94aea5652049046e497d0f3f955df1befd41dddfef055d7c3150ad3f10"} Dec 10 11:32:58 crc kubenswrapper[4780]: I1210 11:32:58.319701 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-25558" event={"ID":"46d4e8d6-eb63-46c5-bde4-52dab4f4c935","Type":"ContainerStarted","Data":"227f77bfd16c23be8b238965442e00e858b577a4db603403b325f88ce1548570"} Dec 10 11:33:00 crc kubenswrapper[4780]: I1210 11:33:00.356700 4780 generic.go:334] "Generic (PLEG): container finished" podID="46d4e8d6-eb63-46c5-bde4-52dab4f4c935" containerID="332b69afc110bcdd24906823ed6ca967c9f58e0de8ef7413852d187fd71539d2" exitCode=0 Dec 10 11:33:00 crc kubenswrapper[4780]: I1210 11:33:00.356852 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-25558" event={"ID":"46d4e8d6-eb63-46c5-bde4-52dab4f4c935","Type":"ContainerDied","Data":"332b69afc110bcdd24906823ed6ca967c9f58e0de8ef7413852d187fd71539d2"} Dec 10 11:33:01 crc 
kubenswrapper[4780]: I1210 11:33:01.377106 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-25558" event={"ID":"46d4e8d6-eb63-46c5-bde4-52dab4f4c935","Type":"ContainerStarted","Data":"4f792902e3f8f235db5d7ee1fbf7172d13aeac3687d5cec675c89a1e3758d270"} Dec 10 11:33:01 crc kubenswrapper[4780]: I1210 11:33:01.406743 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-25558" podStartSLOduration=2.882512668 podStartE2EDuration="5.406705733s" podCreationTimestamp="2025-12-10 11:32:56 +0000 UTC" firstStartedPulling="2025-12-10 11:32:58.320438869 +0000 UTC m=+2883.173832312" lastFinishedPulling="2025-12-10 11:33:00.844631924 +0000 UTC m=+2885.698025377" observedRunningTime="2025-12-10 11:33:01.396648877 +0000 UTC m=+2886.250042320" watchObservedRunningTime="2025-12-10 11:33:01.406705733 +0000 UTC m=+2886.260099186" Dec 10 11:33:07 crc kubenswrapper[4780]: I1210 11:33:07.078668 4780 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-25558" Dec 10 11:33:07 crc kubenswrapper[4780]: I1210 11:33:07.079259 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-25558" Dec 10 11:33:07 crc kubenswrapper[4780]: I1210 11:33:07.153237 4780 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-25558" Dec 10 11:33:07 crc kubenswrapper[4780]: I1210 11:33:07.725796 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-25558" Dec 10 11:33:07 crc kubenswrapper[4780]: I1210 11:33:07.810795 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-25558"] Dec 10 11:33:07 crc kubenswrapper[4780]: E1210 11:33:07.962490 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 11:33:09 crc kubenswrapper[4780]: I1210 11:33:09.690375 4780 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-25558" podUID="46d4e8d6-eb63-46c5-bde4-52dab4f4c935" containerName="registry-server" containerID="cri-o://4f792902e3f8f235db5d7ee1fbf7172d13aeac3687d5cec675c89a1e3758d270" gracePeriod=2 Dec 10 11:33:09 crc kubenswrapper[4780]: E1210 11:33:09.922706 4780 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod46d4e8d6_eb63_46c5_bde4_52dab4f4c935.slice/crio-4f792902e3f8f235db5d7ee1fbf7172d13aeac3687d5cec675c89a1e3758d270.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod46d4e8d6_eb63_46c5_bde4_52dab4f4c935.slice/crio-conmon-4f792902e3f8f235db5d7ee1fbf7172d13aeac3687d5cec675c89a1e3758d270.scope\": RecentStats: unable to find data in memory cache]" Dec 10 11:33:10 crc kubenswrapper[4780]: I1210 11:33:10.636494 4780 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-25558" Dec 10 11:33:10 crc kubenswrapper[4780]: I1210 11:33:10.707879 4780 generic.go:334] "Generic (PLEG): container finished" podID="46d4e8d6-eb63-46c5-bde4-52dab4f4c935" containerID="4f792902e3f8f235db5d7ee1fbf7172d13aeac3687d5cec675c89a1e3758d270" exitCode=0 Dec 10 11:33:10 crc kubenswrapper[4780]: I1210 11:33:10.707974 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-25558" event={"ID":"46d4e8d6-eb63-46c5-bde4-52dab4f4c935","Type":"ContainerDied","Data":"4f792902e3f8f235db5d7ee1fbf7172d13aeac3687d5cec675c89a1e3758d270"} Dec 10 11:33:10 crc kubenswrapper[4780]: I1210 11:33:10.708045 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-25558" event={"ID":"46d4e8d6-eb63-46c5-bde4-52dab4f4c935","Type":"ContainerDied","Data":"227f77bfd16c23be8b238965442e00e858b577a4db603403b325f88ce1548570"} Dec 10 11:33:10 crc kubenswrapper[4780]: I1210 11:33:10.708069 4780 scope.go:117] "RemoveContainer" containerID="4f792902e3f8f235db5d7ee1fbf7172d13aeac3687d5cec675c89a1e3758d270" Dec 10 11:33:10 crc kubenswrapper[4780]: I1210 11:33:10.708159 4780 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-25558" Dec 10 11:33:10 crc kubenswrapper[4780]: I1210 11:33:10.734772 4780 scope.go:117] "RemoveContainer" containerID="332b69afc110bcdd24906823ed6ca967c9f58e0de8ef7413852d187fd71539d2" Dec 10 11:33:10 crc kubenswrapper[4780]: I1210 11:33:10.771038 4780 scope.go:117] "RemoveContainer" containerID="4bf43d94aea5652049046e497d0f3f955df1befd41dddfef055d7c3150ad3f10" Dec 10 11:33:10 crc kubenswrapper[4780]: I1210 11:33:10.802643 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9s8nl\" (UniqueName: \"kubernetes.io/projected/46d4e8d6-eb63-46c5-bde4-52dab4f4c935-kube-api-access-9s8nl\") pod \"46d4e8d6-eb63-46c5-bde4-52dab4f4c935\" (UID: \"46d4e8d6-eb63-46c5-bde4-52dab4f4c935\") " Dec 10 11:33:10 crc kubenswrapper[4780]: I1210 11:33:10.802807 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/46d4e8d6-eb63-46c5-bde4-52dab4f4c935-catalog-content\") pod \"46d4e8d6-eb63-46c5-bde4-52dab4f4c935\" (UID: \"46d4e8d6-eb63-46c5-bde4-52dab4f4c935\") " Dec 10 11:33:10 crc kubenswrapper[4780]: I1210 11:33:10.803121 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/46d4e8d6-eb63-46c5-bde4-52dab4f4c935-utilities\") pod \"46d4e8d6-eb63-46c5-bde4-52dab4f4c935\" (UID: \"46d4e8d6-eb63-46c5-bde4-52dab4f4c935\") " Dec 10 11:33:10 crc kubenswrapper[4780]: I1210 11:33:10.804237 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/46d4e8d6-eb63-46c5-bde4-52dab4f4c935-utilities" (OuterVolumeSpecName: "utilities") pod "46d4e8d6-eb63-46c5-bde4-52dab4f4c935" (UID: "46d4e8d6-eb63-46c5-bde4-52dab4f4c935"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 11:33:10 crc kubenswrapper[4780]: I1210 11:33:10.810034 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/46d4e8d6-eb63-46c5-bde4-52dab4f4c935-kube-api-access-9s8nl" (OuterVolumeSpecName: "kube-api-access-9s8nl") pod "46d4e8d6-eb63-46c5-bde4-52dab4f4c935" (UID: "46d4e8d6-eb63-46c5-bde4-52dab4f4c935"). InnerVolumeSpecName "kube-api-access-9s8nl". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:33:10 crc kubenswrapper[4780]: I1210 11:33:10.828354 4780 scope.go:117] "RemoveContainer" containerID="4f792902e3f8f235db5d7ee1fbf7172d13aeac3687d5cec675c89a1e3758d270" Dec 10 11:33:10 crc kubenswrapper[4780]: E1210 11:33:10.829052 4780 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4f792902e3f8f235db5d7ee1fbf7172d13aeac3687d5cec675c89a1e3758d270\": container with ID starting with 4f792902e3f8f235db5d7ee1fbf7172d13aeac3687d5cec675c89a1e3758d270 not found: ID does not exist" containerID="4f792902e3f8f235db5d7ee1fbf7172d13aeac3687d5cec675c89a1e3758d270" Dec 10 11:33:10 crc kubenswrapper[4780]: I1210 11:33:10.829099 4780 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4f792902e3f8f235db5d7ee1fbf7172d13aeac3687d5cec675c89a1e3758d270"} err="failed to get container status \"4f792902e3f8f235db5d7ee1fbf7172d13aeac3687d5cec675c89a1e3758d270\": rpc error: code = NotFound desc = could not find container \"4f792902e3f8f235db5d7ee1fbf7172d13aeac3687d5cec675c89a1e3758d270\": container with ID starting with 4f792902e3f8f235db5d7ee1fbf7172d13aeac3687d5cec675c89a1e3758d270 not found: ID does not exist" Dec 10 11:33:10 crc kubenswrapper[4780]: I1210 11:33:10.829130 4780 scope.go:117] "RemoveContainer" containerID="332b69afc110bcdd24906823ed6ca967c9f58e0de8ef7413852d187fd71539d2" Dec 10 11:33:10 crc kubenswrapper[4780]: E1210 11:33:10.829902 4780 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"332b69afc110bcdd24906823ed6ca967c9f58e0de8ef7413852d187fd71539d2\": container with ID starting with 332b69afc110bcdd24906823ed6ca967c9f58e0de8ef7413852d187fd71539d2 not found: ID does not exist" containerID="332b69afc110bcdd24906823ed6ca967c9f58e0de8ef7413852d187fd71539d2" Dec 10 11:33:10 crc kubenswrapper[4780]: I1210 11:33:10.829958 4780 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"332b69afc110bcdd24906823ed6ca967c9f58e0de8ef7413852d187fd71539d2"} err="failed to get container status \"332b69afc110bcdd24906823ed6ca967c9f58e0de8ef7413852d187fd71539d2\": rpc error: code = NotFound desc = could not find container \"332b69afc110bcdd24906823ed6ca967c9f58e0de8ef7413852d187fd71539d2\": container with ID starting with 332b69afc110bcdd24906823ed6ca967c9f58e0de8ef7413852d187fd71539d2 not found: ID does not exist" Dec 10 11:33:10 crc kubenswrapper[4780]: I1210 11:33:10.829979 4780 scope.go:117] "RemoveContainer" containerID="4bf43d94aea5652049046e497d0f3f955df1befd41dddfef055d7c3150ad3f10" Dec 10 11:33:10 crc kubenswrapper[4780]: E1210 11:33:10.830312 4780 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4bf43d94aea5652049046e497d0f3f955df1befd41dddfef055d7c3150ad3f10\": container with ID starting with 4bf43d94aea5652049046e497d0f3f955df1befd41dddfef055d7c3150ad3f10 not found: ID does not 
exist" containerID="4bf43d94aea5652049046e497d0f3f955df1befd41dddfef055d7c3150ad3f10" Dec 10 11:33:10 crc kubenswrapper[4780]: I1210 11:33:10.830335 4780 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4bf43d94aea5652049046e497d0f3f955df1befd41dddfef055d7c3150ad3f10"} err="failed to get container status \"4bf43d94aea5652049046e497d0f3f955df1befd41dddfef055d7c3150ad3f10\": rpc error: code = NotFound desc = could not find container \"4bf43d94aea5652049046e497d0f3f955df1befd41dddfef055d7c3150ad3f10\": container with ID starting with 4bf43d94aea5652049046e497d0f3f955df1befd41dddfef055d7c3150ad3f10 not found: ID does not exist" Dec 10 11:33:10 crc kubenswrapper[4780]: I1210 11:33:10.857734 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/46d4e8d6-eb63-46c5-bde4-52dab4f4c935-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "46d4e8d6-eb63-46c5-bde4-52dab4f4c935" (UID: "46d4e8d6-eb63-46c5-bde4-52dab4f4c935"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 11:33:10 crc kubenswrapper[4780]: I1210 11:33:10.906530 4780 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/46d4e8d6-eb63-46c5-bde4-52dab4f4c935-utilities\") on node \"crc\" DevicePath \"\"" Dec 10 11:33:10 crc kubenswrapper[4780]: I1210 11:33:10.906602 4780 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9s8nl\" (UniqueName: \"kubernetes.io/projected/46d4e8d6-eb63-46c5-bde4-52dab4f4c935-kube-api-access-9s8nl\") on node \"crc\" DevicePath \"\"" Dec 10 11:33:10 crc kubenswrapper[4780]: I1210 11:33:10.906614 4780 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/46d4e8d6-eb63-46c5-bde4-52dab4f4c935-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 10 11:33:11 crc kubenswrapper[4780]: I1210 11:33:11.057433 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-25558"] Dec 10 11:33:11 crc kubenswrapper[4780]: I1210 11:33:11.070193 4780 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-25558"] Dec 10 11:33:11 crc kubenswrapper[4780]: E1210 11:33:11.965309 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 11:33:11 crc kubenswrapper[4780]: I1210 11:33:11.980201 4780 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="46d4e8d6-eb63-46c5-bde4-52dab4f4c935" path="/var/lib/kubelet/pods/46d4e8d6-eb63-46c5-bde4-52dab4f4c935/volumes" Dec 10 11:33:19 crc kubenswrapper[4780]: E1210 11:33:19.963514 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 11:33:24 crc kubenswrapper[4780]: I1210 11:33:24.917313 4780 generic.go:334] "Generic (PLEG): container finished" podID="eaf4555e-3d94-4509-b81c-2de2321cff58" 
containerID="d446744984d0168ee1d4e57ca4f6b6fe1b4b75c9ac6e1c9e9e8ddc9a86a05069" exitCode=2 Dec 10 11:33:24 crc kubenswrapper[4780]: I1210 11:33:24.918199 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-wmlrk" event={"ID":"eaf4555e-3d94-4509-b81c-2de2321cff58","Type":"ContainerDied","Data":"d446744984d0168ee1d4e57ca4f6b6fe1b4b75c9ac6e1c9e9e8ddc9a86a05069"} Dec 10 11:33:25 crc kubenswrapper[4780]: E1210 11:33:25.984529 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 11:33:26 crc kubenswrapper[4780]: I1210 11:33:26.589166 4780 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-wmlrk" Dec 10 11:33:26 crc kubenswrapper[4780]: I1210 11:33:26.701483 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/eaf4555e-3d94-4509-b81c-2de2321cff58-inventory\") pod \"eaf4555e-3d94-4509-b81c-2de2321cff58\" (UID: \"eaf4555e-3d94-4509-b81c-2de2321cff58\") " Dec 10 11:33:26 crc kubenswrapper[4780]: I1210 11:33:26.702029 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4jsls\" (UniqueName: \"kubernetes.io/projected/eaf4555e-3d94-4509-b81c-2de2321cff58-kube-api-access-4jsls\") pod \"eaf4555e-3d94-4509-b81c-2de2321cff58\" (UID: \"eaf4555e-3d94-4509-b81c-2de2321cff58\") " Dec 10 11:33:26 crc kubenswrapper[4780]: I1210 11:33:26.702493 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/eaf4555e-3d94-4509-b81c-2de2321cff58-ssh-key\") pod \"eaf4555e-3d94-4509-b81c-2de2321cff58\" (UID: \"eaf4555e-3d94-4509-b81c-2de2321cff58\") " Dec 10 11:33:26 crc kubenswrapper[4780]: I1210 11:33:26.708582 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/eaf4555e-3d94-4509-b81c-2de2321cff58-kube-api-access-4jsls" (OuterVolumeSpecName: "kube-api-access-4jsls") pod "eaf4555e-3d94-4509-b81c-2de2321cff58" (UID: "eaf4555e-3d94-4509-b81c-2de2321cff58"). InnerVolumeSpecName "kube-api-access-4jsls". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:33:26 crc kubenswrapper[4780]: I1210 11:33:26.742661 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/eaf4555e-3d94-4509-b81c-2de2321cff58-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "eaf4555e-3d94-4509-b81c-2de2321cff58" (UID: "eaf4555e-3d94-4509-b81c-2de2321cff58"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:33:26 crc kubenswrapper[4780]: I1210 11:33:26.748897 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/eaf4555e-3d94-4509-b81c-2de2321cff58-inventory" (OuterVolumeSpecName: "inventory") pod "eaf4555e-3d94-4509-b81c-2de2321cff58" (UID: "eaf4555e-3d94-4509-b81c-2de2321cff58"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:33:26 crc kubenswrapper[4780]: I1210 11:33:26.806325 4780 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/eaf4555e-3d94-4509-b81c-2de2321cff58-inventory\") on node \"crc\" DevicePath \"\"" Dec 10 11:33:26 crc kubenswrapper[4780]: I1210 11:33:26.806376 4780 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4jsls\" (UniqueName: \"kubernetes.io/projected/eaf4555e-3d94-4509-b81c-2de2321cff58-kube-api-access-4jsls\") on node \"crc\" DevicePath \"\"" Dec 10 11:33:26 crc kubenswrapper[4780]: I1210 11:33:26.806396 4780 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/eaf4555e-3d94-4509-b81c-2de2321cff58-ssh-key\") on node \"crc\" DevicePath \"\"" Dec 10 11:33:26 crc kubenswrapper[4780]: I1210 11:33:26.960263 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-wmlrk" event={"ID":"eaf4555e-3d94-4509-b81c-2de2321cff58","Type":"ContainerDied","Data":"9f99dbf6f4b834a001a1934c8f4c031bf32d90d2b2a57df5d95970490034fcab"} Dec 10 11:33:26 crc kubenswrapper[4780]: I1210 11:33:26.960325 4780 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="9f99dbf6f4b834a001a1934c8f4c031bf32d90d2b2a57df5d95970490034fcab" Dec 10 11:33:26 crc kubenswrapper[4780]: I1210 11:33:26.960382 4780 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-wmlrk" Dec 10 11:33:27 crc kubenswrapper[4780]: I1210 11:33:27.475855 4780 patch_prober.go:28] interesting pod/machine-config-daemon-xhdr5 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 10 11:33:27 crc kubenswrapper[4780]: I1210 11:33:27.476109 4780 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" podUID="6bf1dca1-b191-4796-b326-baac53e84045" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 10 11:33:33 crc kubenswrapper[4780]: E1210 11:33:33.965835 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 11:33:34 crc kubenswrapper[4780]: I1210 11:33:34.056778 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/download-cache-edpm-deployment-openstack-edpm-ipam-vfqk8"] Dec 10 11:33:34 crc kubenswrapper[4780]: E1210 11:33:34.058196 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="eaf4555e-3d94-4509-b81c-2de2321cff58" containerName="download-cache-edpm-deployment-openstack-edpm-ipam" Dec 10 11:33:34 crc kubenswrapper[4780]: I1210 11:33:34.058227 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="eaf4555e-3d94-4509-b81c-2de2321cff58" containerName="download-cache-edpm-deployment-openstack-edpm-ipam" Dec 10 11:33:34 crc kubenswrapper[4780]: E1210 11:33:34.058252 4780 cpu_manager.go:410] "RemoveStaleState: removing container" 
podUID="46d4e8d6-eb63-46c5-bde4-52dab4f4c935" containerName="registry-server" Dec 10 11:33:34 crc kubenswrapper[4780]: I1210 11:33:34.058261 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="46d4e8d6-eb63-46c5-bde4-52dab4f4c935" containerName="registry-server" Dec 10 11:33:34 crc kubenswrapper[4780]: E1210 11:33:34.058331 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="46d4e8d6-eb63-46c5-bde4-52dab4f4c935" containerName="extract-content" Dec 10 11:33:34 crc kubenswrapper[4780]: I1210 11:33:34.058342 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="46d4e8d6-eb63-46c5-bde4-52dab4f4c935" containerName="extract-content" Dec 10 11:33:34 crc kubenswrapper[4780]: E1210 11:33:34.058391 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="46d4e8d6-eb63-46c5-bde4-52dab4f4c935" containerName="extract-utilities" Dec 10 11:33:34 crc kubenswrapper[4780]: I1210 11:33:34.058400 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="46d4e8d6-eb63-46c5-bde4-52dab4f4c935" containerName="extract-utilities" Dec 10 11:33:34 crc kubenswrapper[4780]: I1210 11:33:34.059181 4780 memory_manager.go:354] "RemoveStaleState removing state" podUID="46d4e8d6-eb63-46c5-bde4-52dab4f4c935" containerName="registry-server" Dec 10 11:33:34 crc kubenswrapper[4780]: I1210 11:33:34.059245 4780 memory_manager.go:354] "RemoveStaleState removing state" podUID="eaf4555e-3d94-4509-b81c-2de2321cff58" containerName="download-cache-edpm-deployment-openstack-edpm-ipam" Dec 10 11:33:34 crc kubenswrapper[4780]: I1210 11:33:34.065350 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-vfqk8" Dec 10 11:33:34 crc kubenswrapper[4780]: I1210 11:33:34.069227 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-p2qrb" Dec 10 11:33:34 crc kubenswrapper[4780]: I1210 11:33:34.071286 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Dec 10 11:33:34 crc kubenswrapper[4780]: I1210 11:33:34.073858 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Dec 10 11:33:34 crc kubenswrapper[4780]: I1210 11:33:34.074142 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Dec 10 11:33:34 crc kubenswrapper[4780]: I1210 11:33:34.088328 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/download-cache-edpm-deployment-openstack-edpm-ipam-vfqk8"] Dec 10 11:33:34 crc kubenswrapper[4780]: I1210 11:33:34.231449 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zmvkd\" (UniqueName: \"kubernetes.io/projected/460516db-cb7d-4309-bbf8-1b4af468dac4-kube-api-access-zmvkd\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-vfqk8\" (UID: \"460516db-cb7d-4309-bbf8-1b4af468dac4\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-vfqk8" Dec 10 11:33:34 crc kubenswrapper[4780]: I1210 11:33:34.232262 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/460516db-cb7d-4309-bbf8-1b4af468dac4-inventory\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-vfqk8\" (UID: \"460516db-cb7d-4309-bbf8-1b4af468dac4\") " 
pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-vfqk8" Dec 10 11:33:34 crc kubenswrapper[4780]: I1210 11:33:34.232334 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/460516db-cb7d-4309-bbf8-1b4af468dac4-ssh-key\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-vfqk8\" (UID: \"460516db-cb7d-4309-bbf8-1b4af468dac4\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-vfqk8" Dec 10 11:33:34 crc kubenswrapper[4780]: I1210 11:33:34.335635 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zmvkd\" (UniqueName: \"kubernetes.io/projected/460516db-cb7d-4309-bbf8-1b4af468dac4-kube-api-access-zmvkd\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-vfqk8\" (UID: \"460516db-cb7d-4309-bbf8-1b4af468dac4\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-vfqk8" Dec 10 11:33:34 crc kubenswrapper[4780]: I1210 11:33:34.335817 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/460516db-cb7d-4309-bbf8-1b4af468dac4-inventory\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-vfqk8\" (UID: \"460516db-cb7d-4309-bbf8-1b4af468dac4\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-vfqk8" Dec 10 11:33:34 crc kubenswrapper[4780]: I1210 11:33:34.335865 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/460516db-cb7d-4309-bbf8-1b4af468dac4-ssh-key\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-vfqk8\" (UID: \"460516db-cb7d-4309-bbf8-1b4af468dac4\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-vfqk8" Dec 10 11:33:34 crc kubenswrapper[4780]: I1210 11:33:34.343099 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/460516db-cb7d-4309-bbf8-1b4af468dac4-inventory\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-vfqk8\" (UID: \"460516db-cb7d-4309-bbf8-1b4af468dac4\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-vfqk8" Dec 10 11:33:34 crc kubenswrapper[4780]: I1210 11:33:34.346657 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/460516db-cb7d-4309-bbf8-1b4af468dac4-ssh-key\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-vfqk8\" (UID: \"460516db-cb7d-4309-bbf8-1b4af468dac4\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-vfqk8" Dec 10 11:33:34 crc kubenswrapper[4780]: I1210 11:33:34.354582 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zmvkd\" (UniqueName: \"kubernetes.io/projected/460516db-cb7d-4309-bbf8-1b4af468dac4-kube-api-access-zmvkd\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-vfqk8\" (UID: \"460516db-cb7d-4309-bbf8-1b4af468dac4\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-vfqk8" Dec 10 11:33:34 crc kubenswrapper[4780]: I1210 11:33:34.430275 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-vfqk8" Dec 10 11:33:35 crc kubenswrapper[4780]: I1210 11:33:35.031402 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/download-cache-edpm-deployment-openstack-edpm-ipam-vfqk8"] Dec 10 11:33:35 crc kubenswrapper[4780]: I1210 11:33:35.141959 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-vfqk8" event={"ID":"460516db-cb7d-4309-bbf8-1b4af468dac4","Type":"ContainerStarted","Data":"6a91d2b21fb49a5aae000d522a0ba615e71beb9ba8ef94da53f4665b94884b85"} Dec 10 11:33:36 crc kubenswrapper[4780]: I1210 11:33:36.162049 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-vfqk8" event={"ID":"460516db-cb7d-4309-bbf8-1b4af468dac4","Type":"ContainerStarted","Data":"42efe205d92ab7d4bbc73529e51de9a0e5ddf8361ce47f68afbc1ec9d65aa2cc"} Dec 10 11:33:36 crc kubenswrapper[4780]: I1210 11:33:36.194539 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-vfqk8" podStartSLOduration=1.6565965980000001 podStartE2EDuration="2.194515399s" podCreationTimestamp="2025-12-10 11:33:34 +0000 UTC" firstStartedPulling="2025-12-10 11:33:35.044104148 +0000 UTC m=+2919.897497591" lastFinishedPulling="2025-12-10 11:33:35.582022949 +0000 UTC m=+2920.435416392" observedRunningTime="2025-12-10 11:33:36.183980079 +0000 UTC m=+2921.037373522" watchObservedRunningTime="2025-12-10 11:33:36.194515399 +0000 UTC m=+2921.047908842" Dec 10 11:33:37 crc kubenswrapper[4780]: E1210 11:33:37.961843 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 11:33:47 crc kubenswrapper[4780]: E1210 11:33:47.962671 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 11:33:51 crc kubenswrapper[4780]: E1210 11:33:51.964299 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 11:33:57 crc kubenswrapper[4780]: I1210 11:33:57.475579 4780 patch_prober.go:28] interesting pod/machine-config-daemon-xhdr5 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 10 11:33:57 crc kubenswrapper[4780]: I1210 11:33:57.476269 4780 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" podUID="6bf1dca1-b191-4796-b326-baac53e84045" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 
127.0.0.1:8798: connect: connection refused" Dec 10 11:34:02 crc kubenswrapper[4780]: E1210 11:34:02.964108 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 11:34:04 crc kubenswrapper[4780]: E1210 11:34:04.961369 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 11:34:13 crc kubenswrapper[4780]: E1210 11:34:13.964332 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 11:34:19 crc kubenswrapper[4780]: E1210 11:34:19.208844 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 11:34:25 crc kubenswrapper[4780]: E1210 11:34:25.980740 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 11:34:27 crc kubenswrapper[4780]: I1210 11:34:27.475837 4780 patch_prober.go:28] interesting pod/machine-config-daemon-xhdr5 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 10 11:34:27 crc kubenswrapper[4780]: I1210 11:34:27.476484 4780 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" podUID="6bf1dca1-b191-4796-b326-baac53e84045" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 10 11:34:27 crc kubenswrapper[4780]: I1210 11:34:27.476560 4780 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" Dec 10 11:34:27 crc kubenswrapper[4780]: I1210 11:34:27.478039 4780 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"4894a3439f4512784fa82a7e629e0e227ca42119000a7b39d3567af26b9dd9a7"} pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Dec 10 11:34:27 crc kubenswrapper[4780]: I1210 11:34:27.478106 4780 kuberuntime_container.go:808] "Killing 
container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" podUID="6bf1dca1-b191-4796-b326-baac53e84045" containerName="machine-config-daemon" containerID="cri-o://4894a3439f4512784fa82a7e629e0e227ca42119000a7b39d3567af26b9dd9a7" gracePeriod=600 Dec 10 11:34:27 crc kubenswrapper[4780]: I1210 11:34:27.911218 4780 generic.go:334] "Generic (PLEG): container finished" podID="6bf1dca1-b191-4796-b326-baac53e84045" containerID="4894a3439f4512784fa82a7e629e0e227ca42119000a7b39d3567af26b9dd9a7" exitCode=0 Dec 10 11:34:27 crc kubenswrapper[4780]: I1210 11:34:27.911304 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" event={"ID":"6bf1dca1-b191-4796-b326-baac53e84045","Type":"ContainerDied","Data":"4894a3439f4512784fa82a7e629e0e227ca42119000a7b39d3567af26b9dd9a7"} Dec 10 11:34:27 crc kubenswrapper[4780]: I1210 11:34:27.911790 4780 scope.go:117] "RemoveContainer" containerID="c10af5b7a9461382e4108cdd2bb73a0e03a7bf28be1decd85891fe9f2dbf6208" Dec 10 11:34:28 crc kubenswrapper[4780]: I1210 11:34:28.940111 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" event={"ID":"6bf1dca1-b191-4796-b326-baac53e84045","Type":"ContainerStarted","Data":"ad1d621b1a826491d785af0d16af5702804ee2ac6c2b34195a2b38cec7226ce1"} Dec 10 11:34:29 crc kubenswrapper[4780]: E1210 11:34:29.974686 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 11:34:39 crc kubenswrapper[4780]: E1210 11:34:39.962490 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 11:34:41 crc kubenswrapper[4780]: E1210 11:34:41.962731 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 11:34:53 crc kubenswrapper[4780]: E1210 11:34:53.962220 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 11:34:53 crc kubenswrapper[4780]: E1210 11:34:53.962413 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 11:35:06 crc kubenswrapper[4780]: I1210 11:35:06.883793 4780 kubelet.go:2421] "SyncLoop ADD" source="api" 
pods=["openshift-marketplace/redhat-marketplace-6ptwv"] Dec 10 11:35:06 crc kubenswrapper[4780]: I1210 11:35:06.890116 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-6ptwv" Dec 10 11:35:06 crc kubenswrapper[4780]: I1210 11:35:06.909278 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-6ptwv"] Dec 10 11:35:06 crc kubenswrapper[4780]: I1210 11:35:06.963093 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f1e7e9b7-9ddf-47bd-a640-153a514dc530-utilities\") pod \"redhat-marketplace-6ptwv\" (UID: \"f1e7e9b7-9ddf-47bd-a640-153a514dc530\") " pod="openshift-marketplace/redhat-marketplace-6ptwv" Dec 10 11:35:06 crc kubenswrapper[4780]: I1210 11:35:06.963259 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f1e7e9b7-9ddf-47bd-a640-153a514dc530-catalog-content\") pod \"redhat-marketplace-6ptwv\" (UID: \"f1e7e9b7-9ddf-47bd-a640-153a514dc530\") " pod="openshift-marketplace/redhat-marketplace-6ptwv" Dec 10 11:35:06 crc kubenswrapper[4780]: I1210 11:35:06.963363 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fmfdf\" (UniqueName: \"kubernetes.io/projected/f1e7e9b7-9ddf-47bd-a640-153a514dc530-kube-api-access-fmfdf\") pod \"redhat-marketplace-6ptwv\" (UID: \"f1e7e9b7-9ddf-47bd-a640-153a514dc530\") " pod="openshift-marketplace/redhat-marketplace-6ptwv" Dec 10 11:35:06 crc kubenswrapper[4780]: E1210 11:35:06.964055 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 11:35:07 crc kubenswrapper[4780]: I1210 11:35:07.065887 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f1e7e9b7-9ddf-47bd-a640-153a514dc530-catalog-content\") pod \"redhat-marketplace-6ptwv\" (UID: \"f1e7e9b7-9ddf-47bd-a640-153a514dc530\") " pod="openshift-marketplace/redhat-marketplace-6ptwv" Dec 10 11:35:07 crc kubenswrapper[4780]: I1210 11:35:07.066309 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fmfdf\" (UniqueName: \"kubernetes.io/projected/f1e7e9b7-9ddf-47bd-a640-153a514dc530-kube-api-access-fmfdf\") pod \"redhat-marketplace-6ptwv\" (UID: \"f1e7e9b7-9ddf-47bd-a640-153a514dc530\") " pod="openshift-marketplace/redhat-marketplace-6ptwv" Dec 10 11:35:07 crc kubenswrapper[4780]: I1210 11:35:07.066623 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f1e7e9b7-9ddf-47bd-a640-153a514dc530-utilities\") pod \"redhat-marketplace-6ptwv\" (UID: \"f1e7e9b7-9ddf-47bd-a640-153a514dc530\") " pod="openshift-marketplace/redhat-marketplace-6ptwv" Dec 10 11:35:07 crc kubenswrapper[4780]: I1210 11:35:07.066695 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f1e7e9b7-9ddf-47bd-a640-153a514dc530-catalog-content\") pod \"redhat-marketplace-6ptwv\" (UID: 
\"f1e7e9b7-9ddf-47bd-a640-153a514dc530\") " pod="openshift-marketplace/redhat-marketplace-6ptwv" Dec 10 11:35:07 crc kubenswrapper[4780]: I1210 11:35:07.067108 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f1e7e9b7-9ddf-47bd-a640-153a514dc530-utilities\") pod \"redhat-marketplace-6ptwv\" (UID: \"f1e7e9b7-9ddf-47bd-a640-153a514dc530\") " pod="openshift-marketplace/redhat-marketplace-6ptwv" Dec 10 11:35:07 crc kubenswrapper[4780]: I1210 11:35:07.092530 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fmfdf\" (UniqueName: \"kubernetes.io/projected/f1e7e9b7-9ddf-47bd-a640-153a514dc530-kube-api-access-fmfdf\") pod \"redhat-marketplace-6ptwv\" (UID: \"f1e7e9b7-9ddf-47bd-a640-153a514dc530\") " pod="openshift-marketplace/redhat-marketplace-6ptwv" Dec 10 11:35:07 crc kubenswrapper[4780]: I1210 11:35:07.232279 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-6ptwv" Dec 10 11:35:07 crc kubenswrapper[4780]: I1210 11:35:07.805382 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-6ptwv"] Dec 10 11:35:08 crc kubenswrapper[4780]: I1210 11:35:08.670972 4780 generic.go:334] "Generic (PLEG): container finished" podID="f1e7e9b7-9ddf-47bd-a640-153a514dc530" containerID="9715cbfcf0e866f24da6354685cda4f51affeee75823ff04f2233237d734e7f0" exitCode=0 Dec 10 11:35:08 crc kubenswrapper[4780]: I1210 11:35:08.671187 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-6ptwv" event={"ID":"f1e7e9b7-9ddf-47bd-a640-153a514dc530","Type":"ContainerDied","Data":"9715cbfcf0e866f24da6354685cda4f51affeee75823ff04f2233237d734e7f0"} Dec 10 11:35:08 crc kubenswrapper[4780]: I1210 11:35:08.671318 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-6ptwv" event={"ID":"f1e7e9b7-9ddf-47bd-a640-153a514dc530","Type":"ContainerStarted","Data":"dd69fe0ebf33d1410d2418d182b509a6c92ca54985a07dff361cd12bfa560a26"} Dec 10 11:35:08 crc kubenswrapper[4780]: E1210 11:35:08.960879 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 11:35:09 crc kubenswrapper[4780]: I1210 11:35:09.702077 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-6ptwv" event={"ID":"f1e7e9b7-9ddf-47bd-a640-153a514dc530","Type":"ContainerStarted","Data":"b8ff16b189a49ff8396c2b003ffe0add01bff1ebfeb3a1d4da07ecebc05d0e7a"} Dec 10 11:35:10 crc kubenswrapper[4780]: I1210 11:35:10.714951 4780 generic.go:334] "Generic (PLEG): container finished" podID="f1e7e9b7-9ddf-47bd-a640-153a514dc530" containerID="b8ff16b189a49ff8396c2b003ffe0add01bff1ebfeb3a1d4da07ecebc05d0e7a" exitCode=0 Dec 10 11:35:10 crc kubenswrapper[4780]: I1210 11:35:10.715018 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-6ptwv" event={"ID":"f1e7e9b7-9ddf-47bd-a640-153a514dc530","Type":"ContainerDied","Data":"b8ff16b189a49ff8396c2b003ffe0add01bff1ebfeb3a1d4da07ecebc05d0e7a"} Dec 10 11:35:11 crc kubenswrapper[4780]: I1210 11:35:11.730006 4780 kubelet.go:2453] "SyncLoop (PLEG): event for 
pod" pod="openshift-marketplace/redhat-marketplace-6ptwv" event={"ID":"f1e7e9b7-9ddf-47bd-a640-153a514dc530","Type":"ContainerStarted","Data":"497bcc00192e8766d6b1ff9bc08709da653ec26797aa1a77da03cdaea1b9619c"} Dec 10 11:35:11 crc kubenswrapper[4780]: I1210 11:35:11.759217 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-6ptwv" podStartSLOduration=3.332866369 podStartE2EDuration="5.759195436s" podCreationTimestamp="2025-12-10 11:35:06 +0000 UTC" firstStartedPulling="2025-12-10 11:35:08.673889899 +0000 UTC m=+3013.527283352" lastFinishedPulling="2025-12-10 11:35:11.100218966 +0000 UTC m=+3015.953612419" observedRunningTime="2025-12-10 11:35:11.755316976 +0000 UTC m=+3016.608710449" watchObservedRunningTime="2025-12-10 11:35:11.759195436 +0000 UTC m=+3016.612588879" Dec 10 11:35:17 crc kubenswrapper[4780]: I1210 11:35:17.233098 4780 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-6ptwv" Dec 10 11:35:17 crc kubenswrapper[4780]: I1210 11:35:17.234074 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-6ptwv" Dec 10 11:35:17 crc kubenswrapper[4780]: I1210 11:35:17.301535 4780 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-6ptwv" Dec 10 11:35:17 crc kubenswrapper[4780]: I1210 11:35:17.902239 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-6ptwv" Dec 10 11:35:17 crc kubenswrapper[4780]: E1210 11:35:17.964633 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 11:35:17 crc kubenswrapper[4780]: I1210 11:35:17.974028 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-6ptwv"] Dec 10 11:35:19 crc kubenswrapper[4780]: I1210 11:35:19.866727 4780 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-6ptwv" podUID="f1e7e9b7-9ddf-47bd-a640-153a514dc530" containerName="registry-server" containerID="cri-o://497bcc00192e8766d6b1ff9bc08709da653ec26797aa1a77da03cdaea1b9619c" gracePeriod=2 Dec 10 11:35:20 crc kubenswrapper[4780]: I1210 11:35:20.505163 4780 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-6ptwv" Dec 10 11:35:20 crc kubenswrapper[4780]: I1210 11:35:20.670524 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fmfdf\" (UniqueName: \"kubernetes.io/projected/f1e7e9b7-9ddf-47bd-a640-153a514dc530-kube-api-access-fmfdf\") pod \"f1e7e9b7-9ddf-47bd-a640-153a514dc530\" (UID: \"f1e7e9b7-9ddf-47bd-a640-153a514dc530\") " Dec 10 11:35:20 crc kubenswrapper[4780]: I1210 11:35:20.670658 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f1e7e9b7-9ddf-47bd-a640-153a514dc530-utilities\") pod \"f1e7e9b7-9ddf-47bd-a640-153a514dc530\" (UID: \"f1e7e9b7-9ddf-47bd-a640-153a514dc530\") " Dec 10 11:35:20 crc kubenswrapper[4780]: I1210 11:35:20.670774 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f1e7e9b7-9ddf-47bd-a640-153a514dc530-catalog-content\") pod \"f1e7e9b7-9ddf-47bd-a640-153a514dc530\" (UID: \"f1e7e9b7-9ddf-47bd-a640-153a514dc530\") " Dec 10 11:35:20 crc kubenswrapper[4780]: I1210 11:35:20.673586 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f1e7e9b7-9ddf-47bd-a640-153a514dc530-utilities" (OuterVolumeSpecName: "utilities") pod "f1e7e9b7-9ddf-47bd-a640-153a514dc530" (UID: "f1e7e9b7-9ddf-47bd-a640-153a514dc530"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 11:35:20 crc kubenswrapper[4780]: I1210 11:35:20.680203 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f1e7e9b7-9ddf-47bd-a640-153a514dc530-kube-api-access-fmfdf" (OuterVolumeSpecName: "kube-api-access-fmfdf") pod "f1e7e9b7-9ddf-47bd-a640-153a514dc530" (UID: "f1e7e9b7-9ddf-47bd-a640-153a514dc530"). InnerVolumeSpecName "kube-api-access-fmfdf". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:35:20 crc kubenswrapper[4780]: I1210 11:35:20.691588 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f1e7e9b7-9ddf-47bd-a640-153a514dc530-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "f1e7e9b7-9ddf-47bd-a640-153a514dc530" (UID: "f1e7e9b7-9ddf-47bd-a640-153a514dc530"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 11:35:20 crc kubenswrapper[4780]: I1210 11:35:20.773993 4780 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fmfdf\" (UniqueName: \"kubernetes.io/projected/f1e7e9b7-9ddf-47bd-a640-153a514dc530-kube-api-access-fmfdf\") on node \"crc\" DevicePath \"\"" Dec 10 11:35:20 crc kubenswrapper[4780]: I1210 11:35:20.774036 4780 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f1e7e9b7-9ddf-47bd-a640-153a514dc530-utilities\") on node \"crc\" DevicePath \"\"" Dec 10 11:35:20 crc kubenswrapper[4780]: I1210 11:35:20.774045 4780 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f1e7e9b7-9ddf-47bd-a640-153a514dc530-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 10 11:35:20 crc kubenswrapper[4780]: I1210 11:35:20.882141 4780 generic.go:334] "Generic (PLEG): container finished" podID="f1e7e9b7-9ddf-47bd-a640-153a514dc530" containerID="497bcc00192e8766d6b1ff9bc08709da653ec26797aa1a77da03cdaea1b9619c" exitCode=0 Dec 10 11:35:20 crc kubenswrapper[4780]: I1210 11:35:20.882210 4780 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-6ptwv" Dec 10 11:35:20 crc kubenswrapper[4780]: I1210 11:35:20.882221 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-6ptwv" event={"ID":"f1e7e9b7-9ddf-47bd-a640-153a514dc530","Type":"ContainerDied","Data":"497bcc00192e8766d6b1ff9bc08709da653ec26797aa1a77da03cdaea1b9619c"} Dec 10 11:35:20 crc kubenswrapper[4780]: I1210 11:35:20.882307 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-6ptwv" event={"ID":"f1e7e9b7-9ddf-47bd-a640-153a514dc530","Type":"ContainerDied","Data":"dd69fe0ebf33d1410d2418d182b509a6c92ca54985a07dff361cd12bfa560a26"} Dec 10 11:35:20 crc kubenswrapper[4780]: I1210 11:35:20.882334 4780 scope.go:117] "RemoveContainer" containerID="497bcc00192e8766d6b1ff9bc08709da653ec26797aa1a77da03cdaea1b9619c" Dec 10 11:35:20 crc kubenswrapper[4780]: I1210 11:35:20.917223 4780 scope.go:117] "RemoveContainer" containerID="b8ff16b189a49ff8396c2b003ffe0add01bff1ebfeb3a1d4da07ecebc05d0e7a" Dec 10 11:35:20 crc kubenswrapper[4780]: I1210 11:35:20.944464 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-6ptwv"] Dec 10 11:35:20 crc kubenswrapper[4780]: I1210 11:35:20.958060 4780 scope.go:117] "RemoveContainer" containerID="9715cbfcf0e866f24da6354685cda4f51affeee75823ff04f2233237d734e7f0" Dec 10 11:35:20 crc kubenswrapper[4780]: I1210 11:35:20.962816 4780 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-6ptwv"] Dec 10 11:35:20 crc kubenswrapper[4780]: E1210 11:35:20.962915 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 11:35:21 crc kubenswrapper[4780]: I1210 11:35:21.030001 4780 scope.go:117] "RemoveContainer" containerID="497bcc00192e8766d6b1ff9bc08709da653ec26797aa1a77da03cdaea1b9619c" Dec 10 11:35:21 crc kubenswrapper[4780]: E1210 11:35:21.030728 4780 log.go:32] "ContainerStatus from runtime service failed" 
err="rpc error: code = NotFound desc = could not find container \"497bcc00192e8766d6b1ff9bc08709da653ec26797aa1a77da03cdaea1b9619c\": container with ID starting with 497bcc00192e8766d6b1ff9bc08709da653ec26797aa1a77da03cdaea1b9619c not found: ID does not exist" containerID="497bcc00192e8766d6b1ff9bc08709da653ec26797aa1a77da03cdaea1b9619c" Dec 10 11:35:21 crc kubenswrapper[4780]: I1210 11:35:21.030781 4780 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"497bcc00192e8766d6b1ff9bc08709da653ec26797aa1a77da03cdaea1b9619c"} err="failed to get container status \"497bcc00192e8766d6b1ff9bc08709da653ec26797aa1a77da03cdaea1b9619c\": rpc error: code = NotFound desc = could not find container \"497bcc00192e8766d6b1ff9bc08709da653ec26797aa1a77da03cdaea1b9619c\": container with ID starting with 497bcc00192e8766d6b1ff9bc08709da653ec26797aa1a77da03cdaea1b9619c not found: ID does not exist" Dec 10 11:35:21 crc kubenswrapper[4780]: I1210 11:35:21.030816 4780 scope.go:117] "RemoveContainer" containerID="b8ff16b189a49ff8396c2b003ffe0add01bff1ebfeb3a1d4da07ecebc05d0e7a" Dec 10 11:35:21 crc kubenswrapper[4780]: E1210 11:35:21.031839 4780 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b8ff16b189a49ff8396c2b003ffe0add01bff1ebfeb3a1d4da07ecebc05d0e7a\": container with ID starting with b8ff16b189a49ff8396c2b003ffe0add01bff1ebfeb3a1d4da07ecebc05d0e7a not found: ID does not exist" containerID="b8ff16b189a49ff8396c2b003ffe0add01bff1ebfeb3a1d4da07ecebc05d0e7a" Dec 10 11:35:21 crc kubenswrapper[4780]: I1210 11:35:21.031892 4780 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b8ff16b189a49ff8396c2b003ffe0add01bff1ebfeb3a1d4da07ecebc05d0e7a"} err="failed to get container status \"b8ff16b189a49ff8396c2b003ffe0add01bff1ebfeb3a1d4da07ecebc05d0e7a\": rpc error: code = NotFound desc = could not find container \"b8ff16b189a49ff8396c2b003ffe0add01bff1ebfeb3a1d4da07ecebc05d0e7a\": container with ID starting with b8ff16b189a49ff8396c2b003ffe0add01bff1ebfeb3a1d4da07ecebc05d0e7a not found: ID does not exist" Dec 10 11:35:21 crc kubenswrapper[4780]: I1210 11:35:21.032098 4780 scope.go:117] "RemoveContainer" containerID="9715cbfcf0e866f24da6354685cda4f51affeee75823ff04f2233237d734e7f0" Dec 10 11:35:21 crc kubenswrapper[4780]: E1210 11:35:21.032626 4780 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9715cbfcf0e866f24da6354685cda4f51affeee75823ff04f2233237d734e7f0\": container with ID starting with 9715cbfcf0e866f24da6354685cda4f51affeee75823ff04f2233237d734e7f0 not found: ID does not exist" containerID="9715cbfcf0e866f24da6354685cda4f51affeee75823ff04f2233237d734e7f0" Dec 10 11:35:21 crc kubenswrapper[4780]: I1210 11:35:21.032669 4780 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9715cbfcf0e866f24da6354685cda4f51affeee75823ff04f2233237d734e7f0"} err="failed to get container status \"9715cbfcf0e866f24da6354685cda4f51affeee75823ff04f2233237d734e7f0\": rpc error: code = NotFound desc = could not find container \"9715cbfcf0e866f24da6354685cda4f51affeee75823ff04f2233237d734e7f0\": container with ID starting with 9715cbfcf0e866f24da6354685cda4f51affeee75823ff04f2233237d734e7f0 not found: ID does not exist" Dec 10 11:35:21 crc kubenswrapper[4780]: I1210 11:35:21.977433 4780 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" 
podUID="f1e7e9b7-9ddf-47bd-a640-153a514dc530" path="/var/lib/kubelet/pods/f1e7e9b7-9ddf-47bd-a640-153a514dc530/volumes" Dec 10 11:35:32 crc kubenswrapper[4780]: E1210 11:35:32.961281 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 11:35:35 crc kubenswrapper[4780]: E1210 11:35:35.970047 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 11:35:47 crc kubenswrapper[4780]: E1210 11:35:47.962101 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 11:35:50 crc kubenswrapper[4780]: E1210 11:35:50.962573 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 11:35:59 crc kubenswrapper[4780]: E1210 11:35:59.962493 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 11:36:03 crc kubenswrapper[4780]: E1210 11:36:03.962058 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 11:36:12 crc kubenswrapper[4780]: E1210 11:36:12.961145 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 11:36:16 crc kubenswrapper[4780]: E1210 11:36:16.961753 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 11:36:25 crc kubenswrapper[4780]: E1210 11:36:25.993882 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off 
pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 11:36:27 crc kubenswrapper[4780]: I1210 11:36:27.475865 4780 patch_prober.go:28] interesting pod/machine-config-daemon-xhdr5 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 10 11:36:27 crc kubenswrapper[4780]: I1210 11:36:27.476290 4780 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" podUID="6bf1dca1-b191-4796-b326-baac53e84045" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 10 11:36:31 crc kubenswrapper[4780]: I1210 11:36:31.723810 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-48qh8"] Dec 10 11:36:31 crc kubenswrapper[4780]: E1210 11:36:31.725553 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f1e7e9b7-9ddf-47bd-a640-153a514dc530" containerName="extract-utilities" Dec 10 11:36:31 crc kubenswrapper[4780]: I1210 11:36:31.725575 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="f1e7e9b7-9ddf-47bd-a640-153a514dc530" containerName="extract-utilities" Dec 10 11:36:31 crc kubenswrapper[4780]: E1210 11:36:31.725619 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f1e7e9b7-9ddf-47bd-a640-153a514dc530" containerName="extract-content" Dec 10 11:36:31 crc kubenswrapper[4780]: I1210 11:36:31.725626 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="f1e7e9b7-9ddf-47bd-a640-153a514dc530" containerName="extract-content" Dec 10 11:36:31 crc kubenswrapper[4780]: E1210 11:36:31.725701 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f1e7e9b7-9ddf-47bd-a640-153a514dc530" containerName="registry-server" Dec 10 11:36:31 crc kubenswrapper[4780]: I1210 11:36:31.725709 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="f1e7e9b7-9ddf-47bd-a640-153a514dc530" containerName="registry-server" Dec 10 11:36:31 crc kubenswrapper[4780]: I1210 11:36:31.726056 4780 memory_manager.go:354] "RemoveStaleState removing state" podUID="f1e7e9b7-9ddf-47bd-a640-153a514dc530" containerName="registry-server" Dec 10 11:36:31 crc kubenswrapper[4780]: I1210 11:36:31.728595 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-48qh8" Dec 10 11:36:31 crc kubenswrapper[4780]: I1210 11:36:31.740392 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-48qh8"] Dec 10 11:36:31 crc kubenswrapper[4780]: I1210 11:36:31.848122 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9524f5bf-34e9-41a7-9932-4843a0d4e94c-catalog-content\") pod \"community-operators-48qh8\" (UID: \"9524f5bf-34e9-41a7-9932-4843a0d4e94c\") " pod="openshift-marketplace/community-operators-48qh8" Dec 10 11:36:31 crc kubenswrapper[4780]: I1210 11:36:31.848222 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8kvw8\" (UniqueName: \"kubernetes.io/projected/9524f5bf-34e9-41a7-9932-4843a0d4e94c-kube-api-access-8kvw8\") pod \"community-operators-48qh8\" (UID: \"9524f5bf-34e9-41a7-9932-4843a0d4e94c\") " pod="openshift-marketplace/community-operators-48qh8" Dec 10 11:36:31 crc kubenswrapper[4780]: I1210 11:36:31.848601 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9524f5bf-34e9-41a7-9932-4843a0d4e94c-utilities\") pod \"community-operators-48qh8\" (UID: \"9524f5bf-34e9-41a7-9932-4843a0d4e94c\") " pod="openshift-marketplace/community-operators-48qh8" Dec 10 11:36:31 crc kubenswrapper[4780]: I1210 11:36:31.951707 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9524f5bf-34e9-41a7-9932-4843a0d4e94c-catalog-content\") pod \"community-operators-48qh8\" (UID: \"9524f5bf-34e9-41a7-9932-4843a0d4e94c\") " pod="openshift-marketplace/community-operators-48qh8" Dec 10 11:36:31 crc kubenswrapper[4780]: I1210 11:36:31.951781 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8kvw8\" (UniqueName: \"kubernetes.io/projected/9524f5bf-34e9-41a7-9932-4843a0d4e94c-kube-api-access-8kvw8\") pod \"community-operators-48qh8\" (UID: \"9524f5bf-34e9-41a7-9932-4843a0d4e94c\") " pod="openshift-marketplace/community-operators-48qh8" Dec 10 11:36:31 crc kubenswrapper[4780]: I1210 11:36:31.951968 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9524f5bf-34e9-41a7-9932-4843a0d4e94c-utilities\") pod \"community-operators-48qh8\" (UID: \"9524f5bf-34e9-41a7-9932-4843a0d4e94c\") " pod="openshift-marketplace/community-operators-48qh8" Dec 10 11:36:31 crc kubenswrapper[4780]: I1210 11:36:31.952912 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9524f5bf-34e9-41a7-9932-4843a0d4e94c-utilities\") pod \"community-operators-48qh8\" (UID: \"9524f5bf-34e9-41a7-9932-4843a0d4e94c\") " pod="openshift-marketplace/community-operators-48qh8" Dec 10 11:36:31 crc kubenswrapper[4780]: I1210 11:36:31.952954 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9524f5bf-34e9-41a7-9932-4843a0d4e94c-catalog-content\") pod \"community-operators-48qh8\" (UID: \"9524f5bf-34e9-41a7-9932-4843a0d4e94c\") " pod="openshift-marketplace/community-operators-48qh8" Dec 10 11:36:31 crc kubenswrapper[4780]: E1210 11:36:31.963590 4780 pod_workers.go:1301] "Error 
syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 11:36:32 crc kubenswrapper[4780]: I1210 11:36:32.004489 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8kvw8\" (UniqueName: \"kubernetes.io/projected/9524f5bf-34e9-41a7-9932-4843a0d4e94c-kube-api-access-8kvw8\") pod \"community-operators-48qh8\" (UID: \"9524f5bf-34e9-41a7-9932-4843a0d4e94c\") " pod="openshift-marketplace/community-operators-48qh8" Dec 10 11:36:32 crc kubenswrapper[4780]: I1210 11:36:32.085667 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-48qh8" Dec 10 11:36:33 crc kubenswrapper[4780]: I1210 11:36:33.388128 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-48qh8"] Dec 10 11:36:34 crc kubenswrapper[4780]: I1210 11:36:34.377584 4780 generic.go:334] "Generic (PLEG): container finished" podID="9524f5bf-34e9-41a7-9932-4843a0d4e94c" containerID="719f902301a3c7e1d6aeee38a0778976cd37d0ba1a0f8148bdbb6480e3589dc6" exitCode=0 Dec 10 11:36:34 crc kubenswrapper[4780]: I1210 11:36:34.377663 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-48qh8" event={"ID":"9524f5bf-34e9-41a7-9932-4843a0d4e94c","Type":"ContainerDied","Data":"719f902301a3c7e1d6aeee38a0778976cd37d0ba1a0f8148bdbb6480e3589dc6"} Dec 10 11:36:34 crc kubenswrapper[4780]: I1210 11:36:34.378136 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-48qh8" event={"ID":"9524f5bf-34e9-41a7-9932-4843a0d4e94c","Type":"ContainerStarted","Data":"70fd300e8a6997acf4aaab6a0248849783d156fff4a32f6477cb5ba4061604e1"} Dec 10 11:36:34 crc kubenswrapper[4780]: I1210 11:36:34.380550 4780 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Dec 10 11:36:36 crc kubenswrapper[4780]: I1210 11:36:36.408015 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-48qh8" event={"ID":"9524f5bf-34e9-41a7-9932-4843a0d4e94c","Type":"ContainerStarted","Data":"5d83918a126f45cf33f62710ee1354c43a01369d14feb1ad14aacbf65176db8d"} Dec 10 11:36:37 crc kubenswrapper[4780]: I1210 11:36:37.424228 4780 generic.go:334] "Generic (PLEG): container finished" podID="9524f5bf-34e9-41a7-9932-4843a0d4e94c" containerID="5d83918a126f45cf33f62710ee1354c43a01369d14feb1ad14aacbf65176db8d" exitCode=0 Dec 10 11:36:37 crc kubenswrapper[4780]: I1210 11:36:37.424429 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-48qh8" event={"ID":"9524f5bf-34e9-41a7-9932-4843a0d4e94c","Type":"ContainerDied","Data":"5d83918a126f45cf33f62710ee1354c43a01369d14feb1ad14aacbf65176db8d"} Dec 10 11:36:38 crc kubenswrapper[4780]: E1210 11:36:38.344485 4780 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested" Dec 10 11:36:38 crc kubenswrapper[4780]: E1210 11:36:38.344857 4780 kuberuntime_image.go:55] "Failed to pull image" err="initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested" Dec 10 11:36:38 crc kubenswrapper[4780]: E1210 11:36:38.345064 4780 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:heat-db-sync,Image:quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested,Command:[/bin/bash],Args:[-c /usr/bin/heat-manage --config-dir /etc/heat/heat.conf.d db_sync],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:true,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/heat/heat.conf.d/00-default.conf,SubPath:00-default.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:false,MountPath:/etc/heat/heat.conf.d/01-custom.conf,SubPath:01-custom.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/my.cnf,SubPath:my.cnf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-fh6ms,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42418,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:*42418,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod heat-db-sync-nd4t7_openstack(4ba2892c-316e-4819-a33c-d7b2b6803553): ErrImagePull: initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine" logger="UnhandledError" Dec 10 11:36:38 crc kubenswrapper[4780]: E1210 11:36:38.346293 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ErrImagePull: \"initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 11:36:39 crc kubenswrapper[4780]: I1210 11:36:39.453698 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-48qh8" event={"ID":"9524f5bf-34e9-41a7-9932-4843a0d4e94c","Type":"ContainerStarted","Data":"1612913b637a45b763945774610c93abaf8b001a793ef7e951dfe8130e12e0a3"} Dec 10 11:36:39 crc kubenswrapper[4780]: I1210 11:36:39.492759 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-48qh8" podStartSLOduration=4.657898831 podStartE2EDuration="8.492724087s" podCreationTimestamp="2025-12-10 11:36:31 +0000 UTC" firstStartedPulling="2025-12-10 11:36:34.380232227 +0000 UTC m=+3099.233625670" lastFinishedPulling="2025-12-10 11:36:38.215057473 +0000 UTC m=+3103.068450926" observedRunningTime="2025-12-10 11:36:39.474504352 +0000 UTC m=+3104.327897805" watchObservedRunningTime="2025-12-10 11:36:39.492724087 +0000 UTC m=+3104.346117540" Dec 10 11:36:42 crc kubenswrapper[4780]: I1210 11:36:42.087244 4780 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-48qh8" Dec 10 11:36:42 crc kubenswrapper[4780]: I1210 11:36:42.087637 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-48qh8" Dec 10 11:36:42 crc kubenswrapper[4780]: I1210 11:36:42.147614 4780 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-48qh8" Dec 10 11:36:42 crc kubenswrapper[4780]: E1210 11:36:42.963552 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 11:36:50 crc kubenswrapper[4780]: E1210 11:36:50.961940 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 11:36:52 crc kubenswrapper[4780]: I1210 11:36:52.144127 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-48qh8" Dec 10 11:36:52 crc kubenswrapper[4780]: I1210 11:36:52.206409 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-48qh8"] Dec 10 11:36:52 crc kubenswrapper[4780]: I1210 11:36:52.658789 4780 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-48qh8" 
podUID="9524f5bf-34e9-41a7-9932-4843a0d4e94c" containerName="registry-server" containerID="cri-o://1612913b637a45b763945774610c93abaf8b001a793ef7e951dfe8130e12e0a3" gracePeriod=2 Dec 10 11:36:53 crc kubenswrapper[4780]: I1210 11:36:53.256755 4780 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-48qh8" Dec 10 11:36:53 crc kubenswrapper[4780]: I1210 11:36:53.642574 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9524f5bf-34e9-41a7-9932-4843a0d4e94c-catalog-content\") pod \"9524f5bf-34e9-41a7-9932-4843a0d4e94c\" (UID: \"9524f5bf-34e9-41a7-9932-4843a0d4e94c\") " Dec 10 11:36:53 crc kubenswrapper[4780]: I1210 11:36:53.642652 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8kvw8\" (UniqueName: \"kubernetes.io/projected/9524f5bf-34e9-41a7-9932-4843a0d4e94c-kube-api-access-8kvw8\") pod \"9524f5bf-34e9-41a7-9932-4843a0d4e94c\" (UID: \"9524f5bf-34e9-41a7-9932-4843a0d4e94c\") " Dec 10 11:36:53 crc kubenswrapper[4780]: I1210 11:36:53.642799 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9524f5bf-34e9-41a7-9932-4843a0d4e94c-utilities\") pod \"9524f5bf-34e9-41a7-9932-4843a0d4e94c\" (UID: \"9524f5bf-34e9-41a7-9932-4843a0d4e94c\") " Dec 10 11:36:53 crc kubenswrapper[4780]: I1210 11:36:53.649941 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9524f5bf-34e9-41a7-9932-4843a0d4e94c-utilities" (OuterVolumeSpecName: "utilities") pod "9524f5bf-34e9-41a7-9932-4843a0d4e94c" (UID: "9524f5bf-34e9-41a7-9932-4843a0d4e94c"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 11:36:53 crc kubenswrapper[4780]: I1210 11:36:53.668973 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9524f5bf-34e9-41a7-9932-4843a0d4e94c-kube-api-access-8kvw8" (OuterVolumeSpecName: "kube-api-access-8kvw8") pod "9524f5bf-34e9-41a7-9932-4843a0d4e94c" (UID: "9524f5bf-34e9-41a7-9932-4843a0d4e94c"). InnerVolumeSpecName "kube-api-access-8kvw8". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:36:53 crc kubenswrapper[4780]: I1210 11:36:53.681020 4780 generic.go:334] "Generic (PLEG): container finished" podID="9524f5bf-34e9-41a7-9932-4843a0d4e94c" containerID="1612913b637a45b763945774610c93abaf8b001a793ef7e951dfe8130e12e0a3" exitCode=0 Dec 10 11:36:53 crc kubenswrapper[4780]: I1210 11:36:53.681089 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-48qh8" event={"ID":"9524f5bf-34e9-41a7-9932-4843a0d4e94c","Type":"ContainerDied","Data":"1612913b637a45b763945774610c93abaf8b001a793ef7e951dfe8130e12e0a3"} Dec 10 11:36:53 crc kubenswrapper[4780]: I1210 11:36:53.681139 4780 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-48qh8" Dec 10 11:36:53 crc kubenswrapper[4780]: I1210 11:36:53.681200 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-48qh8" event={"ID":"9524f5bf-34e9-41a7-9932-4843a0d4e94c","Type":"ContainerDied","Data":"70fd300e8a6997acf4aaab6a0248849783d156fff4a32f6477cb5ba4061604e1"} Dec 10 11:36:53 crc kubenswrapper[4780]: I1210 11:36:53.681224 4780 scope.go:117] "RemoveContainer" containerID="1612913b637a45b763945774610c93abaf8b001a793ef7e951dfe8130e12e0a3" Dec 10 11:36:53 crc kubenswrapper[4780]: I1210 11:36:53.714196 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9524f5bf-34e9-41a7-9932-4843a0d4e94c-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "9524f5bf-34e9-41a7-9932-4843a0d4e94c" (UID: "9524f5bf-34e9-41a7-9932-4843a0d4e94c"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 11:36:53 crc kubenswrapper[4780]: I1210 11:36:53.723130 4780 scope.go:117] "RemoveContainer" containerID="5d83918a126f45cf33f62710ee1354c43a01369d14feb1ad14aacbf65176db8d" Dec 10 11:36:53 crc kubenswrapper[4780]: I1210 11:36:53.746240 4780 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9524f5bf-34e9-41a7-9932-4843a0d4e94c-utilities\") on node \"crc\" DevicePath \"\"" Dec 10 11:36:53 crc kubenswrapper[4780]: I1210 11:36:53.746282 4780 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9524f5bf-34e9-41a7-9932-4843a0d4e94c-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 10 11:36:53 crc kubenswrapper[4780]: I1210 11:36:53.746295 4780 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8kvw8\" (UniqueName: \"kubernetes.io/projected/9524f5bf-34e9-41a7-9932-4843a0d4e94c-kube-api-access-8kvw8\") on node \"crc\" DevicePath \"\"" Dec 10 11:36:53 crc kubenswrapper[4780]: I1210 11:36:53.750692 4780 scope.go:117] "RemoveContainer" containerID="719f902301a3c7e1d6aeee38a0778976cd37d0ba1a0f8148bdbb6480e3589dc6" Dec 10 11:36:53 crc kubenswrapper[4780]: I1210 11:36:53.812248 4780 scope.go:117] "RemoveContainer" containerID="1612913b637a45b763945774610c93abaf8b001a793ef7e951dfe8130e12e0a3" Dec 10 11:36:53 crc kubenswrapper[4780]: E1210 11:36:53.813195 4780 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1612913b637a45b763945774610c93abaf8b001a793ef7e951dfe8130e12e0a3\": container with ID starting with 1612913b637a45b763945774610c93abaf8b001a793ef7e951dfe8130e12e0a3 not found: ID does not exist" containerID="1612913b637a45b763945774610c93abaf8b001a793ef7e951dfe8130e12e0a3" Dec 10 11:36:53 crc kubenswrapper[4780]: I1210 11:36:53.813245 4780 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1612913b637a45b763945774610c93abaf8b001a793ef7e951dfe8130e12e0a3"} err="failed to get container status \"1612913b637a45b763945774610c93abaf8b001a793ef7e951dfe8130e12e0a3\": rpc error: code = NotFound desc = could not find container \"1612913b637a45b763945774610c93abaf8b001a793ef7e951dfe8130e12e0a3\": container with ID starting with 1612913b637a45b763945774610c93abaf8b001a793ef7e951dfe8130e12e0a3 not found: ID does not exist" Dec 10 11:36:53 crc kubenswrapper[4780]: I1210 11:36:53.813291 4780 scope.go:117] "RemoveContainer" 
containerID="5d83918a126f45cf33f62710ee1354c43a01369d14feb1ad14aacbf65176db8d" Dec 10 11:36:53 crc kubenswrapper[4780]: E1210 11:36:53.813865 4780 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5d83918a126f45cf33f62710ee1354c43a01369d14feb1ad14aacbf65176db8d\": container with ID starting with 5d83918a126f45cf33f62710ee1354c43a01369d14feb1ad14aacbf65176db8d not found: ID does not exist" containerID="5d83918a126f45cf33f62710ee1354c43a01369d14feb1ad14aacbf65176db8d" Dec 10 11:36:53 crc kubenswrapper[4780]: I1210 11:36:53.813892 4780 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5d83918a126f45cf33f62710ee1354c43a01369d14feb1ad14aacbf65176db8d"} err="failed to get container status \"5d83918a126f45cf33f62710ee1354c43a01369d14feb1ad14aacbf65176db8d\": rpc error: code = NotFound desc = could not find container \"5d83918a126f45cf33f62710ee1354c43a01369d14feb1ad14aacbf65176db8d\": container with ID starting with 5d83918a126f45cf33f62710ee1354c43a01369d14feb1ad14aacbf65176db8d not found: ID does not exist" Dec 10 11:36:53 crc kubenswrapper[4780]: I1210 11:36:53.813908 4780 scope.go:117] "RemoveContainer" containerID="719f902301a3c7e1d6aeee38a0778976cd37d0ba1a0f8148bdbb6480e3589dc6" Dec 10 11:36:53 crc kubenswrapper[4780]: E1210 11:36:53.814399 4780 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"719f902301a3c7e1d6aeee38a0778976cd37d0ba1a0f8148bdbb6480e3589dc6\": container with ID starting with 719f902301a3c7e1d6aeee38a0778976cd37d0ba1a0f8148bdbb6480e3589dc6 not found: ID does not exist" containerID="719f902301a3c7e1d6aeee38a0778976cd37d0ba1a0f8148bdbb6480e3589dc6" Dec 10 11:36:53 crc kubenswrapper[4780]: I1210 11:36:53.814528 4780 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"719f902301a3c7e1d6aeee38a0778976cd37d0ba1a0f8148bdbb6480e3589dc6"} err="failed to get container status \"719f902301a3c7e1d6aeee38a0778976cd37d0ba1a0f8148bdbb6480e3589dc6\": rpc error: code = NotFound desc = could not find container \"719f902301a3c7e1d6aeee38a0778976cd37d0ba1a0f8148bdbb6480e3589dc6\": container with ID starting with 719f902301a3c7e1d6aeee38a0778976cd37d0ba1a0f8148bdbb6480e3589dc6 not found: ID does not exist" Dec 10 11:36:54 crc kubenswrapper[4780]: I1210 11:36:54.034108 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-48qh8"] Dec 10 11:36:54 crc kubenswrapper[4780]: I1210 11:36:54.045634 4780 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-48qh8"] Dec 10 11:36:55 crc kubenswrapper[4780]: E1210 11:36:55.973704 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 11:36:55 crc kubenswrapper[4780]: I1210 11:36:55.981358 4780 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9524f5bf-34e9-41a7-9932-4843a0d4e94c" path="/var/lib/kubelet/pods/9524f5bf-34e9-41a7-9932-4843a0d4e94c/volumes" Dec 10 11:36:57 crc kubenswrapper[4780]: I1210 11:36:57.475894 4780 patch_prober.go:28] interesting pod/machine-config-daemon-xhdr5 container/machine-config-daemon 
namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 10 11:36:57 crc kubenswrapper[4780]: I1210 11:36:57.476532 4780 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" podUID="6bf1dca1-b191-4796-b326-baac53e84045" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 10 11:37:04 crc kubenswrapper[4780]: E1210 11:37:04.962358 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 11:37:08 crc kubenswrapper[4780]: E1210 11:37:08.103892 4780 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested" Dec 10 11:37:08 crc kubenswrapper[4780]: E1210 11:37:08.104631 4780 kuberuntime_image.go:55] "Failed to pull image" err="initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested" Dec 10 11:37:08 crc kubenswrapper[4780]: E1210 11:37:08.105048 4780 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:ceilometer-central-agent,Image:quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n5d9hcfh66bh66bh89h5cdh97h57ch598h68h5b5h689h56chc5h96h58ch687h5dfh5ddh645h68bhcchcdh56ch56fh9fh654hd4h8dhb9h74h59cq,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/var/lib/openstack/bin,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/openstack/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:ceilometer-central-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-gf2w8,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[/usr/bin/python3 /var/lib/openstack/bin/centralhealth.py],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:300,TimeoutSeconds:5,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ceilometer-0_openstack(317b5b7c-bb08-4441-a2ef-8c2d7390ada6): ErrImagePull: initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" logger="UnhandledError" Dec 10 11:37:08 crc kubenswrapper[4780]: E1210 11:37:08.106284 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ErrImagePull: \"initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 11:37:16 crc kubenswrapper[4780]: E1210 11:37:16.963078 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 11:37:21 crc kubenswrapper[4780]: E1210 11:37:20.963569 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 11:37:27 crc kubenswrapper[4780]: I1210 11:37:27.476232 4780 patch_prober.go:28] interesting pod/machine-config-daemon-xhdr5 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 10 11:37:27 crc kubenswrapper[4780]: I1210 11:37:27.477426 4780 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" podUID="6bf1dca1-b191-4796-b326-baac53e84045" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 10 11:37:27 crc kubenswrapper[4780]: I1210 11:37:27.477573 4780 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" Dec 10 11:37:27 crc kubenswrapper[4780]: I1210 11:37:27.478949 4780 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"ad1d621b1a826491d785af0d16af5702804ee2ac6c2b34195a2b38cec7226ce1"} pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Dec 10 11:37:27 crc kubenswrapper[4780]: I1210 11:37:27.479024 4780 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" podUID="6bf1dca1-b191-4796-b326-baac53e84045" containerName="machine-config-daemon" containerID="cri-o://ad1d621b1a826491d785af0d16af5702804ee2ac6c2b34195a2b38cec7226ce1" gracePeriod=600 Dec 10 11:37:27 crc kubenswrapper[4780]: E1210 11:37:27.622084 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xhdr5_openshift-machine-config-operator(6bf1dca1-b191-4796-b326-baac53e84045)\"" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" podUID="6bf1dca1-b191-4796-b326-baac53e84045" Dec 10 11:37:28 crc kubenswrapper[4780]: I1210 11:37:28.307150 4780 generic.go:334] "Generic (PLEG): container finished" podID="6bf1dca1-b191-4796-b326-baac53e84045" containerID="ad1d621b1a826491d785af0d16af5702804ee2ac6c2b34195a2b38cec7226ce1" exitCode=0 Dec 10 11:37:28 crc kubenswrapper[4780]: I1210 11:37:28.307218 4780 kubelet.go:2453] "SyncLoop (PLEG): event 
for pod" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" event={"ID":"6bf1dca1-b191-4796-b326-baac53e84045","Type":"ContainerDied","Data":"ad1d621b1a826491d785af0d16af5702804ee2ac6c2b34195a2b38cec7226ce1"} Dec 10 11:37:28 crc kubenswrapper[4780]: I1210 11:37:28.307573 4780 scope.go:117] "RemoveContainer" containerID="4894a3439f4512784fa82a7e629e0e227ca42119000a7b39d3567af26b9dd9a7" Dec 10 11:37:28 crc kubenswrapper[4780]: I1210 11:37:28.308607 4780 scope.go:117] "RemoveContainer" containerID="ad1d621b1a826491d785af0d16af5702804ee2ac6c2b34195a2b38cec7226ce1" Dec 10 11:37:28 crc kubenswrapper[4780]: E1210 11:37:28.309056 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xhdr5_openshift-machine-config-operator(6bf1dca1-b191-4796-b326-baac53e84045)\"" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" podUID="6bf1dca1-b191-4796-b326-baac53e84045" Dec 10 11:37:28 crc kubenswrapper[4780]: E1210 11:37:28.960817 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 11:37:35 crc kubenswrapper[4780]: E1210 11:37:35.981355 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 11:37:42 crc kubenswrapper[4780]: I1210 11:37:42.960614 4780 scope.go:117] "RemoveContainer" containerID="ad1d621b1a826491d785af0d16af5702804ee2ac6c2b34195a2b38cec7226ce1" Dec 10 11:37:42 crc kubenswrapper[4780]: E1210 11:37:42.962504 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xhdr5_openshift-machine-config-operator(6bf1dca1-b191-4796-b326-baac53e84045)\"" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" podUID="6bf1dca1-b191-4796-b326-baac53e84045" Dec 10 11:37:43 crc kubenswrapper[4780]: E1210 11:37:43.960757 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 11:37:48 crc kubenswrapper[4780]: E1210 11:37:48.962160 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 11:37:53 crc kubenswrapper[4780]: I1210 11:37:53.960042 4780 scope.go:117] "RemoveContainer" containerID="ad1d621b1a826491d785af0d16af5702804ee2ac6c2b34195a2b38cec7226ce1" Dec 10 11:37:53 
crc kubenswrapper[4780]: E1210 11:37:53.961153 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xhdr5_openshift-machine-config-operator(6bf1dca1-b191-4796-b326-baac53e84045)\"" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" podUID="6bf1dca1-b191-4796-b326-baac53e84045" Dec 10 11:37:57 crc kubenswrapper[4780]: E1210 11:37:57.961379 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 11:38:03 crc kubenswrapper[4780]: E1210 11:38:03.965371 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 11:38:05 crc kubenswrapper[4780]: I1210 11:38:05.984095 4780 scope.go:117] "RemoveContainer" containerID="ad1d621b1a826491d785af0d16af5702804ee2ac6c2b34195a2b38cec7226ce1" Dec 10 11:38:05 crc kubenswrapper[4780]: E1210 11:38:05.985033 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xhdr5_openshift-machine-config-operator(6bf1dca1-b191-4796-b326-baac53e84045)\"" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" podUID="6bf1dca1-b191-4796-b326-baac53e84045" Dec 10 11:38:10 crc kubenswrapper[4780]: E1210 11:38:10.962489 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 11:38:14 crc kubenswrapper[4780]: E1210 11:38:14.963673 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 11:38:16 crc kubenswrapper[4780]: I1210 11:38:16.961798 4780 scope.go:117] "RemoveContainer" containerID="ad1d621b1a826491d785af0d16af5702804ee2ac6c2b34195a2b38cec7226ce1" Dec 10 11:38:16 crc kubenswrapper[4780]: E1210 11:38:16.962803 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xhdr5_openshift-machine-config-operator(6bf1dca1-b191-4796-b326-baac53e84045)\"" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" podUID="6bf1dca1-b191-4796-b326-baac53e84045" Dec 10 11:38:26 crc kubenswrapper[4780]: E1210 11:38:26.007012 4780 pod_workers.go:1301] "Error syncing pod, skipping" 
err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 11:38:27 crc kubenswrapper[4780]: E1210 11:38:27.962596 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 11:38:28 crc kubenswrapper[4780]: I1210 11:38:28.960260 4780 scope.go:117] "RemoveContainer" containerID="ad1d621b1a826491d785af0d16af5702804ee2ac6c2b34195a2b38cec7226ce1" Dec 10 11:38:28 crc kubenswrapper[4780]: E1210 11:38:28.960701 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xhdr5_openshift-machine-config-operator(6bf1dca1-b191-4796-b326-baac53e84045)\"" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" podUID="6bf1dca1-b191-4796-b326-baac53e84045" Dec 10 11:38:37 crc kubenswrapper[4780]: E1210 11:38:37.961842 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 11:38:42 crc kubenswrapper[4780]: I1210 11:38:42.959951 4780 scope.go:117] "RemoveContainer" containerID="ad1d621b1a826491d785af0d16af5702804ee2ac6c2b34195a2b38cec7226ce1" Dec 10 11:38:42 crc kubenswrapper[4780]: E1210 11:38:42.961105 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xhdr5_openshift-machine-config-operator(6bf1dca1-b191-4796-b326-baac53e84045)\"" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" podUID="6bf1dca1-b191-4796-b326-baac53e84045" Dec 10 11:38:42 crc kubenswrapper[4780]: E1210 11:38:42.961749 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 11:38:52 crc kubenswrapper[4780]: E1210 11:38:52.964442 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 11:38:53 crc kubenswrapper[4780]: I1210 11:38:53.958648 4780 scope.go:117] "RemoveContainer" containerID="ad1d621b1a826491d785af0d16af5702804ee2ac6c2b34195a2b38cec7226ce1" Dec 10 11:38:53 crc kubenswrapper[4780]: E1210 11:38:53.959628 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to 
\"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xhdr5_openshift-machine-config-operator(6bf1dca1-b191-4796-b326-baac53e84045)\"" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" podUID="6bf1dca1-b191-4796-b326-baac53e84045" Dec 10 11:38:56 crc kubenswrapper[4780]: E1210 11:38:56.961523 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 11:39:04 crc kubenswrapper[4780]: I1210 11:39:04.959079 4780 scope.go:117] "RemoveContainer" containerID="ad1d621b1a826491d785af0d16af5702804ee2ac6c2b34195a2b38cec7226ce1" Dec 10 11:39:04 crc kubenswrapper[4780]: E1210 11:39:04.961096 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xhdr5_openshift-machine-config-operator(6bf1dca1-b191-4796-b326-baac53e84045)\"" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" podUID="6bf1dca1-b191-4796-b326-baac53e84045" Dec 10 11:39:07 crc kubenswrapper[4780]: E1210 11:39:07.961875 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 11:39:11 crc kubenswrapper[4780]: E1210 11:39:11.962440 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 11:39:16 crc kubenswrapper[4780]: I1210 11:39:16.959482 4780 scope.go:117] "RemoveContainer" containerID="ad1d621b1a826491d785af0d16af5702804ee2ac6c2b34195a2b38cec7226ce1" Dec 10 11:39:16 crc kubenswrapper[4780]: E1210 11:39:16.960276 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xhdr5_openshift-machine-config-operator(6bf1dca1-b191-4796-b326-baac53e84045)\"" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" podUID="6bf1dca1-b191-4796-b326-baac53e84045" Dec 10 11:39:22 crc kubenswrapper[4780]: E1210 11:39:22.964378 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 11:39:23 crc kubenswrapper[4780]: E1210 11:39:23.961072 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image 
\\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 11:39:29 crc kubenswrapper[4780]: I1210 11:39:29.959710 4780 scope.go:117] "RemoveContainer" containerID="ad1d621b1a826491d785af0d16af5702804ee2ac6c2b34195a2b38cec7226ce1" Dec 10 11:39:29 crc kubenswrapper[4780]: E1210 11:39:29.960648 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xhdr5_openshift-machine-config-operator(6bf1dca1-b191-4796-b326-baac53e84045)\"" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" podUID="6bf1dca1-b191-4796-b326-baac53e84045" Dec 10 11:39:34 crc kubenswrapper[4780]: E1210 11:39:34.961502 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 11:39:35 crc kubenswrapper[4780]: E1210 11:39:35.971967 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 11:39:40 crc kubenswrapper[4780]: I1210 11:39:40.961439 4780 scope.go:117] "RemoveContainer" containerID="ad1d621b1a826491d785af0d16af5702804ee2ac6c2b34195a2b38cec7226ce1" Dec 10 11:39:40 crc kubenswrapper[4780]: E1210 11:39:40.962802 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xhdr5_openshift-machine-config-operator(6bf1dca1-b191-4796-b326-baac53e84045)\"" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" podUID="6bf1dca1-b191-4796-b326-baac53e84045" Dec 10 11:39:45 crc kubenswrapper[4780]: E1210 11:39:45.988317 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 11:39:46 crc kubenswrapper[4780]: E1210 11:39:46.960955 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 11:39:55 crc kubenswrapper[4780]: I1210 11:39:55.185912 4780 generic.go:334] "Generic (PLEG): container finished" podID="460516db-cb7d-4309-bbf8-1b4af468dac4" containerID="42efe205d92ab7d4bbc73529e51de9a0e5ddf8361ce47f68afbc1ec9d65aa2cc" exitCode=2 Dec 10 11:39:55 crc kubenswrapper[4780]: I1210 11:39:55.186080 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-vfqk8" event={"ID":"460516db-cb7d-4309-bbf8-1b4af468dac4","Type":"ContainerDied","Data":"42efe205d92ab7d4bbc73529e51de9a0e5ddf8361ce47f68afbc1ec9d65aa2cc"} Dec 10 11:39:55 crc kubenswrapper[4780]: I1210 11:39:55.969372 4780 scope.go:117] "RemoveContainer" containerID="ad1d621b1a826491d785af0d16af5702804ee2ac6c2b34195a2b38cec7226ce1" Dec 10 11:39:55 crc kubenswrapper[4780]: E1210 11:39:55.973194 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xhdr5_openshift-machine-config-operator(6bf1dca1-b191-4796-b326-baac53e84045)\"" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" podUID="6bf1dca1-b191-4796-b326-baac53e84045" Dec 10 11:39:56 crc kubenswrapper[4780]: I1210 11:39:56.723207 4780 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-vfqk8" Dec 10 11:39:56 crc kubenswrapper[4780]: I1210 11:39:56.731827 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/460516db-cb7d-4309-bbf8-1b4af468dac4-inventory\") pod \"460516db-cb7d-4309-bbf8-1b4af468dac4\" (UID: \"460516db-cb7d-4309-bbf8-1b4af468dac4\") " Dec 10 11:39:56 crc kubenswrapper[4780]: I1210 11:39:56.731897 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/460516db-cb7d-4309-bbf8-1b4af468dac4-ssh-key\") pod \"460516db-cb7d-4309-bbf8-1b4af468dac4\" (UID: \"460516db-cb7d-4309-bbf8-1b4af468dac4\") " Dec 10 11:39:56 crc kubenswrapper[4780]: I1210 11:39:56.732172 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zmvkd\" (UniqueName: \"kubernetes.io/projected/460516db-cb7d-4309-bbf8-1b4af468dac4-kube-api-access-zmvkd\") pod \"460516db-cb7d-4309-bbf8-1b4af468dac4\" (UID: \"460516db-cb7d-4309-bbf8-1b4af468dac4\") " Dec 10 11:39:56 crc kubenswrapper[4780]: I1210 11:39:56.738334 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/460516db-cb7d-4309-bbf8-1b4af468dac4-kube-api-access-zmvkd" (OuterVolumeSpecName: "kube-api-access-zmvkd") pod "460516db-cb7d-4309-bbf8-1b4af468dac4" (UID: "460516db-cb7d-4309-bbf8-1b4af468dac4"). InnerVolumeSpecName "kube-api-access-zmvkd". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:39:56 crc kubenswrapper[4780]: I1210 11:39:56.884440 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/460516db-cb7d-4309-bbf8-1b4af468dac4-inventory" (OuterVolumeSpecName: "inventory") pod "460516db-cb7d-4309-bbf8-1b4af468dac4" (UID: "460516db-cb7d-4309-bbf8-1b4af468dac4"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:39:56 crc kubenswrapper[4780]: I1210 11:39:56.894993 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/460516db-cb7d-4309-bbf8-1b4af468dac4-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "460516db-cb7d-4309-bbf8-1b4af468dac4" (UID: "460516db-cb7d-4309-bbf8-1b4af468dac4"). InnerVolumeSpecName "ssh-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:39:56 crc kubenswrapper[4780]: I1210 11:39:56.895599 4780 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/460516db-cb7d-4309-bbf8-1b4af468dac4-inventory\") on node \"crc\" DevicePath \"\"" Dec 10 11:39:56 crc kubenswrapper[4780]: I1210 11:39:56.895720 4780 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/460516db-cb7d-4309-bbf8-1b4af468dac4-ssh-key\") on node \"crc\" DevicePath \"\"" Dec 10 11:39:56 crc kubenswrapper[4780]: I1210 11:39:56.895788 4780 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zmvkd\" (UniqueName: \"kubernetes.io/projected/460516db-cb7d-4309-bbf8-1b4af468dac4-kube-api-access-zmvkd\") on node \"crc\" DevicePath \"\"" Dec 10 11:39:56 crc kubenswrapper[4780]: E1210 11:39:56.962023 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 11:39:57 crc kubenswrapper[4780]: I1210 11:39:57.214132 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-vfqk8" event={"ID":"460516db-cb7d-4309-bbf8-1b4af468dac4","Type":"ContainerDied","Data":"6a91d2b21fb49a5aae000d522a0ba615e71beb9ba8ef94da53f4665b94884b85"} Dec 10 11:39:57 crc kubenswrapper[4780]: I1210 11:39:57.214182 4780 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="6a91d2b21fb49a5aae000d522a0ba615e71beb9ba8ef94da53f4665b94884b85" Dec 10 11:39:57 crc kubenswrapper[4780]: I1210 11:39:57.214244 4780 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-vfqk8" Dec 10 11:40:01 crc kubenswrapper[4780]: E1210 11:40:01.962332 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 11:40:08 crc kubenswrapper[4780]: E1210 11:40:08.072907 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 11:40:08 crc kubenswrapper[4780]: I1210 11:40:08.960140 4780 scope.go:117] "RemoveContainer" containerID="ad1d621b1a826491d785af0d16af5702804ee2ac6c2b34195a2b38cec7226ce1" Dec 10 11:40:08 crc kubenswrapper[4780]: E1210 11:40:08.961025 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xhdr5_openshift-machine-config-operator(6bf1dca1-b191-4796-b326-baac53e84045)\"" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" podUID="6bf1dca1-b191-4796-b326-baac53e84045" Dec 10 11:40:14 crc kubenswrapper[4780]: I1210 11:40:14.041372 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/download-cache-edpm-deployment-openstack-edpm-ipam-r9hfm"] Dec 10 11:40:14 crc kubenswrapper[4780]: E1210 11:40:14.042958 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9524f5bf-34e9-41a7-9932-4843a0d4e94c" containerName="extract-utilities" Dec 10 11:40:14 crc kubenswrapper[4780]: I1210 11:40:14.043001 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="9524f5bf-34e9-41a7-9932-4843a0d4e94c" containerName="extract-utilities" Dec 10 11:40:14 crc kubenswrapper[4780]: E1210 11:40:14.043054 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9524f5bf-34e9-41a7-9932-4843a0d4e94c" containerName="registry-server" Dec 10 11:40:14 crc kubenswrapper[4780]: I1210 11:40:14.043065 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="9524f5bf-34e9-41a7-9932-4843a0d4e94c" containerName="registry-server" Dec 10 11:40:14 crc kubenswrapper[4780]: E1210 11:40:14.043139 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9524f5bf-34e9-41a7-9932-4843a0d4e94c" containerName="extract-content" Dec 10 11:40:14 crc kubenswrapper[4780]: I1210 11:40:14.043152 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="9524f5bf-34e9-41a7-9932-4843a0d4e94c" containerName="extract-content" Dec 10 11:40:14 crc kubenswrapper[4780]: E1210 11:40:14.043192 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="460516db-cb7d-4309-bbf8-1b4af468dac4" containerName="download-cache-edpm-deployment-openstack-edpm-ipam" Dec 10 11:40:14 crc kubenswrapper[4780]: I1210 11:40:14.043202 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="460516db-cb7d-4309-bbf8-1b4af468dac4" containerName="download-cache-edpm-deployment-openstack-edpm-ipam" Dec 10 11:40:14 crc kubenswrapper[4780]: I1210 11:40:14.043562 4780 memory_manager.go:354] "RemoveStaleState removing state" 
podUID="460516db-cb7d-4309-bbf8-1b4af468dac4" containerName="download-cache-edpm-deployment-openstack-edpm-ipam" Dec 10 11:40:14 crc kubenswrapper[4780]: I1210 11:40:14.043592 4780 memory_manager.go:354] "RemoveStaleState removing state" podUID="9524f5bf-34e9-41a7-9932-4843a0d4e94c" containerName="registry-server" Dec 10 11:40:14 crc kubenswrapper[4780]: I1210 11:40:14.044739 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-r9hfm" Dec 10 11:40:14 crc kubenswrapper[4780]: I1210 11:40:14.048613 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-p2qrb" Dec 10 11:40:14 crc kubenswrapper[4780]: I1210 11:40:14.048613 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Dec 10 11:40:14 crc kubenswrapper[4780]: I1210 11:40:14.049479 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Dec 10 11:40:14 crc kubenswrapper[4780]: I1210 11:40:14.050089 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Dec 10 11:40:14 crc kubenswrapper[4780]: I1210 11:40:14.063024 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/download-cache-edpm-deployment-openstack-edpm-ipam-r9hfm"] Dec 10 11:40:14 crc kubenswrapper[4780]: I1210 11:40:14.141370 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/56ce8299-173b-429b-b042-f78fb64b6a74-inventory\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-r9hfm\" (UID: \"56ce8299-173b-429b-b042-f78fb64b6a74\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-r9hfm" Dec 10 11:40:14 crc kubenswrapper[4780]: I1210 11:40:14.141509 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nqw6c\" (UniqueName: \"kubernetes.io/projected/56ce8299-173b-429b-b042-f78fb64b6a74-kube-api-access-nqw6c\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-r9hfm\" (UID: \"56ce8299-173b-429b-b042-f78fb64b6a74\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-r9hfm" Dec 10 11:40:14 crc kubenswrapper[4780]: I1210 11:40:14.141619 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/56ce8299-173b-429b-b042-f78fb64b6a74-ssh-key\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-r9hfm\" (UID: \"56ce8299-173b-429b-b042-f78fb64b6a74\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-r9hfm" Dec 10 11:40:14 crc kubenswrapper[4780]: I1210 11:40:14.244288 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/56ce8299-173b-429b-b042-f78fb64b6a74-ssh-key\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-r9hfm\" (UID: \"56ce8299-173b-429b-b042-f78fb64b6a74\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-r9hfm" Dec 10 11:40:14 crc kubenswrapper[4780]: I1210 11:40:14.245141 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/56ce8299-173b-429b-b042-f78fb64b6a74-inventory\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-r9hfm\" 
(UID: \"56ce8299-173b-429b-b042-f78fb64b6a74\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-r9hfm" Dec 10 11:40:14 crc kubenswrapper[4780]: I1210 11:40:14.245300 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nqw6c\" (UniqueName: \"kubernetes.io/projected/56ce8299-173b-429b-b042-f78fb64b6a74-kube-api-access-nqw6c\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-r9hfm\" (UID: \"56ce8299-173b-429b-b042-f78fb64b6a74\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-r9hfm" Dec 10 11:40:14 crc kubenswrapper[4780]: I1210 11:40:14.250780 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/56ce8299-173b-429b-b042-f78fb64b6a74-inventory\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-r9hfm\" (UID: \"56ce8299-173b-429b-b042-f78fb64b6a74\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-r9hfm" Dec 10 11:40:14 crc kubenswrapper[4780]: I1210 11:40:14.252255 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/56ce8299-173b-429b-b042-f78fb64b6a74-ssh-key\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-r9hfm\" (UID: \"56ce8299-173b-429b-b042-f78fb64b6a74\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-r9hfm" Dec 10 11:40:14 crc kubenswrapper[4780]: I1210 11:40:14.268309 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nqw6c\" (UniqueName: \"kubernetes.io/projected/56ce8299-173b-429b-b042-f78fb64b6a74-kube-api-access-nqw6c\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-r9hfm\" (UID: \"56ce8299-173b-429b-b042-f78fb64b6a74\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-r9hfm" Dec 10 11:40:14 crc kubenswrapper[4780]: I1210 11:40:14.378728 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-r9hfm" Dec 10 11:40:14 crc kubenswrapper[4780]: I1210 11:40:14.978542 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/download-cache-edpm-deployment-openstack-edpm-ipam-r9hfm"] Dec 10 11:40:15 crc kubenswrapper[4780]: I1210 11:40:15.471242 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-r9hfm" event={"ID":"56ce8299-173b-429b-b042-f78fb64b6a74","Type":"ContainerStarted","Data":"9d8f417be7b7e7658213537dbdc917c4a4d2e76449c3dd5bc05582f39fa5c220"} Dec 10 11:40:16 crc kubenswrapper[4780]: I1210 11:40:16.499439 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-r9hfm" event={"ID":"56ce8299-173b-429b-b042-f78fb64b6a74","Type":"ContainerStarted","Data":"9339f08fdd8334a880bfb69d2a235ad11bfabaa40eeaa17cecfe00e66d96b0c1"} Dec 10 11:40:16 crc kubenswrapper[4780]: I1210 11:40:16.548286 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-r9hfm" podStartSLOduration=1.704405908 podStartE2EDuration="2.548264155s" podCreationTimestamp="2025-12-10 11:40:14 +0000 UTC" firstStartedPulling="2025-12-10 11:40:14.990420079 +0000 UTC m=+3319.843813522" lastFinishedPulling="2025-12-10 11:40:15.834278306 +0000 UTC m=+3320.687671769" observedRunningTime="2025-12-10 11:40:16.534219727 +0000 UTC m=+3321.387613170" watchObservedRunningTime="2025-12-10 11:40:16.548264155 +0000 UTC m=+3321.401657598" Dec 10 11:40:16 crc kubenswrapper[4780]: E1210 11:40:16.960319 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 11:40:18 crc kubenswrapper[4780]: E1210 11:40:18.960382 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 11:40:20 crc kubenswrapper[4780]: I1210 11:40:20.959368 4780 scope.go:117] "RemoveContainer" containerID="ad1d621b1a826491d785af0d16af5702804ee2ac6c2b34195a2b38cec7226ce1" Dec 10 11:40:20 crc kubenswrapper[4780]: E1210 11:40:20.960489 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xhdr5_openshift-machine-config-operator(6bf1dca1-b191-4796-b326-baac53e84045)\"" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" podUID="6bf1dca1-b191-4796-b326-baac53e84045" Dec 10 11:40:31 crc kubenswrapper[4780]: E1210 11:40:31.962566 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 11:40:32 crc kubenswrapper[4780]: I1210 
11:40:32.959802 4780 scope.go:117] "RemoveContainer" containerID="ad1d621b1a826491d785af0d16af5702804ee2ac6c2b34195a2b38cec7226ce1" Dec 10 11:40:32 crc kubenswrapper[4780]: E1210 11:40:32.960612 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xhdr5_openshift-machine-config-operator(6bf1dca1-b191-4796-b326-baac53e84045)\"" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" podUID="6bf1dca1-b191-4796-b326-baac53e84045" Dec 10 11:40:33 crc kubenswrapper[4780]: E1210 11:40:33.961334 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 11:40:44 crc kubenswrapper[4780]: I1210 11:40:44.960703 4780 scope.go:117] "RemoveContainer" containerID="ad1d621b1a826491d785af0d16af5702804ee2ac6c2b34195a2b38cec7226ce1" Dec 10 11:40:44 crc kubenswrapper[4780]: E1210 11:40:44.962190 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xhdr5_openshift-machine-config-operator(6bf1dca1-b191-4796-b326-baac53e84045)\"" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" podUID="6bf1dca1-b191-4796-b326-baac53e84045" Dec 10 11:40:44 crc kubenswrapper[4780]: E1210 11:40:44.962970 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 11:40:48 crc kubenswrapper[4780]: E1210 11:40:48.963511 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 11:40:55 crc kubenswrapper[4780]: E1210 11:40:55.979617 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 11:40:58 crc kubenswrapper[4780]: I1210 11:40:58.959763 4780 scope.go:117] "RemoveContainer" containerID="ad1d621b1a826491d785af0d16af5702804ee2ac6c2b34195a2b38cec7226ce1" Dec 10 11:40:58 crc kubenswrapper[4780]: E1210 11:40:58.961048 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xhdr5_openshift-machine-config-operator(6bf1dca1-b191-4796-b326-baac53e84045)\"" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" 
podUID="6bf1dca1-b191-4796-b326-baac53e84045" Dec 10 11:41:00 crc kubenswrapper[4780]: E1210 11:41:00.962059 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 11:41:10 crc kubenswrapper[4780]: E1210 11:41:10.962496 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 11:41:12 crc kubenswrapper[4780]: I1210 11:41:12.959502 4780 scope.go:117] "RemoveContainer" containerID="ad1d621b1a826491d785af0d16af5702804ee2ac6c2b34195a2b38cec7226ce1" Dec 10 11:41:12 crc kubenswrapper[4780]: E1210 11:41:12.961094 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xhdr5_openshift-machine-config-operator(6bf1dca1-b191-4796-b326-baac53e84045)\"" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" podUID="6bf1dca1-b191-4796-b326-baac53e84045" Dec 10 11:41:15 crc kubenswrapper[4780]: E1210 11:41:15.976770 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 11:41:22 crc kubenswrapper[4780]: E1210 11:41:22.961324 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 11:41:26 crc kubenswrapper[4780]: I1210 11:41:26.174289 4780 scope.go:117] "RemoveContainer" containerID="ad1d621b1a826491d785af0d16af5702804ee2ac6c2b34195a2b38cec7226ce1" Dec 10 11:41:26 crc kubenswrapper[4780]: E1210 11:41:26.175000 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xhdr5_openshift-machine-config-operator(6bf1dca1-b191-4796-b326-baac53e84045)\"" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" podUID="6bf1dca1-b191-4796-b326-baac53e84045" Dec 10 11:41:29 crc kubenswrapper[4780]: E1210 11:41:29.015373 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 11:41:37 crc kubenswrapper[4780]: I1210 11:41:37.959105 4780 scope.go:117] "RemoveContainer" 
containerID="ad1d621b1a826491d785af0d16af5702804ee2ac6c2b34195a2b38cec7226ce1" Dec 10 11:41:37 crc kubenswrapper[4780]: E1210 11:41:37.960190 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xhdr5_openshift-machine-config-operator(6bf1dca1-b191-4796-b326-baac53e84045)\"" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" podUID="6bf1dca1-b191-4796-b326-baac53e84045" Dec 10 11:41:37 crc kubenswrapper[4780]: E1210 11:41:37.965371 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 11:41:39 crc kubenswrapper[4780]: E1210 11:41:39.961479 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 11:41:51 crc kubenswrapper[4780]: I1210 11:41:51.294264 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-t2hdz"] Dec 10 11:41:51 crc kubenswrapper[4780]: I1210 11:41:51.309501 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-t2hdz" Dec 10 11:41:51 crc kubenswrapper[4780]: I1210 11:41:51.340060 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-t2hdz"] Dec 10 11:41:51 crc kubenswrapper[4780]: I1210 11:41:51.413011 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c35c9ff8-ac26-450c-ac71-d69032da1225-utilities\") pod \"redhat-operators-t2hdz\" (UID: \"c35c9ff8-ac26-450c-ac71-d69032da1225\") " pod="openshift-marketplace/redhat-operators-t2hdz" Dec 10 11:41:51 crc kubenswrapper[4780]: I1210 11:41:51.413100 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c35c9ff8-ac26-450c-ac71-d69032da1225-catalog-content\") pod \"redhat-operators-t2hdz\" (UID: \"c35c9ff8-ac26-450c-ac71-d69032da1225\") " pod="openshift-marketplace/redhat-operators-t2hdz" Dec 10 11:41:51 crc kubenswrapper[4780]: I1210 11:41:51.413232 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ztbr5\" (UniqueName: \"kubernetes.io/projected/c35c9ff8-ac26-450c-ac71-d69032da1225-kube-api-access-ztbr5\") pod \"redhat-operators-t2hdz\" (UID: \"c35c9ff8-ac26-450c-ac71-d69032da1225\") " pod="openshift-marketplace/redhat-operators-t2hdz" Dec 10 11:41:51 crc kubenswrapper[4780]: I1210 11:41:51.515051 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c35c9ff8-ac26-450c-ac71-d69032da1225-utilities\") pod \"redhat-operators-t2hdz\" (UID: \"c35c9ff8-ac26-450c-ac71-d69032da1225\") " pod="openshift-marketplace/redhat-operators-t2hdz" Dec 10 11:41:51 crc kubenswrapper[4780]: 
I1210 11:41:51.515115 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c35c9ff8-ac26-450c-ac71-d69032da1225-catalog-content\") pod \"redhat-operators-t2hdz\" (UID: \"c35c9ff8-ac26-450c-ac71-d69032da1225\") " pod="openshift-marketplace/redhat-operators-t2hdz" Dec 10 11:41:51 crc kubenswrapper[4780]: I1210 11:41:51.515241 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ztbr5\" (UniqueName: \"kubernetes.io/projected/c35c9ff8-ac26-450c-ac71-d69032da1225-kube-api-access-ztbr5\") pod \"redhat-operators-t2hdz\" (UID: \"c35c9ff8-ac26-450c-ac71-d69032da1225\") " pod="openshift-marketplace/redhat-operators-t2hdz" Dec 10 11:41:51 crc kubenswrapper[4780]: I1210 11:41:51.516338 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c35c9ff8-ac26-450c-ac71-d69032da1225-utilities\") pod \"redhat-operators-t2hdz\" (UID: \"c35c9ff8-ac26-450c-ac71-d69032da1225\") " pod="openshift-marketplace/redhat-operators-t2hdz" Dec 10 11:41:51 crc kubenswrapper[4780]: I1210 11:41:51.516615 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c35c9ff8-ac26-450c-ac71-d69032da1225-catalog-content\") pod \"redhat-operators-t2hdz\" (UID: \"c35c9ff8-ac26-450c-ac71-d69032da1225\") " pod="openshift-marketplace/redhat-operators-t2hdz" Dec 10 11:41:51 crc kubenswrapper[4780]: I1210 11:41:51.554896 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ztbr5\" (UniqueName: \"kubernetes.io/projected/c35c9ff8-ac26-450c-ac71-d69032da1225-kube-api-access-ztbr5\") pod \"redhat-operators-t2hdz\" (UID: \"c35c9ff8-ac26-450c-ac71-d69032da1225\") " pod="openshift-marketplace/redhat-operators-t2hdz" Dec 10 11:41:51 crc kubenswrapper[4780]: I1210 11:41:51.929534 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-t2hdz" Dec 10 11:41:51 crc kubenswrapper[4780]: I1210 11:41:51.961777 4780 scope.go:117] "RemoveContainer" containerID="ad1d621b1a826491d785af0d16af5702804ee2ac6c2b34195a2b38cec7226ce1" Dec 10 11:41:51 crc kubenswrapper[4780]: E1210 11:41:51.963787 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xhdr5_openshift-machine-config-operator(6bf1dca1-b191-4796-b326-baac53e84045)\"" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" podUID="6bf1dca1-b191-4796-b326-baac53e84045" Dec 10 11:41:52 crc kubenswrapper[4780]: E1210 11:41:52.025282 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 11:41:52 crc kubenswrapper[4780]: I1210 11:41:52.700674 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-t2hdz"] Dec 10 11:41:52 crc kubenswrapper[4780]: W1210 11:41:52.702950 4780 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podc35c9ff8_ac26_450c_ac71_d69032da1225.slice/crio-19024e75ea224f4e7be6ea19b228003bb13eaa9d74e17f8077e64239a7edc855 WatchSource:0}: Error finding container 19024e75ea224f4e7be6ea19b228003bb13eaa9d74e17f8077e64239a7edc855: Status 404 returned error can't find the container with id 19024e75ea224f4e7be6ea19b228003bb13eaa9d74e17f8077e64239a7edc855 Dec 10 11:41:52 crc kubenswrapper[4780]: I1210 11:41:52.962853 4780 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Dec 10 11:41:53 crc kubenswrapper[4780]: E1210 11:41:53.064657 4780 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested" Dec 10 11:41:53 crc kubenswrapper[4780]: E1210 11:41:53.064792 4780 kuberuntime_image.go:55] "Failed to pull image" err="initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested" Dec 10 11:41:53 crc kubenswrapper[4780]: E1210 11:41:53.065066 4780 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:heat-db-sync,Image:quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested,Command:[/bin/bash],Args:[-c /usr/bin/heat-manage --config-dir /etc/heat/heat.conf.d db_sync],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:true,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/heat/heat.conf.d/00-default.conf,SubPath:00-default.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:false,MountPath:/etc/heat/heat.conf.d/01-custom.conf,SubPath:01-custom.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/my.cnf,SubPath:my.cnf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-fh6ms,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42418,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:*42418,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod heat-db-sync-nd4t7_openstack(4ba2892c-316e-4819-a33c-d7b2b6803553): ErrImagePull: initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" logger="UnhandledError" Dec 10 11:41:53 crc kubenswrapper[4780]: E1210 11:41:53.067027 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ErrImagePull: \"initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 11:41:53 crc kubenswrapper[4780]: I1210 11:41:53.179212 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-t2hdz" event={"ID":"c35c9ff8-ac26-450c-ac71-d69032da1225","Type":"ContainerStarted","Data":"19024e75ea224f4e7be6ea19b228003bb13eaa9d74e17f8077e64239a7edc855"} Dec 10 11:41:56 crc kubenswrapper[4780]: I1210 11:41:56.303250 4780 generic.go:334] "Generic (PLEG): container finished" podID="c35c9ff8-ac26-450c-ac71-d69032da1225" containerID="b4bd50fd36c1ef2b8d459f101ece9c80c149536edf8004f80ebb49b82edceced" exitCode=0 Dec 10 11:41:56 crc kubenswrapper[4780]: I1210 11:41:56.304169 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-t2hdz" event={"ID":"c35c9ff8-ac26-450c-ac71-d69032da1225","Type":"ContainerDied","Data":"b4bd50fd36c1ef2b8d459f101ece9c80c149536edf8004f80ebb49b82edceced"} Dec 10 11:42:02 crc kubenswrapper[4780]: I1210 11:42:02.396695 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-t2hdz" event={"ID":"c35c9ff8-ac26-450c-ac71-d69032da1225","Type":"ContainerStarted","Data":"d944103c92c8e37378b659aa2e1121ebc9501c4b39cb93dbc0ce03eac32a3dbf"} Dec 10 11:42:04 crc kubenswrapper[4780]: E1210 11:42:04.964362 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 11:42:06 crc kubenswrapper[4780]: I1210 11:42:06.167651 4780 scope.go:117] "RemoveContainer" containerID="ad1d621b1a826491d785af0d16af5702804ee2ac6c2b34195a2b38cec7226ce1" Dec 10 11:42:06 crc kubenswrapper[4780]: E1210 11:42:06.169068 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xhdr5_openshift-machine-config-operator(6bf1dca1-b191-4796-b326-baac53e84045)\"" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" podUID="6bf1dca1-b191-4796-b326-baac53e84045" Dec 10 11:42:06 crc kubenswrapper[4780]: E1210 11:42:06.172125 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 11:42:15 crc kubenswrapper[4780]: I1210 11:42:15.849423 4780 fsHandler.go:133] fs: disk usage and inodes count on following dirs took 1.048096728s: [/var/lib/containers/storage/overlay/b14ef013fe62c1a706c452400db494f9f2a3edffe649e26b91f9b82efe91c218/diff /var/log/pods/openstack_heat-api-5bffc7b484-bhjz4_43cf9913-8179-4d01-a9d8-40ae5078b366/heat-api/0.log]; will not log again for this container unless duration exceeds 2s Dec 10 11:42:15 crc kubenswrapper[4780]: I1210 11:42:15.871137 4780 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack-operators/heat-operator-controller-manager-5f64f6f8bb-csmkt" podUID="d235302b-56b1-4515-9f26-4f0ea884aa87" containerName="manager" probeResult="failure" output="Get 
\"http://10.217.0.106:8081/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Dec 10 11:42:16 crc kubenswrapper[4780]: I1210 11:42:15.996346 4780 fsHandler.go:133] fs: disk usage and inodes count on following dirs took 1.21171257s: [/var/lib/containers/storage/overlay/f3ca2f163b8d9258cba4abfc2f649601ccf5cb1fa7349f1d45125bb9ac317ace/diff /var/log/pods/openstack_cinder-scheduler-0_274acc05-0f10-48e5-8fb8-44bc1ddca126/probe/0.log]; will not log again for this container unless duration exceeds 2s Dec 10 11:42:16 crc kubenswrapper[4780]: I1210 11:42:15.997284 4780 patch_prober.go:28] interesting pod/openshift-kube-scheduler-crc container/kube-scheduler namespace/openshift-kube-scheduler: Readiness probe status=failure output="Get \"https://192.168.126.11:10259/healthz\": context deadline exceeded" start-of-body= Dec 10 11:42:16 crc kubenswrapper[4780]: I1210 11:42:15.997353 4780 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" podUID="3dcd261975c3d6b9a6ad6367fd4facd3" containerName="kube-scheduler" probeResult="failure" output="Get \"https://192.168.126.11:10259/healthz\": context deadline exceeded" Dec 10 11:42:16 crc kubenswrapper[4780]: I1210 11:42:16.126358 4780 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack-operators/openstack-operator-controller-manager-678c445b7b-6kmsn" podUID="79ed5cd6-1f3a-4e73-ae77-ce5565e5f7a3" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.123:8081/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Dec 10 11:42:16 crc kubenswrapper[4780]: I1210 11:42:16.167410 4780 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/openstack-operator-controller-manager-678c445b7b-6kmsn" podUID="79ed5cd6-1f3a-4e73-ae77-ce5565e5f7a3" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.123:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Dec 10 11:42:16 crc kubenswrapper[4780]: I1210 11:42:16.169153 4780 generic.go:334] "Generic (PLEG): container finished" podID="c35c9ff8-ac26-450c-ac71-d69032da1225" containerID="d944103c92c8e37378b659aa2e1121ebc9501c4b39cb93dbc0ce03eac32a3dbf" exitCode=0 Dec 10 11:42:16 crc kubenswrapper[4780]: I1210 11:42:16.169206 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-t2hdz" event={"ID":"c35c9ff8-ac26-450c-ac71-d69032da1225","Type":"ContainerDied","Data":"d944103c92c8e37378b659aa2e1121ebc9501c4b39cb93dbc0ce03eac32a3dbf"} Dec 10 11:42:17 crc kubenswrapper[4780]: E1210 11:42:17.090504 4780 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested" Dec 10 11:42:17 crc kubenswrapper[4780]: E1210 11:42:17.091107 4780 kuberuntime_image.go:55] "Failed to pull image" err="initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested" Dec 10 11:42:17 crc kubenswrapper[4780]: E1210 11:42:17.091456 4780 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:ceilometer-central-agent,Image:quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n5d9hcfh66bh66bh89h5cdh97h57ch598h68h5b5h689h56chc5h96h58ch687h5dfh5ddh645h68bhcchcdh56ch56fh9fh654hd4h8dhb9h74h59cq,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/var/lib/openstack/bin,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/openstack/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:ceilometer-central-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-gf2w8,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[/usr/bin/python3 /var/lib/openstack/bin/centralhealth.py],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:300,TimeoutSeconds:5,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ceilometer-0_openstack(317b5b7c-bb08-4441-a2ef-8c2d7390ada6): ErrImagePull: initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine" logger="UnhandledError" Dec 10 11:42:17 crc kubenswrapper[4780]: E1210 11:42:17.093071 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ErrImagePull: \"initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 11:42:17 crc kubenswrapper[4780]: I1210 11:42:17.190254 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-t2hdz" event={"ID":"c35c9ff8-ac26-450c-ac71-d69032da1225","Type":"ContainerStarted","Data":"a04084e5e52f6ae4ad2152a3ee60aa129034b95149802db302599563546ebd30"} Dec 10 11:42:17 crc kubenswrapper[4780]: I1210 11:42:17.222005 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-t2hdz" podStartSLOduration=5.714227436 podStartE2EDuration="26.221978772s" podCreationTimestamp="2025-12-10 11:41:51 +0000 UTC" firstStartedPulling="2025-12-10 11:41:56.309335784 +0000 UTC m=+3421.162729227" lastFinishedPulling="2025-12-10 11:42:16.81708712 +0000 UTC m=+3441.670480563" observedRunningTime="2025-12-10 11:42:17.217167899 +0000 UTC m=+3442.070561342" watchObservedRunningTime="2025-12-10 11:42:17.221978772 +0000 UTC m=+3442.075372215" Dec 10 11:42:18 crc kubenswrapper[4780]: I1210 11:42:18.960235 4780 scope.go:117] "RemoveContainer" containerID="ad1d621b1a826491d785af0d16af5702804ee2ac6c2b34195a2b38cec7226ce1" Dec 10 11:42:18 crc kubenswrapper[4780]: E1210 11:42:18.961046 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xhdr5_openshift-machine-config-operator(6bf1dca1-b191-4796-b326-baac53e84045)\"" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" podUID="6bf1dca1-b191-4796-b326-baac53e84045" Dec 10 11:42:18 crc kubenswrapper[4780]: E1210 11:42:18.962913 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 11:42:21 crc kubenswrapper[4780]: I1210 11:42:21.936844 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-t2hdz" Dec 10 11:42:21 crc kubenswrapper[4780]: I1210 11:42:21.937738 4780 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-t2hdz" Dec 10 11:42:23 crc kubenswrapper[4780]: I1210 11:42:23.000233 4780 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-t2hdz" podUID="c35c9ff8-ac26-450c-ac71-d69032da1225" containerName="registry-server" probeResult="failure" output=< Dec 10 11:42:23 crc kubenswrapper[4780]: timeout: failed to connect service ":50051" within 1s Dec 10 11:42:23 crc kubenswrapper[4780]: > Dec 10 11:42:28 crc kubenswrapper[4780]: E1210 11:42:28.963941 4780 
pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 11:42:31 crc kubenswrapper[4780]: E1210 11:42:31.962642 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 11:42:32 crc kubenswrapper[4780]: I1210 11:42:32.005277 4780 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-t2hdz" Dec 10 11:42:32 crc kubenswrapper[4780]: I1210 11:42:32.068644 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-t2hdz" Dec 10 11:42:33 crc kubenswrapper[4780]: I1210 11:42:33.958855 4780 scope.go:117] "RemoveContainer" containerID="ad1d621b1a826491d785af0d16af5702804ee2ac6c2b34195a2b38cec7226ce1" Dec 10 11:42:34 crc kubenswrapper[4780]: I1210 11:42:34.872504 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" event={"ID":"6bf1dca1-b191-4796-b326-baac53e84045","Type":"ContainerStarted","Data":"e15ab34e36ac32b53834cffe21875a8d05b051c4c02a437375344ed40ed90da1"} Dec 10 11:42:35 crc kubenswrapper[4780]: I1210 11:42:35.562253 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-t2hdz"] Dec 10 11:42:35 crc kubenswrapper[4780]: I1210 11:42:35.562836 4780 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-t2hdz" podUID="c35c9ff8-ac26-450c-ac71-d69032da1225" containerName="registry-server" containerID="cri-o://a04084e5e52f6ae4ad2152a3ee60aa129034b95149802db302599563546ebd30" gracePeriod=2 Dec 10 11:42:35 crc kubenswrapper[4780]: I1210 11:42:35.890459 4780 generic.go:334] "Generic (PLEG): container finished" podID="c35c9ff8-ac26-450c-ac71-d69032da1225" containerID="a04084e5e52f6ae4ad2152a3ee60aa129034b95149802db302599563546ebd30" exitCode=0 Dec 10 11:42:35 crc kubenswrapper[4780]: I1210 11:42:35.890551 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-t2hdz" event={"ID":"c35c9ff8-ac26-450c-ac71-d69032da1225","Type":"ContainerDied","Data":"a04084e5e52f6ae4ad2152a3ee60aa129034b95149802db302599563546ebd30"} Dec 10 11:42:37 crc kubenswrapper[4780]: I1210 11:42:37.484367 4780 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-t2hdz" Dec 10 11:42:37 crc kubenswrapper[4780]: I1210 11:42:37.528372 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c35c9ff8-ac26-450c-ac71-d69032da1225-utilities\") pod \"c35c9ff8-ac26-450c-ac71-d69032da1225\" (UID: \"c35c9ff8-ac26-450c-ac71-d69032da1225\") " Dec 10 11:42:37 crc kubenswrapper[4780]: I1210 11:42:37.529705 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c35c9ff8-ac26-450c-ac71-d69032da1225-utilities" (OuterVolumeSpecName: "utilities") pod "c35c9ff8-ac26-450c-ac71-d69032da1225" (UID: "c35c9ff8-ac26-450c-ac71-d69032da1225"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 11:42:37 crc kubenswrapper[4780]: I1210 11:42:37.529964 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c35c9ff8-ac26-450c-ac71-d69032da1225-catalog-content\") pod \"c35c9ff8-ac26-450c-ac71-d69032da1225\" (UID: \"c35c9ff8-ac26-450c-ac71-d69032da1225\") " Dec 10 11:42:37 crc kubenswrapper[4780]: I1210 11:42:37.530017 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ztbr5\" (UniqueName: \"kubernetes.io/projected/c35c9ff8-ac26-450c-ac71-d69032da1225-kube-api-access-ztbr5\") pod \"c35c9ff8-ac26-450c-ac71-d69032da1225\" (UID: \"c35c9ff8-ac26-450c-ac71-d69032da1225\") " Dec 10 11:42:37 crc kubenswrapper[4780]: I1210 11:42:37.530812 4780 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c35c9ff8-ac26-450c-ac71-d69032da1225-utilities\") on node \"crc\" DevicePath \"\"" Dec 10 11:42:37 crc kubenswrapper[4780]: I1210 11:42:37.543075 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c35c9ff8-ac26-450c-ac71-d69032da1225-kube-api-access-ztbr5" (OuterVolumeSpecName: "kube-api-access-ztbr5") pod "c35c9ff8-ac26-450c-ac71-d69032da1225" (UID: "c35c9ff8-ac26-450c-ac71-d69032da1225"). InnerVolumeSpecName "kube-api-access-ztbr5". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:42:37 crc kubenswrapper[4780]: I1210 11:42:37.633175 4780 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ztbr5\" (UniqueName: \"kubernetes.io/projected/c35c9ff8-ac26-450c-ac71-d69032da1225-kube-api-access-ztbr5\") on node \"crc\" DevicePath \"\"" Dec 10 11:42:37 crc kubenswrapper[4780]: I1210 11:42:37.713130 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c35c9ff8-ac26-450c-ac71-d69032da1225-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "c35c9ff8-ac26-450c-ac71-d69032da1225" (UID: "c35c9ff8-ac26-450c-ac71-d69032da1225"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 11:42:37 crc kubenswrapper[4780]: I1210 11:42:37.736841 4780 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c35c9ff8-ac26-450c-ac71-d69032da1225-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 10 11:42:37 crc kubenswrapper[4780]: I1210 11:42:37.921591 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-t2hdz" event={"ID":"c35c9ff8-ac26-450c-ac71-d69032da1225","Type":"ContainerDied","Data":"19024e75ea224f4e7be6ea19b228003bb13eaa9d74e17f8077e64239a7edc855"} Dec 10 11:42:37 crc kubenswrapper[4780]: I1210 11:42:37.921939 4780 scope.go:117] "RemoveContainer" containerID="a04084e5e52f6ae4ad2152a3ee60aa129034b95149802db302599563546ebd30" Dec 10 11:42:37 crc kubenswrapper[4780]: I1210 11:42:37.922167 4780 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-t2hdz" Dec 10 11:42:38 crc kubenswrapper[4780]: I1210 11:42:38.033014 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-t2hdz"] Dec 10 11:42:38 crc kubenswrapper[4780]: I1210 11:42:38.033229 4780 scope.go:117] "RemoveContainer" containerID="d944103c92c8e37378b659aa2e1121ebc9501c4b39cb93dbc0ce03eac32a3dbf" Dec 10 11:42:38 crc kubenswrapper[4780]: I1210 11:42:38.041744 4780 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-t2hdz"] Dec 10 11:42:38 crc kubenswrapper[4780]: I1210 11:42:38.132978 4780 scope.go:117] "RemoveContainer" containerID="b4bd50fd36c1ef2b8d459f101ece9c80c149536edf8004f80ebb49b82edceced" Dec 10 11:42:39 crc kubenswrapper[4780]: I1210 11:42:39.981025 4780 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c35c9ff8-ac26-450c-ac71-d69032da1225" path="/var/lib/kubelet/pods/c35c9ff8-ac26-450c-ac71-d69032da1225/volumes" Dec 10 11:42:43 crc kubenswrapper[4780]: E1210 11:42:43.962321 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 11:42:44 crc kubenswrapper[4780]: E1210 11:42:44.965953 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 11:42:54 crc kubenswrapper[4780]: E1210 11:42:54.964377 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 11:42:56 crc kubenswrapper[4780]: I1210 11:42:56.924340 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-r249g"] Dec 10 11:42:56 crc kubenswrapper[4780]: E1210 11:42:56.925495 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c35c9ff8-ac26-450c-ac71-d69032da1225" 
containerName="extract-content" Dec 10 11:42:56 crc kubenswrapper[4780]: I1210 11:42:56.925516 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="c35c9ff8-ac26-450c-ac71-d69032da1225" containerName="extract-content" Dec 10 11:42:56 crc kubenswrapper[4780]: E1210 11:42:56.925537 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c35c9ff8-ac26-450c-ac71-d69032da1225" containerName="registry-server" Dec 10 11:42:56 crc kubenswrapper[4780]: I1210 11:42:56.925545 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="c35c9ff8-ac26-450c-ac71-d69032da1225" containerName="registry-server" Dec 10 11:42:56 crc kubenswrapper[4780]: E1210 11:42:56.925582 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c35c9ff8-ac26-450c-ac71-d69032da1225" containerName="extract-utilities" Dec 10 11:42:56 crc kubenswrapper[4780]: I1210 11:42:56.925591 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="c35c9ff8-ac26-450c-ac71-d69032da1225" containerName="extract-utilities" Dec 10 11:42:56 crc kubenswrapper[4780]: I1210 11:42:56.925858 4780 memory_manager.go:354] "RemoveStaleState removing state" podUID="c35c9ff8-ac26-450c-ac71-d69032da1225" containerName="registry-server" Dec 10 11:42:56 crc kubenswrapper[4780]: I1210 11:42:56.927690 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-r249g" Dec 10 11:42:56 crc kubenswrapper[4780]: I1210 11:42:56.950839 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-r249g"] Dec 10 11:42:56 crc kubenswrapper[4780]: I1210 11:42:56.987912 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/440138f1-b946-4712-a553-fccc76e51bc2-catalog-content\") pod \"certified-operators-r249g\" (UID: \"440138f1-b946-4712-a553-fccc76e51bc2\") " pod="openshift-marketplace/certified-operators-r249g" Dec 10 11:42:56 crc kubenswrapper[4780]: I1210 11:42:56.989913 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/440138f1-b946-4712-a553-fccc76e51bc2-utilities\") pod \"certified-operators-r249g\" (UID: \"440138f1-b946-4712-a553-fccc76e51bc2\") " pod="openshift-marketplace/certified-operators-r249g" Dec 10 11:42:56 crc kubenswrapper[4780]: I1210 11:42:56.990097 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sb4nw\" (UniqueName: \"kubernetes.io/projected/440138f1-b946-4712-a553-fccc76e51bc2-kube-api-access-sb4nw\") pod \"certified-operators-r249g\" (UID: \"440138f1-b946-4712-a553-fccc76e51bc2\") " pod="openshift-marketplace/certified-operators-r249g" Dec 10 11:42:57 crc kubenswrapper[4780]: I1210 11:42:57.094156 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/440138f1-b946-4712-a553-fccc76e51bc2-utilities\") pod \"certified-operators-r249g\" (UID: \"440138f1-b946-4712-a553-fccc76e51bc2\") " pod="openshift-marketplace/certified-operators-r249g" Dec 10 11:42:57 crc kubenswrapper[4780]: I1210 11:42:57.094262 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sb4nw\" (UniqueName: \"kubernetes.io/projected/440138f1-b946-4712-a553-fccc76e51bc2-kube-api-access-sb4nw\") pod \"certified-operators-r249g\" (UID: 
\"440138f1-b946-4712-a553-fccc76e51bc2\") " pod="openshift-marketplace/certified-operators-r249g" Dec 10 11:42:57 crc kubenswrapper[4780]: I1210 11:42:57.094418 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/440138f1-b946-4712-a553-fccc76e51bc2-catalog-content\") pod \"certified-operators-r249g\" (UID: \"440138f1-b946-4712-a553-fccc76e51bc2\") " pod="openshift-marketplace/certified-operators-r249g" Dec 10 11:42:57 crc kubenswrapper[4780]: I1210 11:42:57.095144 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/440138f1-b946-4712-a553-fccc76e51bc2-utilities\") pod \"certified-operators-r249g\" (UID: \"440138f1-b946-4712-a553-fccc76e51bc2\") " pod="openshift-marketplace/certified-operators-r249g" Dec 10 11:42:57 crc kubenswrapper[4780]: I1210 11:42:57.095269 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/440138f1-b946-4712-a553-fccc76e51bc2-catalog-content\") pod \"certified-operators-r249g\" (UID: \"440138f1-b946-4712-a553-fccc76e51bc2\") " pod="openshift-marketplace/certified-operators-r249g" Dec 10 11:42:57 crc kubenswrapper[4780]: I1210 11:42:57.120143 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sb4nw\" (UniqueName: \"kubernetes.io/projected/440138f1-b946-4712-a553-fccc76e51bc2-kube-api-access-sb4nw\") pod \"certified-operators-r249g\" (UID: \"440138f1-b946-4712-a553-fccc76e51bc2\") " pod="openshift-marketplace/certified-operators-r249g" Dec 10 11:42:57 crc kubenswrapper[4780]: I1210 11:42:57.254744 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-r249g" Dec 10 11:42:57 crc kubenswrapper[4780]: E1210 11:42:57.971031 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 11:42:57 crc kubenswrapper[4780]: I1210 11:42:57.989222 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-r249g"] Dec 10 11:42:58 crc kubenswrapper[4780]: I1210 11:42:58.230582 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-r249g" event={"ID":"440138f1-b946-4712-a553-fccc76e51bc2","Type":"ContainerStarted","Data":"b534ae880aa5ebe7a4622e68c80f103106e7f01412308edb48ba07914e18a3ec"} Dec 10 11:42:59 crc kubenswrapper[4780]: I1210 11:42:59.246678 4780 generic.go:334] "Generic (PLEG): container finished" podID="440138f1-b946-4712-a553-fccc76e51bc2" containerID="d05657b224688ecaf562d1bc0a80380803db44f054a5c2147ade573c238597d0" exitCode=0 Dec 10 11:42:59 crc kubenswrapper[4780]: I1210 11:42:59.246864 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-r249g" event={"ID":"440138f1-b946-4712-a553-fccc76e51bc2","Type":"ContainerDied","Data":"d05657b224688ecaf562d1bc0a80380803db44f054a5c2147ade573c238597d0"} Dec 10 11:43:08 crc kubenswrapper[4780]: I1210 11:43:08.398386 4780 generic.go:334] "Generic (PLEG): container finished" podID="440138f1-b946-4712-a553-fccc76e51bc2" 
containerID="6b22828f74be1e24dea7796cfe11e9f62afef3e2f08ceed70c87955103a4a122" exitCode=0 Dec 10 11:43:08 crc kubenswrapper[4780]: I1210 11:43:08.398443 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-r249g" event={"ID":"440138f1-b946-4712-a553-fccc76e51bc2","Type":"ContainerDied","Data":"6b22828f74be1e24dea7796cfe11e9f62afef3e2f08ceed70c87955103a4a122"} Dec 10 11:43:08 crc kubenswrapper[4780]: E1210 11:43:08.962101 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 11:43:08 crc kubenswrapper[4780]: E1210 11:43:08.962746 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 11:43:10 crc kubenswrapper[4780]: I1210 11:43:10.439126 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-r249g" event={"ID":"440138f1-b946-4712-a553-fccc76e51bc2","Type":"ContainerStarted","Data":"9544e168424337c9d69794f8b6bbebf6d9fc6d6166c3eea7679a7585108147e5"} Dec 10 11:43:10 crc kubenswrapper[4780]: I1210 11:43:10.474610 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-r249g" podStartSLOduration=4.446377568 podStartE2EDuration="14.474583522s" podCreationTimestamp="2025-12-10 11:42:56 +0000 UTC" firstStartedPulling="2025-12-10 11:42:59.250575843 +0000 UTC m=+3484.103969286" lastFinishedPulling="2025-12-10 11:43:09.278781797 +0000 UTC m=+3494.132175240" observedRunningTime="2025-12-10 11:43:10.459801646 +0000 UTC m=+3495.313195229" watchObservedRunningTime="2025-12-10 11:43:10.474583522 +0000 UTC m=+3495.327976965" Dec 10 11:43:17 crc kubenswrapper[4780]: I1210 11:43:17.256484 4780 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-r249g" Dec 10 11:43:17 crc kubenswrapper[4780]: I1210 11:43:17.257085 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-r249g" Dec 10 11:43:17 crc kubenswrapper[4780]: I1210 11:43:17.319125 4780 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-r249g" Dec 10 11:43:17 crc kubenswrapper[4780]: I1210 11:43:17.770801 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-r249g" Dec 10 11:43:18 crc kubenswrapper[4780]: I1210 11:43:18.129743 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-r249g"] Dec 10 11:43:18 crc kubenswrapper[4780]: I1210 11:43:18.242884 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-wmt8w"] Dec 10 11:43:18 crc kubenswrapper[4780]: I1210 11:43:18.243274 4780 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-wmt8w" podUID="df7d88d5-88fb-4edb-aae6-5623f2e6f6bc" containerName="registry-server" 
containerID="cri-o://694893effdd9d9f8acbc980e4eaa74344498e62666a9cf69958fc2d879117967" gracePeriod=2 Dec 10 11:43:19 crc kubenswrapper[4780]: I1210 11:43:19.571913 4780 generic.go:334] "Generic (PLEG): container finished" podID="df7d88d5-88fb-4edb-aae6-5623f2e6f6bc" containerID="694893effdd9d9f8acbc980e4eaa74344498e62666a9cf69958fc2d879117967" exitCode=0 Dec 10 11:43:19 crc kubenswrapper[4780]: I1210 11:43:19.571965 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-wmt8w" event={"ID":"df7d88d5-88fb-4edb-aae6-5623f2e6f6bc","Type":"ContainerDied","Data":"694893effdd9d9f8acbc980e4eaa74344498e62666a9cf69958fc2d879117967"} Dec 10 11:43:20 crc kubenswrapper[4780]: I1210 11:43:20.108888 4780 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-wmt8w" Dec 10 11:43:20 crc kubenswrapper[4780]: I1210 11:43:20.157835 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7kshh\" (UniqueName: \"kubernetes.io/projected/df7d88d5-88fb-4edb-aae6-5623f2e6f6bc-kube-api-access-7kshh\") pod \"df7d88d5-88fb-4edb-aae6-5623f2e6f6bc\" (UID: \"df7d88d5-88fb-4edb-aae6-5623f2e6f6bc\") " Dec 10 11:43:20 crc kubenswrapper[4780]: I1210 11:43:20.158216 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/df7d88d5-88fb-4edb-aae6-5623f2e6f6bc-catalog-content\") pod \"df7d88d5-88fb-4edb-aae6-5623f2e6f6bc\" (UID: \"df7d88d5-88fb-4edb-aae6-5623f2e6f6bc\") " Dec 10 11:43:20 crc kubenswrapper[4780]: I1210 11:43:20.158292 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/df7d88d5-88fb-4edb-aae6-5623f2e6f6bc-utilities\") pod \"df7d88d5-88fb-4edb-aae6-5623f2e6f6bc\" (UID: \"df7d88d5-88fb-4edb-aae6-5623f2e6f6bc\") " Dec 10 11:43:20 crc kubenswrapper[4780]: I1210 11:43:20.193533 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/df7d88d5-88fb-4edb-aae6-5623f2e6f6bc-utilities" (OuterVolumeSpecName: "utilities") pod "df7d88d5-88fb-4edb-aae6-5623f2e6f6bc" (UID: "df7d88d5-88fb-4edb-aae6-5623f2e6f6bc"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 11:43:20 crc kubenswrapper[4780]: I1210 11:43:20.218057 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/df7d88d5-88fb-4edb-aae6-5623f2e6f6bc-kube-api-access-7kshh" (OuterVolumeSpecName: "kube-api-access-7kshh") pod "df7d88d5-88fb-4edb-aae6-5623f2e6f6bc" (UID: "df7d88d5-88fb-4edb-aae6-5623f2e6f6bc"). InnerVolumeSpecName "kube-api-access-7kshh". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:43:20 crc kubenswrapper[4780]: I1210 11:43:20.261695 4780 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/df7d88d5-88fb-4edb-aae6-5623f2e6f6bc-utilities\") on node \"crc\" DevicePath \"\"" Dec 10 11:43:20 crc kubenswrapper[4780]: I1210 11:43:20.261842 4780 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7kshh\" (UniqueName: \"kubernetes.io/projected/df7d88d5-88fb-4edb-aae6-5623f2e6f6bc-kube-api-access-7kshh\") on node \"crc\" DevicePath \"\"" Dec 10 11:43:20 crc kubenswrapper[4780]: I1210 11:43:20.359590 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/df7d88d5-88fb-4edb-aae6-5623f2e6f6bc-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "df7d88d5-88fb-4edb-aae6-5623f2e6f6bc" (UID: "df7d88d5-88fb-4edb-aae6-5623f2e6f6bc"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 11:43:20 crc kubenswrapper[4780]: I1210 11:43:20.364391 4780 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/df7d88d5-88fb-4edb-aae6-5623f2e6f6bc-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 10 11:43:20 crc kubenswrapper[4780]: I1210 11:43:20.585755 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-wmt8w" event={"ID":"df7d88d5-88fb-4edb-aae6-5623f2e6f6bc","Type":"ContainerDied","Data":"ca7a209683fc259822445fe5abf48060bf4f206379da88df7f6ff422c0a9e417"} Dec 10 11:43:20 crc kubenswrapper[4780]: I1210 11:43:20.585813 4780 scope.go:117] "RemoveContainer" containerID="694893effdd9d9f8acbc980e4eaa74344498e62666a9cf69958fc2d879117967" Dec 10 11:43:20 crc kubenswrapper[4780]: I1210 11:43:20.585855 4780 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-wmt8w" Dec 10 11:43:20 crc kubenswrapper[4780]: I1210 11:43:20.618146 4780 scope.go:117] "RemoveContainer" containerID="254853d6640a482670ff1e7b5568e92dab31de654e4298df88341b13c514c15d" Dec 10 11:43:20 crc kubenswrapper[4780]: I1210 11:43:20.643635 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-wmt8w"] Dec 10 11:43:20 crc kubenswrapper[4780]: I1210 11:43:20.658802 4780 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-wmt8w"] Dec 10 11:43:20 crc kubenswrapper[4780]: I1210 11:43:20.670491 4780 scope.go:117] "RemoveContainer" containerID="14baf8fdf3169ad4d101361777d42a3e11eef5b68c5548e80538a2c910d3428e" Dec 10 11:43:21 crc kubenswrapper[4780]: I1210 11:43:21.974597 4780 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="df7d88d5-88fb-4edb-aae6-5623f2e6f6bc" path="/var/lib/kubelet/pods/df7d88d5-88fb-4edb-aae6-5623f2e6f6bc/volumes" Dec 10 11:43:22 crc kubenswrapper[4780]: E1210 11:43:22.962393 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 11:43:23 crc kubenswrapper[4780]: E1210 11:43:23.964479 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 11:43:34 crc kubenswrapper[4780]: E1210 11:43:34.961383 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 11:43:35 crc kubenswrapper[4780]: E1210 11:43:35.967645 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 11:43:46 crc kubenswrapper[4780]: E1210 11:43:46.961426 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 11:43:48 crc kubenswrapper[4780]: E1210 11:43:48.962449 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 11:43:59 crc kubenswrapper[4780]: E1210 11:43:59.963061 4780 pod_workers.go:1301] "Error syncing 
pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 11:44:03 crc kubenswrapper[4780]: E1210 11:44:03.962542 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 11:44:10 crc kubenswrapper[4780]: E1210 11:44:10.967470 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 11:44:15 crc kubenswrapper[4780]: E1210 11:44:15.973407 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 11:44:22 crc kubenswrapper[4780]: E1210 11:44:22.962904 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 11:44:29 crc kubenswrapper[4780]: E1210 11:44:29.963833 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 11:44:37 crc kubenswrapper[4780]: E1210 11:44:37.962764 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 11:44:42 crc kubenswrapper[4780]: E1210 11:44:42.961356 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 11:44:49 crc kubenswrapper[4780]: E1210 11:44:49.963936 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 11:44:55 crc kubenswrapper[4780]: 
E1210 11:44:55.973735 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 11:44:57 crc kubenswrapper[4780]: I1210 11:44:57.475953 4780 patch_prober.go:28] interesting pod/machine-config-daemon-xhdr5 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 10 11:44:57 crc kubenswrapper[4780]: I1210 11:44:57.476342 4780 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" podUID="6bf1dca1-b191-4796-b326-baac53e84045" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 10 11:45:00 crc kubenswrapper[4780]: I1210 11:45:00.177659 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29422785-56b4c"] Dec 10 11:45:00 crc kubenswrapper[4780]: E1210 11:45:00.179287 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="df7d88d5-88fb-4edb-aae6-5623f2e6f6bc" containerName="extract-utilities" Dec 10 11:45:00 crc kubenswrapper[4780]: I1210 11:45:00.179309 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="df7d88d5-88fb-4edb-aae6-5623f2e6f6bc" containerName="extract-utilities" Dec 10 11:45:00 crc kubenswrapper[4780]: E1210 11:45:00.179329 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="df7d88d5-88fb-4edb-aae6-5623f2e6f6bc" containerName="registry-server" Dec 10 11:45:00 crc kubenswrapper[4780]: I1210 11:45:00.179336 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="df7d88d5-88fb-4edb-aae6-5623f2e6f6bc" containerName="registry-server" Dec 10 11:45:00 crc kubenswrapper[4780]: E1210 11:45:00.179391 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="df7d88d5-88fb-4edb-aae6-5623f2e6f6bc" containerName="extract-content" Dec 10 11:45:00 crc kubenswrapper[4780]: I1210 11:45:00.179399 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="df7d88d5-88fb-4edb-aae6-5623f2e6f6bc" containerName="extract-content" Dec 10 11:45:00 crc kubenswrapper[4780]: I1210 11:45:00.179716 4780 memory_manager.go:354] "RemoveStaleState removing state" podUID="df7d88d5-88fb-4edb-aae6-5623f2e6f6bc" containerName="registry-server" Dec 10 11:45:00 crc kubenswrapper[4780]: I1210 11:45:00.181107 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29422785-56b4c" Dec 10 11:45:00 crc kubenswrapper[4780]: I1210 11:45:00.186305 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Dec 10 11:45:00 crc kubenswrapper[4780]: I1210 11:45:00.187440 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Dec 10 11:45:00 crc kubenswrapper[4780]: I1210 11:45:00.194069 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29422785-56b4c"] Dec 10 11:45:00 crc kubenswrapper[4780]: I1210 11:45:00.349448 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/3245693f-ba76-410c-a3c7-4d6b6de92ed5-secret-volume\") pod \"collect-profiles-29422785-56b4c\" (UID: \"3245693f-ba76-410c-a3c7-4d6b6de92ed5\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29422785-56b4c" Dec 10 11:45:00 crc kubenswrapper[4780]: I1210 11:45:00.349588 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5pmxk\" (UniqueName: \"kubernetes.io/projected/3245693f-ba76-410c-a3c7-4d6b6de92ed5-kube-api-access-5pmxk\") pod \"collect-profiles-29422785-56b4c\" (UID: \"3245693f-ba76-410c-a3c7-4d6b6de92ed5\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29422785-56b4c" Dec 10 11:45:00 crc kubenswrapper[4780]: I1210 11:45:00.349723 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/3245693f-ba76-410c-a3c7-4d6b6de92ed5-config-volume\") pod \"collect-profiles-29422785-56b4c\" (UID: \"3245693f-ba76-410c-a3c7-4d6b6de92ed5\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29422785-56b4c" Dec 10 11:45:00 crc kubenswrapper[4780]: I1210 11:45:00.452884 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5pmxk\" (UniqueName: \"kubernetes.io/projected/3245693f-ba76-410c-a3c7-4d6b6de92ed5-kube-api-access-5pmxk\") pod \"collect-profiles-29422785-56b4c\" (UID: \"3245693f-ba76-410c-a3c7-4d6b6de92ed5\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29422785-56b4c" Dec 10 11:45:00 crc kubenswrapper[4780]: I1210 11:45:00.453089 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/3245693f-ba76-410c-a3c7-4d6b6de92ed5-config-volume\") pod \"collect-profiles-29422785-56b4c\" (UID: \"3245693f-ba76-410c-a3c7-4d6b6de92ed5\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29422785-56b4c" Dec 10 11:45:00 crc kubenswrapper[4780]: I1210 11:45:00.453248 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/3245693f-ba76-410c-a3c7-4d6b6de92ed5-secret-volume\") pod \"collect-profiles-29422785-56b4c\" (UID: \"3245693f-ba76-410c-a3c7-4d6b6de92ed5\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29422785-56b4c" Dec 10 11:45:00 crc kubenswrapper[4780]: I1210 11:45:00.456062 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/3245693f-ba76-410c-a3c7-4d6b6de92ed5-config-volume\") pod 
\"collect-profiles-29422785-56b4c\" (UID: \"3245693f-ba76-410c-a3c7-4d6b6de92ed5\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29422785-56b4c" Dec 10 11:45:00 crc kubenswrapper[4780]: I1210 11:45:00.468357 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/3245693f-ba76-410c-a3c7-4d6b6de92ed5-secret-volume\") pod \"collect-profiles-29422785-56b4c\" (UID: \"3245693f-ba76-410c-a3c7-4d6b6de92ed5\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29422785-56b4c" Dec 10 11:45:00 crc kubenswrapper[4780]: I1210 11:45:00.479587 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5pmxk\" (UniqueName: \"kubernetes.io/projected/3245693f-ba76-410c-a3c7-4d6b6de92ed5-kube-api-access-5pmxk\") pod \"collect-profiles-29422785-56b4c\" (UID: \"3245693f-ba76-410c-a3c7-4d6b6de92ed5\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29422785-56b4c" Dec 10 11:45:00 crc kubenswrapper[4780]: I1210 11:45:00.554115 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29422785-56b4c" Dec 10 11:45:01 crc kubenswrapper[4780]: I1210 11:45:01.082875 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29422785-56b4c"] Dec 10 11:45:01 crc kubenswrapper[4780]: I1210 11:45:01.373476 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29422785-56b4c" event={"ID":"3245693f-ba76-410c-a3c7-4d6b6de92ed5","Type":"ContainerStarted","Data":"8a71ed427ad0f05a5f6a5173eef1ead6d46753a4b26bfff1e364185a4c9c9083"} Dec 10 11:45:02 crc kubenswrapper[4780]: I1210 11:45:02.454807 4780 generic.go:334] "Generic (PLEG): container finished" podID="3245693f-ba76-410c-a3c7-4d6b6de92ed5" containerID="8587bf4f4a392d814fa8cadfb6dca551ae040ac5eca1cc89b8e0919f9de55f9d" exitCode=0 Dec 10 11:45:02 crc kubenswrapper[4780]: I1210 11:45:02.454972 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29422785-56b4c" event={"ID":"3245693f-ba76-410c-a3c7-4d6b6de92ed5","Type":"ContainerDied","Data":"8587bf4f4a392d814fa8cadfb6dca551ae040ac5eca1cc89b8e0919f9de55f9d"} Dec 10 11:45:02 crc kubenswrapper[4780]: E1210 11:45:02.963154 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 11:45:04 crc kubenswrapper[4780]: I1210 11:45:04.327218 4780 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29422785-56b4c" Dec 10 11:45:04 crc kubenswrapper[4780]: I1210 11:45:04.490165 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29422785-56b4c" event={"ID":"3245693f-ba76-410c-a3c7-4d6b6de92ed5","Type":"ContainerDied","Data":"8a71ed427ad0f05a5f6a5173eef1ead6d46753a4b26bfff1e364185a4c9c9083"} Dec 10 11:45:04 crc kubenswrapper[4780]: I1210 11:45:04.490237 4780 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="8a71ed427ad0f05a5f6a5173eef1ead6d46753a4b26bfff1e364185a4c9c9083" Dec 10 11:45:04 crc kubenswrapper[4780]: I1210 11:45:04.490337 4780 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29422785-56b4c" Dec 10 11:45:04 crc kubenswrapper[4780]: I1210 11:45:04.526218 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/3245693f-ba76-410c-a3c7-4d6b6de92ed5-config-volume\") pod \"3245693f-ba76-410c-a3c7-4d6b6de92ed5\" (UID: \"3245693f-ba76-410c-a3c7-4d6b6de92ed5\") " Dec 10 11:45:04 crc kubenswrapper[4780]: I1210 11:45:04.526672 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/3245693f-ba76-410c-a3c7-4d6b6de92ed5-secret-volume\") pod \"3245693f-ba76-410c-a3c7-4d6b6de92ed5\" (UID: \"3245693f-ba76-410c-a3c7-4d6b6de92ed5\") " Dec 10 11:45:04 crc kubenswrapper[4780]: I1210 11:45:04.527211 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5pmxk\" (UniqueName: \"kubernetes.io/projected/3245693f-ba76-410c-a3c7-4d6b6de92ed5-kube-api-access-5pmxk\") pod \"3245693f-ba76-410c-a3c7-4d6b6de92ed5\" (UID: \"3245693f-ba76-410c-a3c7-4d6b6de92ed5\") " Dec 10 11:45:04 crc kubenswrapper[4780]: I1210 11:45:04.527398 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3245693f-ba76-410c-a3c7-4d6b6de92ed5-config-volume" (OuterVolumeSpecName: "config-volume") pod "3245693f-ba76-410c-a3c7-4d6b6de92ed5" (UID: "3245693f-ba76-410c-a3c7-4d6b6de92ed5"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 11:45:04 crc kubenswrapper[4780]: I1210 11:45:04.530937 4780 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/3245693f-ba76-410c-a3c7-4d6b6de92ed5-config-volume\") on node \"crc\" DevicePath \"\"" Dec 10 11:45:04 crc kubenswrapper[4780]: I1210 11:45:04.537078 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3245693f-ba76-410c-a3c7-4d6b6de92ed5-kube-api-access-5pmxk" (OuterVolumeSpecName: "kube-api-access-5pmxk") pod "3245693f-ba76-410c-a3c7-4d6b6de92ed5" (UID: "3245693f-ba76-410c-a3c7-4d6b6de92ed5"). InnerVolumeSpecName "kube-api-access-5pmxk". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:45:04 crc kubenswrapper[4780]: I1210 11:45:04.537399 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3245693f-ba76-410c-a3c7-4d6b6de92ed5-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "3245693f-ba76-410c-a3c7-4d6b6de92ed5" (UID: "3245693f-ba76-410c-a3c7-4d6b6de92ed5"). InnerVolumeSpecName "secret-volume". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:45:04 crc kubenswrapper[4780]: I1210 11:45:04.633160 4780 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5pmxk\" (UniqueName: \"kubernetes.io/projected/3245693f-ba76-410c-a3c7-4d6b6de92ed5-kube-api-access-5pmxk\") on node \"crc\" DevicePath \"\"" Dec 10 11:45:04 crc kubenswrapper[4780]: I1210 11:45:04.633219 4780 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/3245693f-ba76-410c-a3c7-4d6b6de92ed5-secret-volume\") on node \"crc\" DevicePath \"\"" Dec 10 11:45:05 crc kubenswrapper[4780]: I1210 11:45:05.449933 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29422740-nsfxm"] Dec 10 11:45:05 crc kubenswrapper[4780]: I1210 11:45:05.462113 4780 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29422740-nsfxm"] Dec 10 11:45:06 crc kubenswrapper[4780]: I1210 11:45:06.003203 4780 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0255c5ce-87c5-455f-a1e4-bca36b91e355" path="/var/lib/kubelet/pods/0255c5ce-87c5-455f-a1e4-bca36b91e355/volumes" Dec 10 11:45:06 crc kubenswrapper[4780]: E1210 11:45:06.962101 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 11:45:14 crc kubenswrapper[4780]: E1210 11:45:14.961584 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 11:45:20 crc kubenswrapper[4780]: E1210 11:45:20.963421 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 11:45:27 crc kubenswrapper[4780]: I1210 11:45:27.599401 4780 patch_prober.go:28] interesting pod/machine-config-daemon-xhdr5 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 10 11:45:27 crc kubenswrapper[4780]: I1210 11:45:27.600108 4780 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" podUID="6bf1dca1-b191-4796-b326-baac53e84045" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 10 11:45:29 crc kubenswrapper[4780]: E1210 11:45:29.962663 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" 
podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 11:45:34 crc kubenswrapper[4780]: E1210 11:45:34.962244 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 11:45:42 crc kubenswrapper[4780]: E1210 11:45:42.961766 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 11:45:46 crc kubenswrapper[4780]: I1210 11:45:46.810884 4780 scope.go:117] "RemoveContainer" containerID="0d630ef72fff6d41ee532c8e3abd532bf18aee81bfa73c14df75b031fd43d865" Dec 10 11:45:46 crc kubenswrapper[4780]: E1210 11:45:46.965163 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 11:45:57 crc kubenswrapper[4780]: I1210 11:45:57.476509 4780 patch_prober.go:28] interesting pod/machine-config-daemon-xhdr5 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 10 11:45:57 crc kubenswrapper[4780]: I1210 11:45:57.477346 4780 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" podUID="6bf1dca1-b191-4796-b326-baac53e84045" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 10 11:45:57 crc kubenswrapper[4780]: I1210 11:45:57.477414 4780 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" Dec 10 11:45:57 crc kubenswrapper[4780]: I1210 11:45:57.478819 4780 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"e15ab34e36ac32b53834cffe21875a8d05b051c4c02a437375344ed40ed90da1"} pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Dec 10 11:45:57 crc kubenswrapper[4780]: I1210 11:45:57.478909 4780 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" podUID="6bf1dca1-b191-4796-b326-baac53e84045" containerName="machine-config-daemon" containerID="cri-o://e15ab34e36ac32b53834cffe21875a8d05b051c4c02a437375344ed40ed90da1" gracePeriod=600 Dec 10 11:45:57 crc kubenswrapper[4780]: I1210 11:45:57.807338 4780 generic.go:334] "Generic (PLEG): container finished" podID="6bf1dca1-b191-4796-b326-baac53e84045" containerID="e15ab34e36ac32b53834cffe21875a8d05b051c4c02a437375344ed40ed90da1" exitCode=0 Dec 10 11:45:57 crc kubenswrapper[4780]: I1210 11:45:57.807393 4780 
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" event={"ID":"6bf1dca1-b191-4796-b326-baac53e84045","Type":"ContainerDied","Data":"e15ab34e36ac32b53834cffe21875a8d05b051c4c02a437375344ed40ed90da1"} Dec 10 11:45:57 crc kubenswrapper[4780]: I1210 11:45:57.807856 4780 scope.go:117] "RemoveContainer" containerID="ad1d621b1a826491d785af0d16af5702804ee2ac6c2b34195a2b38cec7226ce1" Dec 10 11:45:58 crc kubenswrapper[4780]: E1210 11:45:58.068659 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 11:45:58 crc kubenswrapper[4780]: I1210 11:45:58.828185 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" event={"ID":"6bf1dca1-b191-4796-b326-baac53e84045","Type":"ContainerStarted","Data":"ef7d6e0e4daf35faca4d81cd35d964d90bb2353686969b711ed29b95292d5f74"} Dec 10 11:45:58 crc kubenswrapper[4780]: E1210 11:45:58.963294 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 11:46:09 crc kubenswrapper[4780]: E1210 11:46:09.963370 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 11:46:10 crc kubenswrapper[4780]: E1210 11:46:10.961725 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 11:46:21 crc kubenswrapper[4780]: E1210 11:46:21.965031 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 11:46:23 crc kubenswrapper[4780]: E1210 11:46:23.961411 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 11:46:34 crc kubenswrapper[4780]: E1210 11:46:34.960848 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" 
podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 11:46:36 crc kubenswrapper[4780]: I1210 11:46:36.821597 4780 generic.go:334] "Generic (PLEG): container finished" podID="56ce8299-173b-429b-b042-f78fb64b6a74" containerID="9339f08fdd8334a880bfb69d2a235ad11bfabaa40eeaa17cecfe00e66d96b0c1" exitCode=2 Dec 10 11:46:36 crc kubenswrapper[4780]: I1210 11:46:36.821686 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-r9hfm" event={"ID":"56ce8299-173b-429b-b042-f78fb64b6a74","Type":"ContainerDied","Data":"9339f08fdd8334a880bfb69d2a235ad11bfabaa40eeaa17cecfe00e66d96b0c1"} Dec 10 11:46:36 crc kubenswrapper[4780]: E1210 11:46:36.967654 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 11:46:38 crc kubenswrapper[4780]: I1210 11:46:38.403155 4780 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-r9hfm" Dec 10 11:46:38 crc kubenswrapper[4780]: I1210 11:46:38.488966 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/56ce8299-173b-429b-b042-f78fb64b6a74-inventory\") pod \"56ce8299-173b-429b-b042-f78fb64b6a74\" (UID: \"56ce8299-173b-429b-b042-f78fb64b6a74\") " Dec 10 11:46:38 crc kubenswrapper[4780]: I1210 11:46:38.489370 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nqw6c\" (UniqueName: \"kubernetes.io/projected/56ce8299-173b-429b-b042-f78fb64b6a74-kube-api-access-nqw6c\") pod \"56ce8299-173b-429b-b042-f78fb64b6a74\" (UID: \"56ce8299-173b-429b-b042-f78fb64b6a74\") " Dec 10 11:46:38 crc kubenswrapper[4780]: I1210 11:46:38.489527 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/56ce8299-173b-429b-b042-f78fb64b6a74-ssh-key\") pod \"56ce8299-173b-429b-b042-f78fb64b6a74\" (UID: \"56ce8299-173b-429b-b042-f78fb64b6a74\") " Dec 10 11:46:38 crc kubenswrapper[4780]: I1210 11:46:38.503520 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/56ce8299-173b-429b-b042-f78fb64b6a74-kube-api-access-nqw6c" (OuterVolumeSpecName: "kube-api-access-nqw6c") pod "56ce8299-173b-429b-b042-f78fb64b6a74" (UID: "56ce8299-173b-429b-b042-f78fb64b6a74"). InnerVolumeSpecName "kube-api-access-nqw6c". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:46:38 crc kubenswrapper[4780]: I1210 11:46:38.533469 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/56ce8299-173b-429b-b042-f78fb64b6a74-inventory" (OuterVolumeSpecName: "inventory") pod "56ce8299-173b-429b-b042-f78fb64b6a74" (UID: "56ce8299-173b-429b-b042-f78fb64b6a74"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:46:38 crc kubenswrapper[4780]: I1210 11:46:38.538874 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/56ce8299-173b-429b-b042-f78fb64b6a74-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "56ce8299-173b-429b-b042-f78fb64b6a74" (UID: "56ce8299-173b-429b-b042-f78fb64b6a74"). 
InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:46:38 crc kubenswrapper[4780]: I1210 11:46:38.597754 4780 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nqw6c\" (UniqueName: \"kubernetes.io/projected/56ce8299-173b-429b-b042-f78fb64b6a74-kube-api-access-nqw6c\") on node \"crc\" DevicePath \"\"" Dec 10 11:46:38 crc kubenswrapper[4780]: I1210 11:46:38.598349 4780 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/56ce8299-173b-429b-b042-f78fb64b6a74-ssh-key\") on node \"crc\" DevicePath \"\"" Dec 10 11:46:38 crc kubenswrapper[4780]: I1210 11:46:38.598365 4780 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/56ce8299-173b-429b-b042-f78fb64b6a74-inventory\") on node \"crc\" DevicePath \"\"" Dec 10 11:46:38 crc kubenswrapper[4780]: I1210 11:46:38.851179 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-r9hfm" event={"ID":"56ce8299-173b-429b-b042-f78fb64b6a74","Type":"ContainerDied","Data":"9d8f417be7b7e7658213537dbdc917c4a4d2e76449c3dd5bc05582f39fa5c220"} Dec 10 11:46:38 crc kubenswrapper[4780]: I1210 11:46:38.851226 4780 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="9d8f417be7b7e7658213537dbdc917c4a4d2e76449c3dd5bc05582f39fa5c220" Dec 10 11:46:38 crc kubenswrapper[4780]: I1210 11:46:38.851271 4780 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-r9hfm" Dec 10 11:46:48 crc kubenswrapper[4780]: E1210 11:46:48.962675 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 11:46:49 crc kubenswrapper[4780]: E1210 11:46:49.966500 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 11:47:03 crc kubenswrapper[4780]: I1210 11:47:03.166726 4780 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Dec 10 11:47:03 crc kubenswrapper[4780]: E1210 11:47:03.305248 4780 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested" Dec 10 11:47:03 crc kubenswrapper[4780]: E1210 11:47:03.305309 4780 kuberuntime_image.go:55] "Failed to pull image" err="initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested" Dec 10 11:47:03 crc kubenswrapper[4780]: E1210 11:47:03.305854 4780 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:heat-db-sync,Image:quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested,Command:[/bin/bash],Args:[-c /usr/bin/heat-manage --config-dir /etc/heat/heat.conf.d db_sync],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:true,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/heat/heat.conf.d/00-default.conf,SubPath:00-default.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:false,MountPath:/etc/heat/heat.conf.d/01-custom.conf,SubPath:01-custom.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/my.cnf,SubPath:my.cnf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-fh6ms,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42418,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:*42418,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod heat-db-sync-nd4t7_openstack(4ba2892c-316e-4819-a33c-d7b2b6803553): ErrImagePull: initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine" logger="UnhandledError" Dec 10 11:47:03 crc kubenswrapper[4780]: E1210 11:47:03.307061 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ErrImagePull: \"initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 11:47:03 crc kubenswrapper[4780]: E1210 11:47:03.960522 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 11:47:16 crc kubenswrapper[4780]: I1210 11:47:16.055846 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/download-cache-edpm-deployment-openstack-edpm-ipam-2jkww"] Dec 10 11:47:16 crc kubenswrapper[4780]: E1210 11:47:16.064258 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="56ce8299-173b-429b-b042-f78fb64b6a74" containerName="download-cache-edpm-deployment-openstack-edpm-ipam" Dec 10 11:47:16 crc kubenswrapper[4780]: I1210 11:47:16.064351 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="56ce8299-173b-429b-b042-f78fb64b6a74" containerName="download-cache-edpm-deployment-openstack-edpm-ipam" Dec 10 11:47:16 crc kubenswrapper[4780]: E1210 11:47:16.064402 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3245693f-ba76-410c-a3c7-4d6b6de92ed5" containerName="collect-profiles" Dec 10 11:47:16 crc kubenswrapper[4780]: I1210 11:47:16.064416 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="3245693f-ba76-410c-a3c7-4d6b6de92ed5" containerName="collect-profiles" Dec 10 11:47:16 crc kubenswrapper[4780]: I1210 11:47:16.064849 4780 memory_manager.go:354] "RemoveStaleState removing state" podUID="56ce8299-173b-429b-b042-f78fb64b6a74" containerName="download-cache-edpm-deployment-openstack-edpm-ipam" Dec 10 11:47:16 crc kubenswrapper[4780]: I1210 11:47:16.064882 4780 memory_manager.go:354] "RemoveStaleState removing state" podUID="3245693f-ba76-410c-a3c7-4d6b6de92ed5" containerName="collect-profiles" Dec 10 11:47:16 crc kubenswrapper[4780]: I1210 11:47:16.066726 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-2jkww" Dec 10 11:47:16 crc kubenswrapper[4780]: I1210 11:47:16.077164 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-p2qrb" Dec 10 11:47:16 crc kubenswrapper[4780]: I1210 11:47:16.077580 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Dec 10 11:47:16 crc kubenswrapper[4780]: I1210 11:47:16.077861 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Dec 10 11:47:16 crc kubenswrapper[4780]: I1210 11:47:16.078195 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Dec 10 11:47:16 crc kubenswrapper[4780]: I1210 11:47:16.089853 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/download-cache-edpm-deployment-openstack-edpm-ipam-2jkww"] Dec 10 11:47:16 crc kubenswrapper[4780]: I1210 11:47:16.212105 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/305fe5d9-cca1-44f3-9ec9-034bde2b5434-ssh-key\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-2jkww\" (UID: \"305fe5d9-cca1-44f3-9ec9-034bde2b5434\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-2jkww" Dec 10 11:47:16 crc kubenswrapper[4780]: I1210 11:47:16.212715 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wh72d\" (UniqueName: \"kubernetes.io/projected/305fe5d9-cca1-44f3-9ec9-034bde2b5434-kube-api-access-wh72d\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-2jkww\" (UID: \"305fe5d9-cca1-44f3-9ec9-034bde2b5434\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-2jkww" Dec 10 11:47:16 crc kubenswrapper[4780]: I1210 11:47:16.212833 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/305fe5d9-cca1-44f3-9ec9-034bde2b5434-inventory\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-2jkww\" (UID: \"305fe5d9-cca1-44f3-9ec9-034bde2b5434\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-2jkww" Dec 10 11:47:16 crc kubenswrapper[4780]: I1210 11:47:16.331347 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/305fe5d9-cca1-44f3-9ec9-034bde2b5434-ssh-key\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-2jkww\" (UID: \"305fe5d9-cca1-44f3-9ec9-034bde2b5434\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-2jkww" Dec 10 11:47:16 crc kubenswrapper[4780]: I1210 11:47:16.331456 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wh72d\" (UniqueName: \"kubernetes.io/projected/305fe5d9-cca1-44f3-9ec9-034bde2b5434-kube-api-access-wh72d\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-2jkww\" (UID: \"305fe5d9-cca1-44f3-9ec9-034bde2b5434\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-2jkww" Dec 10 11:47:16 crc kubenswrapper[4780]: I1210 11:47:16.331564 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/305fe5d9-cca1-44f3-9ec9-034bde2b5434-inventory\") pod 
\"download-cache-edpm-deployment-openstack-edpm-ipam-2jkww\" (UID: \"305fe5d9-cca1-44f3-9ec9-034bde2b5434\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-2jkww" Dec 10 11:47:16 crc kubenswrapper[4780]: I1210 11:47:16.346836 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/305fe5d9-cca1-44f3-9ec9-034bde2b5434-ssh-key\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-2jkww\" (UID: \"305fe5d9-cca1-44f3-9ec9-034bde2b5434\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-2jkww" Dec 10 11:47:16 crc kubenswrapper[4780]: I1210 11:47:16.356387 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/305fe5d9-cca1-44f3-9ec9-034bde2b5434-inventory\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-2jkww\" (UID: \"305fe5d9-cca1-44f3-9ec9-034bde2b5434\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-2jkww" Dec 10 11:47:16 crc kubenswrapper[4780]: I1210 11:47:16.364339 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wh72d\" (UniqueName: \"kubernetes.io/projected/305fe5d9-cca1-44f3-9ec9-034bde2b5434-kube-api-access-wh72d\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-2jkww\" (UID: \"305fe5d9-cca1-44f3-9ec9-034bde2b5434\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-2jkww" Dec 10 11:47:16 crc kubenswrapper[4780]: I1210 11:47:16.411691 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-2jkww" Dec 10 11:47:16 crc kubenswrapper[4780]: E1210 11:47:16.962903 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 11:47:17 crc kubenswrapper[4780]: I1210 11:47:17.123005 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/download-cache-edpm-deployment-openstack-edpm-ipam-2jkww"] Dec 10 11:47:17 crc kubenswrapper[4780]: I1210 11:47:17.861315 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-2jkww" event={"ID":"305fe5d9-cca1-44f3-9ec9-034bde2b5434","Type":"ContainerStarted","Data":"b466a2755bc4666c870b87d45b0c9327eba71dc374ff5a124705d0605ef8ae80"} Dec 10 11:47:18 crc kubenswrapper[4780]: E1210 11:47:18.095955 4780 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested" Dec 10 11:47:18 crc kubenswrapper[4780]: E1210 11:47:18.096040 4780 kuberuntime_image.go:55] "Failed to pull image" err="initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested" Dec 10 11:47:18 crc kubenswrapper[4780]: E1210 11:47:18.096233 4780 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:ceilometer-central-agent,Image:quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n5d9hcfh66bh66bh89h5cdh97h57ch598h68h5b5h689h56chc5h96h58ch687h5dfh5ddh645h68bhcchcdh56ch56fh9fh654hd4h8dhb9h74h59cq,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/var/lib/openstack/bin,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/openstack/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:ceilometer-central-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-gf2w8,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[/usr/bin/python3 /var/lib/openstack/bin/centralhealth.py],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:300,TimeoutSeconds:5,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ceilometer-0_openstack(317b5b7c-bb08-4441-a2ef-8c2d7390ada6): ErrImagePull: initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine" logger="UnhandledError" Dec 10 11:47:18 crc kubenswrapper[4780]: E1210 11:47:18.098126 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ErrImagePull: \"initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 11:47:18 crc kubenswrapper[4780]: I1210 11:47:18.907099 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-2jkww" event={"ID":"305fe5d9-cca1-44f3-9ec9-034bde2b5434","Type":"ContainerStarted","Data":"830d2b3b4da925b56dd41d042a3c932ff94172dd9211742d21d9a48f86b2bcf2"} Dec 10 11:47:18 crc kubenswrapper[4780]: I1210 11:47:18.945575 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-2jkww" podStartSLOduration=2.501062652 podStartE2EDuration="2.945537776s" podCreationTimestamp="2025-12-10 11:47:16 +0000 UTC" firstStartedPulling="2025-12-10 11:47:17.140491769 +0000 UTC m=+3741.993885212" lastFinishedPulling="2025-12-10 11:47:17.584966893 +0000 UTC m=+3742.438360336" observedRunningTime="2025-12-10 11:47:18.931301478 +0000 UTC m=+3743.784694941" watchObservedRunningTime="2025-12-10 11:47:18.945537776 +0000 UTC m=+3743.798931219" Dec 10 11:47:27 crc kubenswrapper[4780]: E1210 11:47:27.963042 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 11:47:30 crc kubenswrapper[4780]: E1210 11:47:30.964062 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 11:47:41 crc kubenswrapper[4780]: E1210 11:47:41.964006 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 11:47:42 crc kubenswrapper[4780]: E1210 11:47:42.961866 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 11:47:55 crc kubenswrapper[4780]: E1210 11:47:55.973794 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image 
\\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 11:47:56 crc kubenswrapper[4780]: E1210 11:47:56.961214 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 11:47:57 crc kubenswrapper[4780]: I1210 11:47:57.475847 4780 patch_prober.go:28] interesting pod/machine-config-daemon-xhdr5 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 10 11:47:57 crc kubenswrapper[4780]: I1210 11:47:57.476332 4780 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" podUID="6bf1dca1-b191-4796-b326-baac53e84045" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 10 11:48:07 crc kubenswrapper[4780]: E1210 11:48:07.961362 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 11:48:10 crc kubenswrapper[4780]: E1210 11:48:10.962248 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 11:48:19 crc kubenswrapper[4780]: E1210 11:48:19.961484 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 11:48:22 crc kubenswrapper[4780]: E1210 11:48:22.960666 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 11:48:27 crc kubenswrapper[4780]: I1210 11:48:27.475470 4780 patch_prober.go:28] interesting pod/machine-config-daemon-xhdr5 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 10 11:48:27 crc kubenswrapper[4780]: I1210 11:48:27.477406 4780 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" podUID="6bf1dca1-b191-4796-b326-baac53e84045" 
containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 10 11:48:33 crc kubenswrapper[4780]: E1210 11:48:33.961681 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 11:48:34 crc kubenswrapper[4780]: E1210 11:48:34.961451 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 11:48:46 crc kubenswrapper[4780]: E1210 11:48:46.961141 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 11:48:48 crc kubenswrapper[4780]: E1210 11:48:48.961643 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 11:48:57 crc kubenswrapper[4780]: I1210 11:48:57.475825 4780 patch_prober.go:28] interesting pod/machine-config-daemon-xhdr5 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 10 11:48:57 crc kubenswrapper[4780]: I1210 11:48:57.476586 4780 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" podUID="6bf1dca1-b191-4796-b326-baac53e84045" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 10 11:48:57 crc kubenswrapper[4780]: I1210 11:48:57.476657 4780 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" Dec 10 11:48:57 crc kubenswrapper[4780]: I1210 11:48:57.477829 4780 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"ef7d6e0e4daf35faca4d81cd35d964d90bb2353686969b711ed29b95292d5f74"} pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Dec 10 11:48:57 crc kubenswrapper[4780]: I1210 11:48:57.477890 4780 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" podUID="6bf1dca1-b191-4796-b326-baac53e84045" containerName="machine-config-daemon" containerID="cri-o://ef7d6e0e4daf35faca4d81cd35d964d90bb2353686969b711ed29b95292d5f74" gracePeriod=600 Dec 
10 11:48:57 crc kubenswrapper[4780]: E1210 11:48:57.609360 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xhdr5_openshift-machine-config-operator(6bf1dca1-b191-4796-b326-baac53e84045)\"" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" podUID="6bf1dca1-b191-4796-b326-baac53e84045" Dec 10 11:48:57 crc kubenswrapper[4780]: E1210 11:48:57.965646 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 11:48:58 crc kubenswrapper[4780]: I1210 11:48:58.392960 4780 generic.go:334] "Generic (PLEG): container finished" podID="6bf1dca1-b191-4796-b326-baac53e84045" containerID="ef7d6e0e4daf35faca4d81cd35d964d90bb2353686969b711ed29b95292d5f74" exitCode=0 Dec 10 11:48:58 crc kubenswrapper[4780]: I1210 11:48:58.393032 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" event={"ID":"6bf1dca1-b191-4796-b326-baac53e84045","Type":"ContainerDied","Data":"ef7d6e0e4daf35faca4d81cd35d964d90bb2353686969b711ed29b95292d5f74"} Dec 10 11:48:58 crc kubenswrapper[4780]: I1210 11:48:58.393084 4780 scope.go:117] "RemoveContainer" containerID="e15ab34e36ac32b53834cffe21875a8d05b051c4c02a437375344ed40ed90da1" Dec 10 11:48:58 crc kubenswrapper[4780]: I1210 11:48:58.394330 4780 scope.go:117] "RemoveContainer" containerID="ef7d6e0e4daf35faca4d81cd35d964d90bb2353686969b711ed29b95292d5f74" Dec 10 11:48:58 crc kubenswrapper[4780]: E1210 11:48:58.394753 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xhdr5_openshift-machine-config-operator(6bf1dca1-b191-4796-b326-baac53e84045)\"" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" podUID="6bf1dca1-b191-4796-b326-baac53e84045" Dec 10 11:49:02 crc kubenswrapper[4780]: E1210 11:49:02.964834 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 11:49:09 crc kubenswrapper[4780]: I1210 11:49:09.961290 4780 scope.go:117] "RemoveContainer" containerID="ef7d6e0e4daf35faca4d81cd35d964d90bb2353686969b711ed29b95292d5f74" Dec 10 11:49:09 crc kubenswrapper[4780]: E1210 11:49:09.962796 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xhdr5_openshift-machine-config-operator(6bf1dca1-b191-4796-b326-baac53e84045)\"" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" podUID="6bf1dca1-b191-4796-b326-baac53e84045" Dec 10 11:49:10 crc kubenswrapper[4780]: E1210 11:49:10.964460 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to 
\"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 11:49:17 crc kubenswrapper[4780]: E1210 11:49:17.963347 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 11:49:21 crc kubenswrapper[4780]: I1210 11:49:21.959833 4780 scope.go:117] "RemoveContainer" containerID="ef7d6e0e4daf35faca4d81cd35d964d90bb2353686969b711ed29b95292d5f74" Dec 10 11:49:21 crc kubenswrapper[4780]: E1210 11:49:21.960613 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xhdr5_openshift-machine-config-operator(6bf1dca1-b191-4796-b326-baac53e84045)\"" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" podUID="6bf1dca1-b191-4796-b326-baac53e84045" Dec 10 11:49:22 crc kubenswrapper[4780]: E1210 11:49:22.075762 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 11:49:30 crc kubenswrapper[4780]: E1210 11:49:30.962615 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 11:49:34 crc kubenswrapper[4780]: I1210 11:49:34.962635 4780 scope.go:117] "RemoveContainer" containerID="ef7d6e0e4daf35faca4d81cd35d964d90bb2353686969b711ed29b95292d5f74" Dec 10 11:49:34 crc kubenswrapper[4780]: E1210 11:49:34.965349 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xhdr5_openshift-machine-config-operator(6bf1dca1-b191-4796-b326-baac53e84045)\"" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" podUID="6bf1dca1-b191-4796-b326-baac53e84045" Dec 10 11:49:35 crc kubenswrapper[4780]: E1210 11:49:35.975543 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 11:49:43 crc kubenswrapper[4780]: E1210 11:49:43.962748 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" 
pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 11:49:49 crc kubenswrapper[4780]: I1210 11:49:49.959683 4780 scope.go:117] "RemoveContainer" containerID="ef7d6e0e4daf35faca4d81cd35d964d90bb2353686969b711ed29b95292d5f74" Dec 10 11:49:49 crc kubenswrapper[4780]: E1210 11:49:49.961175 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xhdr5_openshift-machine-config-operator(6bf1dca1-b191-4796-b326-baac53e84045)\"" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" podUID="6bf1dca1-b191-4796-b326-baac53e84045" Dec 10 11:49:50 crc kubenswrapper[4780]: E1210 11:49:50.961117 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 11:49:56 crc kubenswrapper[4780]: E1210 11:49:56.965509 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 11:50:01 crc kubenswrapper[4780]: I1210 11:50:01.959821 4780 scope.go:117] "RemoveContainer" containerID="ef7d6e0e4daf35faca4d81cd35d964d90bb2353686969b711ed29b95292d5f74" Dec 10 11:50:01 crc kubenswrapper[4780]: E1210 11:50:01.962136 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xhdr5_openshift-machine-config-operator(6bf1dca1-b191-4796-b326-baac53e84045)\"" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" podUID="6bf1dca1-b191-4796-b326-baac53e84045" Dec 10 11:50:05 crc kubenswrapper[4780]: E1210 11:50:05.975703 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 11:50:09 crc kubenswrapper[4780]: E1210 11:50:09.963729 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 11:50:15 crc kubenswrapper[4780]: I1210 11:50:15.971537 4780 scope.go:117] "RemoveContainer" containerID="ef7d6e0e4daf35faca4d81cd35d964d90bb2353686969b711ed29b95292d5f74" Dec 10 11:50:15 crc kubenswrapper[4780]: E1210 11:50:15.973381 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-xhdr5_openshift-machine-config-operator(6bf1dca1-b191-4796-b326-baac53e84045)\"" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" podUID="6bf1dca1-b191-4796-b326-baac53e84045" Dec 10 11:50:16 crc kubenswrapper[4780]: E1210 11:50:16.963708 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 11:50:21 crc kubenswrapper[4780]: E1210 11:50:21.971082 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 11:50:26 crc kubenswrapper[4780]: I1210 11:50:26.962119 4780 scope.go:117] "RemoveContainer" containerID="ef7d6e0e4daf35faca4d81cd35d964d90bb2353686969b711ed29b95292d5f74" Dec 10 11:50:26 crc kubenswrapper[4780]: E1210 11:50:26.963545 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xhdr5_openshift-machine-config-operator(6bf1dca1-b191-4796-b326-baac53e84045)\"" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" podUID="6bf1dca1-b191-4796-b326-baac53e84045" Dec 10 11:50:28 crc kubenswrapper[4780]: E1210 11:50:28.967238 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 11:50:36 crc kubenswrapper[4780]: E1210 11:50:36.964227 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 11:50:41 crc kubenswrapper[4780]: I1210 11:50:41.960577 4780 scope.go:117] "RemoveContainer" containerID="ef7d6e0e4daf35faca4d81cd35d964d90bb2353686969b711ed29b95292d5f74" Dec 10 11:50:41 crc kubenswrapper[4780]: E1210 11:50:41.961790 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xhdr5_openshift-machine-config-operator(6bf1dca1-b191-4796-b326-baac53e84045)\"" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" podUID="6bf1dca1-b191-4796-b326-baac53e84045" Dec 10 11:50:43 crc kubenswrapper[4780]: E1210 11:50:43.974412 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" 
podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 11:50:52 crc kubenswrapper[4780]: E1210 11:50:52.112002 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 11:50:53 crc kubenswrapper[4780]: I1210 11:50:53.965580 4780 scope.go:117] "RemoveContainer" containerID="ef7d6e0e4daf35faca4d81cd35d964d90bb2353686969b711ed29b95292d5f74" Dec 10 11:50:53 crc kubenswrapper[4780]: E1210 11:50:53.987139 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xhdr5_openshift-machine-config-operator(6bf1dca1-b191-4796-b326-baac53e84045)\"" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" podUID="6bf1dca1-b191-4796-b326-baac53e84045" Dec 10 11:50:57 crc kubenswrapper[4780]: E1210 11:50:57.962754 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 11:51:04 crc kubenswrapper[4780]: E1210 11:51:04.970241 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 11:51:07 crc kubenswrapper[4780]: I1210 11:51:07.959529 4780 scope.go:117] "RemoveContainer" containerID="ef7d6e0e4daf35faca4d81cd35d964d90bb2353686969b711ed29b95292d5f74" Dec 10 11:51:07 crc kubenswrapper[4780]: E1210 11:51:07.960879 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xhdr5_openshift-machine-config-operator(6bf1dca1-b191-4796-b326-baac53e84045)\"" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" podUID="6bf1dca1-b191-4796-b326-baac53e84045" Dec 10 11:51:09 crc kubenswrapper[4780]: E1210 11:51:09.962235 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 11:51:17 crc kubenswrapper[4780]: E1210 11:51:17.965418 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 11:51:18 crc kubenswrapper[4780]: I1210 11:51:18.395438 4780 prober.go:107] "Probe failed" probeType="Liveness" 
pod="openstack/openstack-cell1-galera-0" podUID="ee80edf3-0250-44c9-acfb-e2f9a3ce4f4b" containerName="galera" probeResult="failure" output="command timed out" Dec 10 11:51:18 crc kubenswrapper[4780]: I1210 11:51:18.396667 4780 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/openstack-cell1-galera-0" podUID="ee80edf3-0250-44c9-acfb-e2f9a3ce4f4b" containerName="galera" probeResult="failure" output="command timed out" Dec 10 11:51:19 crc kubenswrapper[4780]: I1210 11:51:19.959685 4780 scope.go:117] "RemoveContainer" containerID="ef7d6e0e4daf35faca4d81cd35d964d90bb2353686969b711ed29b95292d5f74" Dec 10 11:51:19 crc kubenswrapper[4780]: E1210 11:51:19.960430 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xhdr5_openshift-machine-config-operator(6bf1dca1-b191-4796-b326-baac53e84045)\"" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" podUID="6bf1dca1-b191-4796-b326-baac53e84045" Dec 10 11:51:23 crc kubenswrapper[4780]: E1210 11:51:23.964288 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 11:51:30 crc kubenswrapper[4780]: I1210 11:51:30.960534 4780 scope.go:117] "RemoveContainer" containerID="ef7d6e0e4daf35faca4d81cd35d964d90bb2353686969b711ed29b95292d5f74" Dec 10 11:51:30 crc kubenswrapper[4780]: E1210 11:51:30.962356 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xhdr5_openshift-machine-config-operator(6bf1dca1-b191-4796-b326-baac53e84045)\"" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" podUID="6bf1dca1-b191-4796-b326-baac53e84045" Dec 10 11:51:31 crc kubenswrapper[4780]: E1210 11:51:31.963942 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 11:51:37 crc kubenswrapper[4780]: E1210 11:51:37.967882 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 11:51:41 crc kubenswrapper[4780]: I1210 11:51:41.960074 4780 scope.go:117] "RemoveContainer" containerID="ef7d6e0e4daf35faca4d81cd35d964d90bb2353686969b711ed29b95292d5f74" Dec 10 11:51:41 crc kubenswrapper[4780]: E1210 11:51:41.961577 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xhdr5_openshift-machine-config-operator(6bf1dca1-b191-4796-b326-baac53e84045)\"" 
pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" podUID="6bf1dca1-b191-4796-b326-baac53e84045" Dec 10 11:51:44 crc kubenswrapper[4780]: E1210 11:51:44.963189 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 11:51:48 crc kubenswrapper[4780]: E1210 11:51:48.968496 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 11:51:55 crc kubenswrapper[4780]: I1210 11:51:55.973669 4780 scope.go:117] "RemoveContainer" containerID="ef7d6e0e4daf35faca4d81cd35d964d90bb2353686969b711ed29b95292d5f74" Dec 10 11:51:55 crc kubenswrapper[4780]: E1210 11:51:55.976720 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xhdr5_openshift-machine-config-operator(6bf1dca1-b191-4796-b326-baac53e84045)\"" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" podUID="6bf1dca1-b191-4796-b326-baac53e84045" Dec 10 11:51:56 crc kubenswrapper[4780]: E1210 11:51:56.964058 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 11:52:03 crc kubenswrapper[4780]: I1210 11:52:03.974822 4780 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Dec 10 11:52:04 crc kubenswrapper[4780]: E1210 11:52:04.111204 4780 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested" Dec 10 11:52:04 crc kubenswrapper[4780]: E1210 11:52:04.111957 4780 kuberuntime_image.go:55] "Failed to pull image" err="initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested" Dec 10 11:52:04 crc kubenswrapper[4780]: E1210 11:52:04.112400 4780 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:heat-db-sync,Image:quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested,Command:[/bin/bash],Args:[-c /usr/bin/heat-manage --config-dir /etc/heat/heat.conf.d db_sync],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:true,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/heat/heat.conf.d/00-default.conf,SubPath:00-default.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:false,MountPath:/etc/heat/heat.conf.d/01-custom.conf,SubPath:01-custom.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/my.cnf,SubPath:my.cnf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-fh6ms,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42418,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:*42418,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod heat-db-sync-nd4t7_openstack(4ba2892c-316e-4819-a33c-d7b2b6803553): ErrImagePull: initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" logger="UnhandledError" Dec 10 11:52:04 crc kubenswrapper[4780]: E1210 11:52:04.114176 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ErrImagePull: \"initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 11:52:09 crc kubenswrapper[4780]: E1210 11:52:09.963409 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 11:52:10 crc kubenswrapper[4780]: I1210 11:52:10.963172 4780 scope.go:117] "RemoveContainer" containerID="ef7d6e0e4daf35faca4d81cd35d964d90bb2353686969b711ed29b95292d5f74" Dec 10 11:52:10 crc kubenswrapper[4780]: E1210 11:52:10.964877 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xhdr5_openshift-machine-config-operator(6bf1dca1-b191-4796-b326-baac53e84045)\"" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" podUID="6bf1dca1-b191-4796-b326-baac53e84045" Dec 10 11:52:15 crc kubenswrapper[4780]: E1210 11:52:15.985389 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 11:52:20 crc kubenswrapper[4780]: I1210 11:52:20.639528 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-c2wtl"] Dec 10 11:52:20 crc kubenswrapper[4780]: I1210 11:52:20.646189 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-c2wtl" Dec 10 11:52:20 crc kubenswrapper[4780]: I1210 11:52:20.658449 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-c2wtl"] Dec 10 11:52:20 crc kubenswrapper[4780]: I1210 11:52:20.682058 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/276fb168-4a0c-4b05-9d2e-f7b1c68ae605-utilities\") pod \"redhat-operators-c2wtl\" (UID: \"276fb168-4a0c-4b05-9d2e-f7b1c68ae605\") " pod="openshift-marketplace/redhat-operators-c2wtl" Dec 10 11:52:20 crc kubenswrapper[4780]: I1210 11:52:20.682163 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sv4vg\" (UniqueName: \"kubernetes.io/projected/276fb168-4a0c-4b05-9d2e-f7b1c68ae605-kube-api-access-sv4vg\") pod \"redhat-operators-c2wtl\" (UID: \"276fb168-4a0c-4b05-9d2e-f7b1c68ae605\") " pod="openshift-marketplace/redhat-operators-c2wtl" Dec 10 11:52:20 crc kubenswrapper[4780]: I1210 11:52:20.682337 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/276fb168-4a0c-4b05-9d2e-f7b1c68ae605-catalog-content\") pod \"redhat-operators-c2wtl\" (UID: \"276fb168-4a0c-4b05-9d2e-f7b1c68ae605\") " pod="openshift-marketplace/redhat-operators-c2wtl" Dec 10 11:52:20 crc kubenswrapper[4780]: I1210 11:52:20.785540 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/276fb168-4a0c-4b05-9d2e-f7b1c68ae605-utilities\") pod \"redhat-operators-c2wtl\" (UID: \"276fb168-4a0c-4b05-9d2e-f7b1c68ae605\") " pod="openshift-marketplace/redhat-operators-c2wtl" Dec 10 11:52:20 crc kubenswrapper[4780]: I1210 11:52:20.785685 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sv4vg\" (UniqueName: \"kubernetes.io/projected/276fb168-4a0c-4b05-9d2e-f7b1c68ae605-kube-api-access-sv4vg\") pod \"redhat-operators-c2wtl\" (UID: \"276fb168-4a0c-4b05-9d2e-f7b1c68ae605\") " pod="openshift-marketplace/redhat-operators-c2wtl" Dec 10 11:52:20 crc kubenswrapper[4780]: I1210 11:52:20.786479 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/276fb168-4a0c-4b05-9d2e-f7b1c68ae605-catalog-content\") pod \"redhat-operators-c2wtl\" (UID: \"276fb168-4a0c-4b05-9d2e-f7b1c68ae605\") " pod="openshift-marketplace/redhat-operators-c2wtl" Dec 10 11:52:20 crc kubenswrapper[4780]: I1210 11:52:20.786829 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/276fb168-4a0c-4b05-9d2e-f7b1c68ae605-utilities\") pod \"redhat-operators-c2wtl\" (UID: \"276fb168-4a0c-4b05-9d2e-f7b1c68ae605\") " pod="openshift-marketplace/redhat-operators-c2wtl" Dec 10 11:52:20 crc kubenswrapper[4780]: I1210 11:52:20.787037 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/276fb168-4a0c-4b05-9d2e-f7b1c68ae605-catalog-content\") pod \"redhat-operators-c2wtl\" (UID: \"276fb168-4a0c-4b05-9d2e-f7b1c68ae605\") " pod="openshift-marketplace/redhat-operators-c2wtl" Dec 10 11:52:20 crc kubenswrapper[4780]: I1210 11:52:20.818401 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-sv4vg\" (UniqueName: \"kubernetes.io/projected/276fb168-4a0c-4b05-9d2e-f7b1c68ae605-kube-api-access-sv4vg\") pod \"redhat-operators-c2wtl\" (UID: \"276fb168-4a0c-4b05-9d2e-f7b1c68ae605\") " pod="openshift-marketplace/redhat-operators-c2wtl" Dec 10 11:52:21 crc kubenswrapper[4780]: I1210 11:52:21.005865 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-c2wtl" Dec 10 11:52:21 crc kubenswrapper[4780]: E1210 11:52:21.080554 4780 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested" Dec 10 11:52:21 crc kubenswrapper[4780]: E1210 11:52:21.080687 4780 kuberuntime_image.go:55] "Failed to pull image" err="initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested" Dec 10 11:52:21 crc kubenswrapper[4780]: E1210 11:52:21.080956 4780 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:ceilometer-central-agent,Image:quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n5d9hcfh66bh66bh89h5cdh97h57ch598h68h5b5h689h56chc5h96h58ch687h5dfh5ddh645h68bhcchcdh56ch56fh9fh654hd4h8dhb9h74h59cq,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/var/lib/openstack/bin,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/openstack/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:ceilometer-central-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-gf2w8,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[/usr/bin/python3 
/var/lib/openstack/bin/centralhealth.py],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:300,TimeoutSeconds:5,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ceilometer-0_openstack(317b5b7c-bb08-4441-a2ef-8c2d7390ada6): ErrImagePull: initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" logger="UnhandledError" Dec 10 11:52:21 crc kubenswrapper[4780]: E1210 11:52:21.082251 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ErrImagePull: \"initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 11:52:22 crc kubenswrapper[4780]: I1210 11:52:22.273201 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-c2wtl"] Dec 10 11:52:22 crc kubenswrapper[4780]: I1210 11:52:22.984712 4780 generic.go:334] "Generic (PLEG): container finished" podID="276fb168-4a0c-4b05-9d2e-f7b1c68ae605" containerID="3e0e4901b2b3002e82de92ca0cdbc16661a3af6938445f62060662d48fb4dd35" exitCode=0 Dec 10 11:52:22 crc kubenswrapper[4780]: I1210 11:52:22.985406 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-c2wtl" event={"ID":"276fb168-4a0c-4b05-9d2e-f7b1c68ae605","Type":"ContainerDied","Data":"3e0e4901b2b3002e82de92ca0cdbc16661a3af6938445f62060662d48fb4dd35"} Dec 10 11:52:22 crc kubenswrapper[4780]: I1210 11:52:22.985496 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-c2wtl" event={"ID":"276fb168-4a0c-4b05-9d2e-f7b1c68ae605","Type":"ContainerStarted","Data":"2a060d5fdef665286b23a2bef01061e2031996032244949ecc45024975a6b446"} Dec 10 11:52:24 crc kubenswrapper[4780]: I1210 11:52:24.004860 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-c2wtl" event={"ID":"276fb168-4a0c-4b05-9d2e-f7b1c68ae605","Type":"ContainerStarted","Data":"8399a5dbccd6342e3e0b789afc48e83f1e8ae4755099af685f7ff2659cc3d958"} Dec 10 11:52:25 crc kubenswrapper[4780]: I1210 11:52:25.259000 4780 scope.go:117] "RemoveContainer" containerID="ef7d6e0e4daf35faca4d81cd35d964d90bb2353686969b711ed29b95292d5f74" Dec 10 11:52:25 crc kubenswrapper[4780]: E1210 11:52:25.260505 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to 
\"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xhdr5_openshift-machine-config-operator(6bf1dca1-b191-4796-b326-baac53e84045)\"" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" podUID="6bf1dca1-b191-4796-b326-baac53e84045" Dec 10 11:52:28 crc kubenswrapper[4780]: E1210 11:52:28.028658 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 11:52:28 crc kubenswrapper[4780]: I1210 11:52:28.850844 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-vrj85"] Dec 10 11:52:28 crc kubenswrapper[4780]: I1210 11:52:28.856140 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-vrj85" Dec 10 11:52:28 crc kubenswrapper[4780]: I1210 11:52:28.893447 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-vrj85"] Dec 10 11:52:28 crc kubenswrapper[4780]: I1210 11:52:28.976994 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5790baf0-35fa-404c-9d8b-340742a84da4-utilities\") pod \"community-operators-vrj85\" (UID: \"5790baf0-35fa-404c-9d8b-340742a84da4\") " pod="openshift-marketplace/community-operators-vrj85" Dec 10 11:52:28 crc kubenswrapper[4780]: I1210 11:52:28.977177 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-k7x4h\" (UniqueName: \"kubernetes.io/projected/5790baf0-35fa-404c-9d8b-340742a84da4-kube-api-access-k7x4h\") pod \"community-operators-vrj85\" (UID: \"5790baf0-35fa-404c-9d8b-340742a84da4\") " pod="openshift-marketplace/community-operators-vrj85" Dec 10 11:52:28 crc kubenswrapper[4780]: I1210 11:52:28.977291 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5790baf0-35fa-404c-9d8b-340742a84da4-catalog-content\") pod \"community-operators-vrj85\" (UID: \"5790baf0-35fa-404c-9d8b-340742a84da4\") " pod="openshift-marketplace/community-operators-vrj85" Dec 10 11:52:29 crc kubenswrapper[4780]: I1210 11:52:29.082731 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-k7x4h\" (UniqueName: \"kubernetes.io/projected/5790baf0-35fa-404c-9d8b-340742a84da4-kube-api-access-k7x4h\") pod \"community-operators-vrj85\" (UID: \"5790baf0-35fa-404c-9d8b-340742a84da4\") " pod="openshift-marketplace/community-operators-vrj85" Dec 10 11:52:29 crc kubenswrapper[4780]: I1210 11:52:29.082874 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5790baf0-35fa-404c-9d8b-340742a84da4-catalog-content\") pod \"community-operators-vrj85\" (UID: \"5790baf0-35fa-404c-9d8b-340742a84da4\") " pod="openshift-marketplace/community-operators-vrj85" Dec 10 11:52:29 crc kubenswrapper[4780]: I1210 11:52:29.083217 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: 
\"kubernetes.io/empty-dir/5790baf0-35fa-404c-9d8b-340742a84da4-utilities\") pod \"community-operators-vrj85\" (UID: \"5790baf0-35fa-404c-9d8b-340742a84da4\") " pod="openshift-marketplace/community-operators-vrj85" Dec 10 11:52:29 crc kubenswrapper[4780]: I1210 11:52:29.084348 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5790baf0-35fa-404c-9d8b-340742a84da4-utilities\") pod \"community-operators-vrj85\" (UID: \"5790baf0-35fa-404c-9d8b-340742a84da4\") " pod="openshift-marketplace/community-operators-vrj85" Dec 10 11:52:29 crc kubenswrapper[4780]: I1210 11:52:29.084547 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5790baf0-35fa-404c-9d8b-340742a84da4-catalog-content\") pod \"community-operators-vrj85\" (UID: \"5790baf0-35fa-404c-9d8b-340742a84da4\") " pod="openshift-marketplace/community-operators-vrj85" Dec 10 11:52:29 crc kubenswrapper[4780]: I1210 11:52:29.121469 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-k7x4h\" (UniqueName: \"kubernetes.io/projected/5790baf0-35fa-404c-9d8b-340742a84da4-kube-api-access-k7x4h\") pod \"community-operators-vrj85\" (UID: \"5790baf0-35fa-404c-9d8b-340742a84da4\") " pod="openshift-marketplace/community-operators-vrj85" Dec 10 11:52:29 crc kubenswrapper[4780]: I1210 11:52:29.206883 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-vrj85" Dec 10 11:52:30 crc kubenswrapper[4780]: I1210 11:52:30.274024 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-vrj85"] Dec 10 11:52:30 crc kubenswrapper[4780]: I1210 11:52:30.473277 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-vrj85" event={"ID":"5790baf0-35fa-404c-9d8b-340742a84da4","Type":"ContainerStarted","Data":"aff7afeee2e90393a549cc98b739161306bee7e2d98aadae830f93387ed7c36c"} Dec 10 11:52:32 crc kubenswrapper[4780]: I1210 11:52:32.230393 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-dnw8l"] Dec 10 11:52:32 crc kubenswrapper[4780]: I1210 11:52:32.236063 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-dnw8l" Dec 10 11:52:32 crc kubenswrapper[4780]: I1210 11:52:32.256812 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-dnw8l"] Dec 10 11:52:32 crc kubenswrapper[4780]: I1210 11:52:32.267552 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q8mr7\" (UniqueName: \"kubernetes.io/projected/32274447-5fbb-4b8f-8118-6bca50c5abec-kube-api-access-q8mr7\") pod \"redhat-marketplace-dnw8l\" (UID: \"32274447-5fbb-4b8f-8118-6bca50c5abec\") " pod="openshift-marketplace/redhat-marketplace-dnw8l" Dec 10 11:52:32 crc kubenswrapper[4780]: I1210 11:52:32.271074 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/32274447-5fbb-4b8f-8118-6bca50c5abec-catalog-content\") pod \"redhat-marketplace-dnw8l\" (UID: \"32274447-5fbb-4b8f-8118-6bca50c5abec\") " pod="openshift-marketplace/redhat-marketplace-dnw8l" Dec 10 11:52:32 crc kubenswrapper[4780]: I1210 11:52:32.271233 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/32274447-5fbb-4b8f-8118-6bca50c5abec-utilities\") pod \"redhat-marketplace-dnw8l\" (UID: \"32274447-5fbb-4b8f-8118-6bca50c5abec\") " pod="openshift-marketplace/redhat-marketplace-dnw8l" Dec 10 11:52:32 crc kubenswrapper[4780]: I1210 11:52:32.376009 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/32274447-5fbb-4b8f-8118-6bca50c5abec-catalog-content\") pod \"redhat-marketplace-dnw8l\" (UID: \"32274447-5fbb-4b8f-8118-6bca50c5abec\") " pod="openshift-marketplace/redhat-marketplace-dnw8l" Dec 10 11:52:32 crc kubenswrapper[4780]: I1210 11:52:32.376135 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/32274447-5fbb-4b8f-8118-6bca50c5abec-utilities\") pod \"redhat-marketplace-dnw8l\" (UID: \"32274447-5fbb-4b8f-8118-6bca50c5abec\") " pod="openshift-marketplace/redhat-marketplace-dnw8l" Dec 10 11:52:32 crc kubenswrapper[4780]: I1210 11:52:32.376385 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q8mr7\" (UniqueName: \"kubernetes.io/projected/32274447-5fbb-4b8f-8118-6bca50c5abec-kube-api-access-q8mr7\") pod \"redhat-marketplace-dnw8l\" (UID: \"32274447-5fbb-4b8f-8118-6bca50c5abec\") " pod="openshift-marketplace/redhat-marketplace-dnw8l" Dec 10 11:52:32 crc kubenswrapper[4780]: I1210 11:52:32.378135 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/32274447-5fbb-4b8f-8118-6bca50c5abec-catalog-content\") pod \"redhat-marketplace-dnw8l\" (UID: \"32274447-5fbb-4b8f-8118-6bca50c5abec\") " pod="openshift-marketplace/redhat-marketplace-dnw8l" Dec 10 11:52:32 crc kubenswrapper[4780]: I1210 11:52:32.378537 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/32274447-5fbb-4b8f-8118-6bca50c5abec-utilities\") pod \"redhat-marketplace-dnw8l\" (UID: \"32274447-5fbb-4b8f-8118-6bca50c5abec\") " pod="openshift-marketplace/redhat-marketplace-dnw8l" Dec 10 11:52:32 crc kubenswrapper[4780]: I1210 11:52:32.460564 4780 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-q8mr7\" (UniqueName: \"kubernetes.io/projected/32274447-5fbb-4b8f-8118-6bca50c5abec-kube-api-access-q8mr7\") pod \"redhat-marketplace-dnw8l\" (UID: \"32274447-5fbb-4b8f-8118-6bca50c5abec\") " pod="openshift-marketplace/redhat-marketplace-dnw8l" Dec 10 11:52:32 crc kubenswrapper[4780]: I1210 11:52:32.584937 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-dnw8l" Dec 10 11:52:33 crc kubenswrapper[4780]: I1210 11:52:33.334263 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-dnw8l"] Dec 10 11:52:33 crc kubenswrapper[4780]: E1210 11:52:33.962714 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 11:52:34 crc kubenswrapper[4780]: W1210 11:52:34.011701 4780 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod32274447_5fbb_4b8f_8118_6bca50c5abec.slice/crio-d41dd4d5b21a37f6cbaf9a22202ddfc89ffaaa6841f006aa1ae42e93710a75f0 WatchSource:0}: Error finding container d41dd4d5b21a37f6cbaf9a22202ddfc89ffaaa6841f006aa1ae42e93710a75f0: Status 404 returned error can't find the container with id d41dd4d5b21a37f6cbaf9a22202ddfc89ffaaa6841f006aa1ae42e93710a75f0 Dec 10 11:52:34 crc kubenswrapper[4780]: I1210 11:52:34.682514 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-dnw8l" event={"ID":"32274447-5fbb-4b8f-8118-6bca50c5abec","Type":"ContainerStarted","Data":"d41dd4d5b21a37f6cbaf9a22202ddfc89ffaaa6841f006aa1ae42e93710a75f0"} Dec 10 11:52:35 crc kubenswrapper[4780]: I1210 11:52:35.704201 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-vrj85" event={"ID":"5790baf0-35fa-404c-9d8b-340742a84da4","Type":"ContainerStarted","Data":"055c01a7df2cd3b89a0260436599ee262898259f26368a1705fa38ab3f00c61d"} Dec 10 11:52:35 crc kubenswrapper[4780]: I1210 11:52:35.977996 4780 scope.go:117] "RemoveContainer" containerID="ef7d6e0e4daf35faca4d81cd35d964d90bb2353686969b711ed29b95292d5f74" Dec 10 11:52:35 crc kubenswrapper[4780]: E1210 11:52:35.979263 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xhdr5_openshift-machine-config-operator(6bf1dca1-b191-4796-b326-baac53e84045)\"" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" podUID="6bf1dca1-b191-4796-b326-baac53e84045" Dec 10 11:52:36 crc kubenswrapper[4780]: I1210 11:52:36.747105 4780 generic.go:334] "Generic (PLEG): container finished" podID="5790baf0-35fa-404c-9d8b-340742a84da4" containerID="055c01a7df2cd3b89a0260436599ee262898259f26368a1705fa38ab3f00c61d" exitCode=0 Dec 10 11:52:36 crc kubenswrapper[4780]: I1210 11:52:36.747342 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-vrj85" event={"ID":"5790baf0-35fa-404c-9d8b-340742a84da4","Type":"ContainerDied","Data":"055c01a7df2cd3b89a0260436599ee262898259f26368a1705fa38ab3f00c61d"} Dec 10 11:52:36 crc kubenswrapper[4780]: I1210 11:52:36.758553 
4780 generic.go:334] "Generic (PLEG): container finished" podID="32274447-5fbb-4b8f-8118-6bca50c5abec" containerID="9a546bc8b030a10149504dbe2f0e49c3d1e9bc628c76670eace9a37fa4355a60" exitCode=0 Dec 10 11:52:36 crc kubenswrapper[4780]: I1210 11:52:36.758803 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-dnw8l" event={"ID":"32274447-5fbb-4b8f-8118-6bca50c5abec","Type":"ContainerDied","Data":"9a546bc8b030a10149504dbe2f0e49c3d1e9bc628c76670eace9a37fa4355a60"} Dec 10 11:52:36 crc kubenswrapper[4780]: I1210 11:52:36.770692 4780 generic.go:334] "Generic (PLEG): container finished" podID="276fb168-4a0c-4b05-9d2e-f7b1c68ae605" containerID="8399a5dbccd6342e3e0b789afc48e83f1e8ae4755099af685f7ff2659cc3d958" exitCode=0 Dec 10 11:52:36 crc kubenswrapper[4780]: I1210 11:52:36.770800 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-c2wtl" event={"ID":"276fb168-4a0c-4b05-9d2e-f7b1c68ae605","Type":"ContainerDied","Data":"8399a5dbccd6342e3e0b789afc48e83f1e8ae4755099af685f7ff2659cc3d958"} Dec 10 11:52:38 crc kubenswrapper[4780]: I1210 11:52:38.972192 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-c2wtl" event={"ID":"276fb168-4a0c-4b05-9d2e-f7b1c68ae605","Type":"ContainerStarted","Data":"d795ce00b8ea2448876f135da723c0c390018c6742e2f2e946f6a8de5bc81b2f"} Dec 10 11:52:40 crc kubenswrapper[4780]: I1210 11:52:40.019887 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-dnw8l" event={"ID":"32274447-5fbb-4b8f-8118-6bca50c5abec","Type":"ContainerStarted","Data":"e1c75122c76f44b42d456a86f8cc05b84bdc6c5f28c846321af36478618d4866"} Dec 10 11:52:40 crc kubenswrapper[4780]: I1210 11:52:40.026912 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-vrj85" event={"ID":"5790baf0-35fa-404c-9d8b-340742a84da4","Type":"ContainerStarted","Data":"43c8f345dc69f38d94f21fe9e33eba12fcff2e2a39056b1c273367d69efd828b"} Dec 10 11:52:40 crc kubenswrapper[4780]: I1210 11:52:40.084989 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-c2wtl" podStartSLOduration=5.742955028 podStartE2EDuration="20.0848962s" podCreationTimestamp="2025-12-10 11:52:20 +0000 UTC" firstStartedPulling="2025-12-10 11:52:22.996433968 +0000 UTC m=+4047.849827411" lastFinishedPulling="2025-12-10 11:52:37.33837514 +0000 UTC m=+4062.191768583" observedRunningTime="2025-12-10 11:52:40.084385057 +0000 UTC m=+4064.937778520" watchObservedRunningTime="2025-12-10 11:52:40.0848962 +0000 UTC m=+4064.938289633" Dec 10 11:52:41 crc kubenswrapper[4780]: I1210 11:52:41.183191 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-c2wtl" Dec 10 11:52:41 crc kubenswrapper[4780]: I1210 11:52:41.911900 4780 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-c2wtl" Dec 10 11:52:42 crc kubenswrapper[4780]: E1210 11:52:42.964052 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 11:52:42 crc kubenswrapper[4780]: I1210 11:52:42.994348 4780 generic.go:334] "Generic (PLEG): 
container finished" podID="32274447-5fbb-4b8f-8118-6bca50c5abec" containerID="e1c75122c76f44b42d456a86f8cc05b84bdc6c5f28c846321af36478618d4866" exitCode=0 Dec 10 11:52:42 crc kubenswrapper[4780]: I1210 11:52:42.995173 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-dnw8l" event={"ID":"32274447-5fbb-4b8f-8118-6bca50c5abec","Type":"ContainerDied","Data":"e1c75122c76f44b42d456a86f8cc05b84bdc6c5f28c846321af36478618d4866"} Dec 10 11:52:43 crc kubenswrapper[4780]: I1210 11:52:43.024963 4780 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-c2wtl" podUID="276fb168-4a0c-4b05-9d2e-f7b1c68ae605" containerName="registry-server" probeResult="failure" output=< Dec 10 11:52:43 crc kubenswrapper[4780]: timeout: failed to connect service ":50051" within 1s Dec 10 11:52:43 crc kubenswrapper[4780]: > Dec 10 11:52:44 crc kubenswrapper[4780]: I1210 11:52:44.026051 4780 generic.go:334] "Generic (PLEG): container finished" podID="5790baf0-35fa-404c-9d8b-340742a84da4" containerID="43c8f345dc69f38d94f21fe9e33eba12fcff2e2a39056b1c273367d69efd828b" exitCode=0 Dec 10 11:52:44 crc kubenswrapper[4780]: I1210 11:52:44.027632 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-vrj85" event={"ID":"5790baf0-35fa-404c-9d8b-340742a84da4","Type":"ContainerDied","Data":"43c8f345dc69f38d94f21fe9e33eba12fcff2e2a39056b1c273367d69efd828b"} Dec 10 11:52:45 crc kubenswrapper[4780]: I1210 11:52:45.058498 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-dnw8l" event={"ID":"32274447-5fbb-4b8f-8118-6bca50c5abec","Type":"ContainerStarted","Data":"5c0f5f1e293fe20da3d13cd3d2d59933efbda2ef8cedc54096a5acaf09754dcf"} Dec 10 11:52:45 crc kubenswrapper[4780]: I1210 11:52:45.100352 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-dnw8l" podStartSLOduration=6.307470014 podStartE2EDuration="13.100308642s" podCreationTimestamp="2025-12-10 11:52:32 +0000 UTC" firstStartedPulling="2025-12-10 11:52:36.766796246 +0000 UTC m=+4061.620189689" lastFinishedPulling="2025-12-10 11:52:43.559634874 +0000 UTC m=+4068.413028317" observedRunningTime="2025-12-10 11:52:45.097233364 +0000 UTC m=+4069.950626807" watchObservedRunningTime="2025-12-10 11:52:45.100308642 +0000 UTC m=+4069.953702095" Dec 10 11:52:46 crc kubenswrapper[4780]: I1210 11:52:46.959793 4780 scope.go:117] "RemoveContainer" containerID="ef7d6e0e4daf35faca4d81cd35d964d90bb2353686969b711ed29b95292d5f74" Dec 10 11:52:46 crc kubenswrapper[4780]: E1210 11:52:46.962275 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xhdr5_openshift-machine-config-operator(6bf1dca1-b191-4796-b326-baac53e84045)\"" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" podUID="6bf1dca1-b191-4796-b326-baac53e84045" Dec 10 11:52:47 crc kubenswrapper[4780]: E1210 11:52:47.965354 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 11:52:51 crc kubenswrapper[4780]: I1210 
11:52:51.177320 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-vrj85" event={"ID":"5790baf0-35fa-404c-9d8b-340742a84da4","Type":"ContainerStarted","Data":"f96adccd99e1d6f9595dee645cd7fa5b62afa038ddc38e0fabc5e4e2aa4e3018"} Dec 10 11:52:51 crc kubenswrapper[4780]: I1210 11:52:51.245902 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-vrj85" podStartSLOduration=9.916859209 podStartE2EDuration="23.245861751s" podCreationTimestamp="2025-12-10 11:52:28 +0000 UTC" firstStartedPulling="2025-12-10 11:52:36.752341279 +0000 UTC m=+4061.605734732" lastFinishedPulling="2025-12-10 11:52:50.081343831 +0000 UTC m=+4074.934737274" observedRunningTime="2025-12-10 11:52:51.235493107 +0000 UTC m=+4076.088886550" watchObservedRunningTime="2025-12-10 11:52:51.245861751 +0000 UTC m=+4076.099255194" Dec 10 11:52:52 crc kubenswrapper[4780]: I1210 11:52:52.163463 4780 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-c2wtl" podUID="276fb168-4a0c-4b05-9d2e-f7b1c68ae605" containerName="registry-server" probeResult="failure" output=< Dec 10 11:52:52 crc kubenswrapper[4780]: timeout: failed to connect service ":50051" within 1s Dec 10 11:52:52 crc kubenswrapper[4780]: > Dec 10 11:52:52 crc kubenswrapper[4780]: I1210 11:52:52.586159 4780 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-dnw8l" Dec 10 11:52:52 crc kubenswrapper[4780]: I1210 11:52:52.586251 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-dnw8l" Dec 10 11:52:52 crc kubenswrapper[4780]: I1210 11:52:52.680260 4780 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-dnw8l" Dec 10 11:52:53 crc kubenswrapper[4780]: I1210 11:52:53.291180 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-dnw8l" Dec 10 11:52:53 crc kubenswrapper[4780]: I1210 11:52:53.454807 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-dnw8l"] Dec 10 11:52:54 crc kubenswrapper[4780]: E1210 11:52:54.403590 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 11:52:55 crc kubenswrapper[4780]: I1210 11:52:55.466452 4780 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-dnw8l" podUID="32274447-5fbb-4b8f-8118-6bca50c5abec" containerName="registry-server" containerID="cri-o://5c0f5f1e293fe20da3d13cd3d2d59933efbda2ef8cedc54096a5acaf09754dcf" gracePeriod=2 Dec 10 11:52:57 crc kubenswrapper[4780]: I1210 11:52:57.516639 4780 generic.go:334] "Generic (PLEG): container finished" podID="32274447-5fbb-4b8f-8118-6bca50c5abec" containerID="5c0f5f1e293fe20da3d13cd3d2d59933efbda2ef8cedc54096a5acaf09754dcf" exitCode=0 Dec 10 11:52:57 crc kubenswrapper[4780]: I1210 11:52:57.516807 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-dnw8l" 
event={"ID":"32274447-5fbb-4b8f-8118-6bca50c5abec","Type":"ContainerDied","Data":"5c0f5f1e293fe20da3d13cd3d2d59933efbda2ef8cedc54096a5acaf09754dcf"} Dec 10 11:52:57 crc kubenswrapper[4780]: I1210 11:52:57.517767 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-dnw8l" event={"ID":"32274447-5fbb-4b8f-8118-6bca50c5abec","Type":"ContainerDied","Data":"d41dd4d5b21a37f6cbaf9a22202ddfc89ffaaa6841f006aa1ae42e93710a75f0"} Dec 10 11:52:57 crc kubenswrapper[4780]: I1210 11:52:57.517791 4780 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="d41dd4d5b21a37f6cbaf9a22202ddfc89ffaaa6841f006aa1ae42e93710a75f0" Dec 10 11:52:57 crc kubenswrapper[4780]: I1210 11:52:57.603245 4780 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-dnw8l" Dec 10 11:52:57 crc kubenswrapper[4780]: I1210 11:52:57.709399 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/32274447-5fbb-4b8f-8118-6bca50c5abec-catalog-content\") pod \"32274447-5fbb-4b8f-8118-6bca50c5abec\" (UID: \"32274447-5fbb-4b8f-8118-6bca50c5abec\") " Dec 10 11:52:57 crc kubenswrapper[4780]: I1210 11:52:57.709528 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-q8mr7\" (UniqueName: \"kubernetes.io/projected/32274447-5fbb-4b8f-8118-6bca50c5abec-kube-api-access-q8mr7\") pod \"32274447-5fbb-4b8f-8118-6bca50c5abec\" (UID: \"32274447-5fbb-4b8f-8118-6bca50c5abec\") " Dec 10 11:52:57 crc kubenswrapper[4780]: I1210 11:52:57.709667 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/32274447-5fbb-4b8f-8118-6bca50c5abec-utilities\") pod \"32274447-5fbb-4b8f-8118-6bca50c5abec\" (UID: \"32274447-5fbb-4b8f-8118-6bca50c5abec\") " Dec 10 11:52:57 crc kubenswrapper[4780]: I1210 11:52:57.712223 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/32274447-5fbb-4b8f-8118-6bca50c5abec-utilities" (OuterVolumeSpecName: "utilities") pod "32274447-5fbb-4b8f-8118-6bca50c5abec" (UID: "32274447-5fbb-4b8f-8118-6bca50c5abec"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 11:52:57 crc kubenswrapper[4780]: I1210 11:52:57.723480 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/32274447-5fbb-4b8f-8118-6bca50c5abec-kube-api-access-q8mr7" (OuterVolumeSpecName: "kube-api-access-q8mr7") pod "32274447-5fbb-4b8f-8118-6bca50c5abec" (UID: "32274447-5fbb-4b8f-8118-6bca50c5abec"). InnerVolumeSpecName "kube-api-access-q8mr7". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:52:57 crc kubenswrapper[4780]: I1210 11:52:57.763969 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/32274447-5fbb-4b8f-8118-6bca50c5abec-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "32274447-5fbb-4b8f-8118-6bca50c5abec" (UID: "32274447-5fbb-4b8f-8118-6bca50c5abec"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 11:52:57 crc kubenswrapper[4780]: I1210 11:52:57.815831 4780 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/32274447-5fbb-4b8f-8118-6bca50c5abec-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 10 11:52:57 crc kubenswrapper[4780]: I1210 11:52:57.815956 4780 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-q8mr7\" (UniqueName: \"kubernetes.io/projected/32274447-5fbb-4b8f-8118-6bca50c5abec-kube-api-access-q8mr7\") on node \"crc\" DevicePath \"\"" Dec 10 11:52:57 crc kubenswrapper[4780]: I1210 11:52:57.815975 4780 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/32274447-5fbb-4b8f-8118-6bca50c5abec-utilities\") on node \"crc\" DevicePath \"\"" Dec 10 11:52:58 crc kubenswrapper[4780]: I1210 11:52:58.537805 4780 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-dnw8l" Dec 10 11:52:58 crc kubenswrapper[4780]: I1210 11:52:58.628060 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-dnw8l"] Dec 10 11:52:58 crc kubenswrapper[4780]: I1210 11:52:58.646088 4780 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-dnw8l"] Dec 10 11:52:59 crc kubenswrapper[4780]: I1210 11:52:59.207208 4780 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-vrj85" Dec 10 11:52:59 crc kubenswrapper[4780]: I1210 11:52:59.207767 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-vrj85" Dec 10 11:52:59 crc kubenswrapper[4780]: I1210 11:52:59.276564 4780 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-vrj85" Dec 10 11:52:59 crc kubenswrapper[4780]: I1210 11:52:59.625721 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-vrj85" Dec 10 11:52:59 crc kubenswrapper[4780]: I1210 11:52:59.962212 4780 scope.go:117] "RemoveContainer" containerID="ef7d6e0e4daf35faca4d81cd35d964d90bb2353686969b711ed29b95292d5f74" Dec 10 11:52:59 crc kubenswrapper[4780]: E1210 11:52:59.962723 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xhdr5_openshift-machine-config-operator(6bf1dca1-b191-4796-b326-baac53e84045)\"" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" podUID="6bf1dca1-b191-4796-b326-baac53e84045" Dec 10 11:52:59 crc kubenswrapper[4780]: I1210 11:52:59.983712 4780 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="32274447-5fbb-4b8f-8118-6bca50c5abec" path="/var/lib/kubelet/pods/32274447-5fbb-4b8f-8118-6bca50c5abec/volumes" Dec 10 11:53:00 crc kubenswrapper[4780]: I1210 11:53:00.861478 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-vrj85"] Dec 10 11:53:01 crc kubenswrapper[4780]: I1210 11:53:01.587031 4780 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-vrj85" podUID="5790baf0-35fa-404c-9d8b-340742a84da4" containerName="registry-server" 
containerID="cri-o://f96adccd99e1d6f9595dee645cd7fa5b62afa038ddc38e0fabc5e4e2aa4e3018" gracePeriod=2 Dec 10 11:53:01 crc kubenswrapper[4780]: E1210 11:53:01.964198 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 11:53:02 crc kubenswrapper[4780]: I1210 11:53:02.082666 4780 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-c2wtl" podUID="276fb168-4a0c-4b05-9d2e-f7b1c68ae605" containerName="registry-server" probeResult="failure" output=< Dec 10 11:53:02 crc kubenswrapper[4780]: timeout: failed to connect service ":50051" within 1s Dec 10 11:53:02 crc kubenswrapper[4780]: > Dec 10 11:53:02 crc kubenswrapper[4780]: I1210 11:53:02.648708 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-vrj85" event={"ID":"5790baf0-35fa-404c-9d8b-340742a84da4","Type":"ContainerDied","Data":"f96adccd99e1d6f9595dee645cd7fa5b62afa038ddc38e0fabc5e4e2aa4e3018"} Dec 10 11:53:02 crc kubenswrapper[4780]: I1210 11:53:02.648665 4780 generic.go:334] "Generic (PLEG): container finished" podID="5790baf0-35fa-404c-9d8b-340742a84da4" containerID="f96adccd99e1d6f9595dee645cd7fa5b62afa038ddc38e0fabc5e4e2aa4e3018" exitCode=0 Dec 10 11:53:03 crc kubenswrapper[4780]: I1210 11:53:03.533742 4780 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-vrj85" Dec 10 11:53:03 crc kubenswrapper[4780]: I1210 11:53:03.691856 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-k7x4h\" (UniqueName: \"kubernetes.io/projected/5790baf0-35fa-404c-9d8b-340742a84da4-kube-api-access-k7x4h\") pod \"5790baf0-35fa-404c-9d8b-340742a84da4\" (UID: \"5790baf0-35fa-404c-9d8b-340742a84da4\") " Dec 10 11:53:03 crc kubenswrapper[4780]: I1210 11:53:03.692179 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5790baf0-35fa-404c-9d8b-340742a84da4-utilities\") pod \"5790baf0-35fa-404c-9d8b-340742a84da4\" (UID: \"5790baf0-35fa-404c-9d8b-340742a84da4\") " Dec 10 11:53:03 crc kubenswrapper[4780]: I1210 11:53:03.692220 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5790baf0-35fa-404c-9d8b-340742a84da4-catalog-content\") pod \"5790baf0-35fa-404c-9d8b-340742a84da4\" (UID: \"5790baf0-35fa-404c-9d8b-340742a84da4\") " Dec 10 11:53:03 crc kubenswrapper[4780]: I1210 11:53:03.693892 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5790baf0-35fa-404c-9d8b-340742a84da4-utilities" (OuterVolumeSpecName: "utilities") pod "5790baf0-35fa-404c-9d8b-340742a84da4" (UID: "5790baf0-35fa-404c-9d8b-340742a84da4"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 11:53:03 crc kubenswrapper[4780]: I1210 11:53:03.704336 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-vrj85" event={"ID":"5790baf0-35fa-404c-9d8b-340742a84da4","Type":"ContainerDied","Data":"aff7afeee2e90393a549cc98b739161306bee7e2d98aadae830f93387ed7c36c"} Dec 10 11:53:03 crc kubenswrapper[4780]: I1210 11:53:03.704451 4780 scope.go:117] "RemoveContainer" containerID="f96adccd99e1d6f9595dee645cd7fa5b62afa038ddc38e0fabc5e4e2aa4e3018" Dec 10 11:53:03 crc kubenswrapper[4780]: I1210 11:53:03.704592 4780 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-vrj85" Dec 10 11:53:03 crc kubenswrapper[4780]: I1210 11:53:03.725454 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5790baf0-35fa-404c-9d8b-340742a84da4-kube-api-access-k7x4h" (OuterVolumeSpecName: "kube-api-access-k7x4h") pod "5790baf0-35fa-404c-9d8b-340742a84da4" (UID: "5790baf0-35fa-404c-9d8b-340742a84da4"). InnerVolumeSpecName "kube-api-access-k7x4h". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:53:03 crc kubenswrapper[4780]: I1210 11:53:03.815350 4780 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5790baf0-35fa-404c-9d8b-340742a84da4-utilities\") on node \"crc\" DevicePath \"\"" Dec 10 11:53:03 crc kubenswrapper[4780]: I1210 11:53:03.815427 4780 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-k7x4h\" (UniqueName: \"kubernetes.io/projected/5790baf0-35fa-404c-9d8b-340742a84da4-kube-api-access-k7x4h\") on node \"crc\" DevicePath \"\"" Dec 10 11:53:03 crc kubenswrapper[4780]: I1210 11:53:03.847212 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5790baf0-35fa-404c-9d8b-340742a84da4-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "5790baf0-35fa-404c-9d8b-340742a84da4" (UID: "5790baf0-35fa-404c-9d8b-340742a84da4"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 11:53:03 crc kubenswrapper[4780]: I1210 11:53:03.877776 4780 scope.go:117] "RemoveContainer" containerID="43c8f345dc69f38d94f21fe9e33eba12fcff2e2a39056b1c273367d69efd828b" Dec 10 11:53:03 crc kubenswrapper[4780]: I1210 11:53:03.920295 4780 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5790baf0-35fa-404c-9d8b-340742a84da4-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 10 11:53:03 crc kubenswrapper[4780]: I1210 11:53:03.935542 4780 scope.go:117] "RemoveContainer" containerID="055c01a7df2cd3b89a0260436599ee262898259f26368a1705fa38ab3f00c61d" Dec 10 11:53:04 crc kubenswrapper[4780]: I1210 11:53:04.105343 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-vrj85"] Dec 10 11:53:04 crc kubenswrapper[4780]: I1210 11:53:04.123863 4780 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-vrj85"] Dec 10 11:53:05 crc kubenswrapper[4780]: I1210 11:53:05.974779 4780 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5790baf0-35fa-404c-9d8b-340742a84da4" path="/var/lib/kubelet/pods/5790baf0-35fa-404c-9d8b-340742a84da4/volumes" Dec 10 11:53:05 crc kubenswrapper[4780]: E1210 11:53:05.978562 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 11:53:07 crc kubenswrapper[4780]: I1210 11:53:07.318603 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-b29w5"] Dec 10 11:53:07 crc kubenswrapper[4780]: E1210 11:53:07.322542 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5790baf0-35fa-404c-9d8b-340742a84da4" containerName="extract-utilities" Dec 10 11:53:07 crc kubenswrapper[4780]: I1210 11:53:07.322597 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="5790baf0-35fa-404c-9d8b-340742a84da4" containerName="extract-utilities" Dec 10 11:53:07 crc kubenswrapper[4780]: E1210 11:53:07.322637 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="32274447-5fbb-4b8f-8118-6bca50c5abec" containerName="registry-server" Dec 10 11:53:07 crc kubenswrapper[4780]: I1210 11:53:07.322650 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="32274447-5fbb-4b8f-8118-6bca50c5abec" containerName="registry-server" Dec 10 11:53:07 crc kubenswrapper[4780]: E1210 11:53:07.322667 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5790baf0-35fa-404c-9d8b-340742a84da4" containerName="registry-server" Dec 10 11:53:07 crc kubenswrapper[4780]: I1210 11:53:07.322677 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="5790baf0-35fa-404c-9d8b-340742a84da4" containerName="registry-server" Dec 10 11:53:07 crc kubenswrapper[4780]: E1210 11:53:07.322696 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="32274447-5fbb-4b8f-8118-6bca50c5abec" containerName="extract-content" Dec 10 11:53:07 crc kubenswrapper[4780]: I1210 11:53:07.322704 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="32274447-5fbb-4b8f-8118-6bca50c5abec" containerName="extract-content" Dec 10 11:53:07 crc kubenswrapper[4780]: E1210 11:53:07.322745 4780 cpu_manager.go:410] "RemoveStaleState: removing 
container" podUID="5790baf0-35fa-404c-9d8b-340742a84da4" containerName="extract-content" Dec 10 11:53:07 crc kubenswrapper[4780]: I1210 11:53:07.322756 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="5790baf0-35fa-404c-9d8b-340742a84da4" containerName="extract-content" Dec 10 11:53:07 crc kubenswrapper[4780]: E1210 11:53:07.322793 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="32274447-5fbb-4b8f-8118-6bca50c5abec" containerName="extract-utilities" Dec 10 11:53:07 crc kubenswrapper[4780]: I1210 11:53:07.322803 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="32274447-5fbb-4b8f-8118-6bca50c5abec" containerName="extract-utilities" Dec 10 11:53:07 crc kubenswrapper[4780]: I1210 11:53:07.323364 4780 memory_manager.go:354] "RemoveStaleState removing state" podUID="32274447-5fbb-4b8f-8118-6bca50c5abec" containerName="registry-server" Dec 10 11:53:07 crc kubenswrapper[4780]: I1210 11:53:07.323392 4780 memory_manager.go:354] "RemoveStaleState removing state" podUID="5790baf0-35fa-404c-9d8b-340742a84da4" containerName="registry-server" Dec 10 11:53:07 crc kubenswrapper[4780]: I1210 11:53:07.328451 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-b29w5" Dec 10 11:53:07 crc kubenswrapper[4780]: I1210 11:53:07.354014 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-b29w5"] Dec 10 11:53:07 crc kubenswrapper[4780]: I1210 11:53:07.383824 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/dfa93e73-9105-4ad4-a2c2-94cabb8926ba-utilities\") pod \"certified-operators-b29w5\" (UID: \"dfa93e73-9105-4ad4-a2c2-94cabb8926ba\") " pod="openshift-marketplace/certified-operators-b29w5" Dec 10 11:53:07 crc kubenswrapper[4780]: I1210 11:53:07.383980 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/dfa93e73-9105-4ad4-a2c2-94cabb8926ba-catalog-content\") pod \"certified-operators-b29w5\" (UID: \"dfa93e73-9105-4ad4-a2c2-94cabb8926ba\") " pod="openshift-marketplace/certified-operators-b29w5" Dec 10 11:53:07 crc kubenswrapper[4780]: I1210 11:53:07.384061 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zdtpm\" (UniqueName: \"kubernetes.io/projected/dfa93e73-9105-4ad4-a2c2-94cabb8926ba-kube-api-access-zdtpm\") pod \"certified-operators-b29w5\" (UID: \"dfa93e73-9105-4ad4-a2c2-94cabb8926ba\") " pod="openshift-marketplace/certified-operators-b29w5" Dec 10 11:53:07 crc kubenswrapper[4780]: I1210 11:53:07.488205 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/dfa93e73-9105-4ad4-a2c2-94cabb8926ba-utilities\") pod \"certified-operators-b29w5\" (UID: \"dfa93e73-9105-4ad4-a2c2-94cabb8926ba\") " pod="openshift-marketplace/certified-operators-b29w5" Dec 10 11:53:07 crc kubenswrapper[4780]: I1210 11:53:07.488406 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/dfa93e73-9105-4ad4-a2c2-94cabb8926ba-catalog-content\") pod \"certified-operators-b29w5\" (UID: \"dfa93e73-9105-4ad4-a2c2-94cabb8926ba\") " pod="openshift-marketplace/certified-operators-b29w5" Dec 10 11:53:07 crc kubenswrapper[4780]: I1210 11:53:07.488495 4780 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zdtpm\" (UniqueName: \"kubernetes.io/projected/dfa93e73-9105-4ad4-a2c2-94cabb8926ba-kube-api-access-zdtpm\") pod \"certified-operators-b29w5\" (UID: \"dfa93e73-9105-4ad4-a2c2-94cabb8926ba\") " pod="openshift-marketplace/certified-operators-b29w5" Dec 10 11:53:07 crc kubenswrapper[4780]: I1210 11:53:07.489458 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/dfa93e73-9105-4ad4-a2c2-94cabb8926ba-utilities\") pod \"certified-operators-b29w5\" (UID: \"dfa93e73-9105-4ad4-a2c2-94cabb8926ba\") " pod="openshift-marketplace/certified-operators-b29w5" Dec 10 11:53:07 crc kubenswrapper[4780]: I1210 11:53:07.489726 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/dfa93e73-9105-4ad4-a2c2-94cabb8926ba-catalog-content\") pod \"certified-operators-b29w5\" (UID: \"dfa93e73-9105-4ad4-a2c2-94cabb8926ba\") " pod="openshift-marketplace/certified-operators-b29w5" Dec 10 11:53:07 crc kubenswrapper[4780]: I1210 11:53:07.515989 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zdtpm\" (UniqueName: \"kubernetes.io/projected/dfa93e73-9105-4ad4-a2c2-94cabb8926ba-kube-api-access-zdtpm\") pod \"certified-operators-b29w5\" (UID: \"dfa93e73-9105-4ad4-a2c2-94cabb8926ba\") " pod="openshift-marketplace/certified-operators-b29w5" Dec 10 11:53:07 crc kubenswrapper[4780]: I1210 11:53:07.668438 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-b29w5" Dec 10 11:53:08 crc kubenswrapper[4780]: I1210 11:53:08.606397 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-b29w5"] Dec 10 11:53:08 crc kubenswrapper[4780]: W1210 11:53:08.620772 4780 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poddfa93e73_9105_4ad4_a2c2_94cabb8926ba.slice/crio-2a951a11b502501f688fcd74a871fafab42f6a720e5516f46680054c5a3476ea WatchSource:0}: Error finding container 2a951a11b502501f688fcd74a871fafab42f6a720e5516f46680054c5a3476ea: Status 404 returned error can't find the container with id 2a951a11b502501f688fcd74a871fafab42f6a720e5516f46680054c5a3476ea Dec 10 11:53:08 crc kubenswrapper[4780]: I1210 11:53:08.793465 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-b29w5" event={"ID":"dfa93e73-9105-4ad4-a2c2-94cabb8926ba","Type":"ContainerStarted","Data":"2a951a11b502501f688fcd74a871fafab42f6a720e5516f46680054c5a3476ea"} Dec 10 11:53:09 crc kubenswrapper[4780]: I1210 11:53:09.815747 4780 generic.go:334] "Generic (PLEG): container finished" podID="dfa93e73-9105-4ad4-a2c2-94cabb8926ba" containerID="f81576792ea1d0bcfe79c27fba5b387c72b320d2f82fe8a7290ff467fe5d6029" exitCode=0 Dec 10 11:53:09 crc kubenswrapper[4780]: I1210 11:53:09.816441 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-b29w5" event={"ID":"dfa93e73-9105-4ad4-a2c2-94cabb8926ba","Type":"ContainerDied","Data":"f81576792ea1d0bcfe79c27fba5b387c72b320d2f82fe8a7290ff467fe5d6029"} Dec 10 11:53:11 crc kubenswrapper[4780]: I1210 11:53:11.073135 4780 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-c2wtl" Dec 10 11:53:11 crc kubenswrapper[4780]: I1210 
11:53:11.137391 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-c2wtl" Dec 10 11:53:12 crc kubenswrapper[4780]: I1210 11:53:12.459506 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-c2wtl"] Dec 10 11:53:12 crc kubenswrapper[4780]: I1210 11:53:12.859340 4780 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-c2wtl" podUID="276fb168-4a0c-4b05-9d2e-f7b1c68ae605" containerName="registry-server" containerID="cri-o://d795ce00b8ea2448876f135da723c0c390018c6742e2f2e946f6a8de5bc81b2f" gracePeriod=2 Dec 10 11:53:12 crc kubenswrapper[4780]: I1210 11:53:12.960358 4780 scope.go:117] "RemoveContainer" containerID="ef7d6e0e4daf35faca4d81cd35d964d90bb2353686969b711ed29b95292d5f74" Dec 10 11:53:12 crc kubenswrapper[4780]: E1210 11:53:12.961480 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xhdr5_openshift-machine-config-operator(6bf1dca1-b191-4796-b326-baac53e84045)\"" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" podUID="6bf1dca1-b191-4796-b326-baac53e84045" Dec 10 11:53:13 crc kubenswrapper[4780]: I1210 11:53:13.882843 4780 generic.go:334] "Generic (PLEG): container finished" podID="276fb168-4a0c-4b05-9d2e-f7b1c68ae605" containerID="d795ce00b8ea2448876f135da723c0c390018c6742e2f2e946f6a8de5bc81b2f" exitCode=0 Dec 10 11:53:13 crc kubenswrapper[4780]: I1210 11:53:13.882972 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-c2wtl" event={"ID":"276fb168-4a0c-4b05-9d2e-f7b1c68ae605","Type":"ContainerDied","Data":"d795ce00b8ea2448876f135da723c0c390018c6742e2f2e946f6a8de5bc81b2f"} Dec 10 11:53:15 crc kubenswrapper[4780]: I1210 11:53:15.711831 4780 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-c2wtl" Dec 10 11:53:15 crc kubenswrapper[4780]: I1210 11:53:15.740941 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/276fb168-4a0c-4b05-9d2e-f7b1c68ae605-catalog-content\") pod \"276fb168-4a0c-4b05-9d2e-f7b1c68ae605\" (UID: \"276fb168-4a0c-4b05-9d2e-f7b1c68ae605\") " Dec 10 11:53:15 crc kubenswrapper[4780]: I1210 11:53:15.741242 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/276fb168-4a0c-4b05-9d2e-f7b1c68ae605-utilities\") pod \"276fb168-4a0c-4b05-9d2e-f7b1c68ae605\" (UID: \"276fb168-4a0c-4b05-9d2e-f7b1c68ae605\") " Dec 10 11:53:15 crc kubenswrapper[4780]: I1210 11:53:15.743299 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/276fb168-4a0c-4b05-9d2e-f7b1c68ae605-utilities" (OuterVolumeSpecName: "utilities") pod "276fb168-4a0c-4b05-9d2e-f7b1c68ae605" (UID: "276fb168-4a0c-4b05-9d2e-f7b1c68ae605"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 11:53:15 crc kubenswrapper[4780]: I1210 11:53:15.845039 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sv4vg\" (UniqueName: \"kubernetes.io/projected/276fb168-4a0c-4b05-9d2e-f7b1c68ae605-kube-api-access-sv4vg\") pod \"276fb168-4a0c-4b05-9d2e-f7b1c68ae605\" (UID: \"276fb168-4a0c-4b05-9d2e-f7b1c68ae605\") " Dec 10 11:53:15 crc kubenswrapper[4780]: I1210 11:53:15.846583 4780 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/276fb168-4a0c-4b05-9d2e-f7b1c68ae605-utilities\") on node \"crc\" DevicePath \"\"" Dec 10 11:53:15 crc kubenswrapper[4780]: I1210 11:53:15.852717 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/276fb168-4a0c-4b05-9d2e-f7b1c68ae605-kube-api-access-sv4vg" (OuterVolumeSpecName: "kube-api-access-sv4vg") pod "276fb168-4a0c-4b05-9d2e-f7b1c68ae605" (UID: "276fb168-4a0c-4b05-9d2e-f7b1c68ae605"). InnerVolumeSpecName "kube-api-access-sv4vg". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:53:15 crc kubenswrapper[4780]: I1210 11:53:15.884596 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/276fb168-4a0c-4b05-9d2e-f7b1c68ae605-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "276fb168-4a0c-4b05-9d2e-f7b1c68ae605" (UID: "276fb168-4a0c-4b05-9d2e-f7b1c68ae605"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 11:53:15 crc kubenswrapper[4780]: I1210 11:53:15.943779 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-c2wtl" event={"ID":"276fb168-4a0c-4b05-9d2e-f7b1c68ae605","Type":"ContainerDied","Data":"2a060d5fdef665286b23a2bef01061e2031996032244949ecc45024975a6b446"} Dec 10 11:53:15 crc kubenswrapper[4780]: I1210 11:53:15.943883 4780 scope.go:117] "RemoveContainer" containerID="d795ce00b8ea2448876f135da723c0c390018c6742e2f2e946f6a8de5bc81b2f" Dec 10 11:53:15 crc kubenswrapper[4780]: I1210 11:53:15.943881 4780 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-c2wtl" Dec 10 11:53:15 crc kubenswrapper[4780]: I1210 11:53:15.950386 4780 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/276fb168-4a0c-4b05-9d2e-f7b1c68ae605-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 10 11:53:15 crc kubenswrapper[4780]: I1210 11:53:15.951710 4780 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sv4vg\" (UniqueName: \"kubernetes.io/projected/276fb168-4a0c-4b05-9d2e-f7b1c68ae605-kube-api-access-sv4vg\") on node \"crc\" DevicePath \"\"" Dec 10 11:53:15 crc kubenswrapper[4780]: I1210 11:53:15.998308 4780 scope.go:117] "RemoveContainer" containerID="8399a5dbccd6342e3e0b789afc48e83f1e8ae4755099af685f7ff2659cc3d958" Dec 10 11:53:16 crc kubenswrapper[4780]: I1210 11:53:16.016130 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-c2wtl"] Dec 10 11:53:16 crc kubenswrapper[4780]: I1210 11:53:16.031852 4780 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-c2wtl"] Dec 10 11:53:16 crc kubenswrapper[4780]: I1210 11:53:16.058508 4780 scope.go:117] "RemoveContainer" containerID="3e0e4901b2b3002e82de92ca0cdbc16661a3af6938445f62060662d48fb4dd35" Dec 10 11:53:16 crc kubenswrapper[4780]: E1210 11:53:16.966777 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 11:53:16 crc kubenswrapper[4780]: E1210 11:53:16.967904 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 11:53:17 crc kubenswrapper[4780]: I1210 11:53:17.980891 4780 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="276fb168-4a0c-4b05-9d2e-f7b1c68ae605" path="/var/lib/kubelet/pods/276fb168-4a0c-4b05-9d2e-f7b1c68ae605/volumes" Dec 10 11:53:17 crc kubenswrapper[4780]: I1210 11:53:17.987606 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-b29w5" event={"ID":"dfa93e73-9105-4ad4-a2c2-94cabb8926ba","Type":"ContainerStarted","Data":"f587e9d32c312f4ba00080449e3c082fd042e8ee29ed63eddcc3594624cae38d"} Dec 10 11:53:19 crc kubenswrapper[4780]: I1210 11:53:19.002997 4780 generic.go:334] "Generic (PLEG): container finished" podID="dfa93e73-9105-4ad4-a2c2-94cabb8926ba" containerID="f587e9d32c312f4ba00080449e3c082fd042e8ee29ed63eddcc3594624cae38d" exitCode=0 Dec 10 11:53:19 crc kubenswrapper[4780]: I1210 11:53:19.003111 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-b29w5" event={"ID":"dfa93e73-9105-4ad4-a2c2-94cabb8926ba","Type":"ContainerDied","Data":"f587e9d32c312f4ba00080449e3c082fd042e8ee29ed63eddcc3594624cae38d"} Dec 10 11:53:21 crc kubenswrapper[4780]: I1210 11:53:21.040162 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-b29w5" 
event={"ID":"dfa93e73-9105-4ad4-a2c2-94cabb8926ba","Type":"ContainerStarted","Data":"aaf16a4372cd97d031ed8b8729bacc2ae52b8348f92df061dc4bc8c07df60362"} Dec 10 11:53:21 crc kubenswrapper[4780]: I1210 11:53:21.084635 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-b29w5" podStartSLOduration=3.857750548 podStartE2EDuration="14.084591304s" podCreationTimestamp="2025-12-10 11:53:07 +0000 UTC" firstStartedPulling="2025-12-10 11:53:09.822866095 +0000 UTC m=+4094.676259538" lastFinishedPulling="2025-12-10 11:53:20.049706851 +0000 UTC m=+4104.903100294" observedRunningTime="2025-12-10 11:53:21.067497332 +0000 UTC m=+4105.920890785" watchObservedRunningTime="2025-12-10 11:53:21.084591304 +0000 UTC m=+4105.937984747" Dec 10 11:53:24 crc kubenswrapper[4780]: I1210 11:53:24.959426 4780 scope.go:117] "RemoveContainer" containerID="ef7d6e0e4daf35faca4d81cd35d964d90bb2353686969b711ed29b95292d5f74" Dec 10 11:53:24 crc kubenswrapper[4780]: E1210 11:53:24.960824 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xhdr5_openshift-machine-config-operator(6bf1dca1-b191-4796-b326-baac53e84045)\"" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" podUID="6bf1dca1-b191-4796-b326-baac53e84045" Dec 10 11:53:27 crc kubenswrapper[4780]: I1210 11:53:27.669092 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-b29w5" Dec 10 11:53:27 crc kubenswrapper[4780]: I1210 11:53:27.670311 4780 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-b29w5" Dec 10 11:53:27 crc kubenswrapper[4780]: I1210 11:53:27.730548 4780 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-b29w5" Dec 10 11:53:28 crc kubenswrapper[4780]: I1210 11:53:28.287601 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-b29w5" Dec 10 11:53:28 crc kubenswrapper[4780]: I1210 11:53:28.379310 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-b29w5"] Dec 10 11:53:29 crc kubenswrapper[4780]: E1210 11:53:29.969362 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 11:53:30 crc kubenswrapper[4780]: I1210 11:53:30.248591 4780 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-b29w5" podUID="dfa93e73-9105-4ad4-a2c2-94cabb8926ba" containerName="registry-server" containerID="cri-o://aaf16a4372cd97d031ed8b8729bacc2ae52b8348f92df061dc4bc8c07df60362" gracePeriod=2 Dec 10 11:53:34 crc kubenswrapper[4780]: I1210 11:53:30.928266 4780 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-b29w5" Dec 10 11:53:34 crc kubenswrapper[4780]: I1210 11:53:31.081784 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/dfa93e73-9105-4ad4-a2c2-94cabb8926ba-catalog-content\") pod \"dfa93e73-9105-4ad4-a2c2-94cabb8926ba\" (UID: \"dfa93e73-9105-4ad4-a2c2-94cabb8926ba\") " Dec 10 11:53:34 crc kubenswrapper[4780]: I1210 11:53:31.081896 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zdtpm\" (UniqueName: \"kubernetes.io/projected/dfa93e73-9105-4ad4-a2c2-94cabb8926ba-kube-api-access-zdtpm\") pod \"dfa93e73-9105-4ad4-a2c2-94cabb8926ba\" (UID: \"dfa93e73-9105-4ad4-a2c2-94cabb8926ba\") " Dec 10 11:53:34 crc kubenswrapper[4780]: I1210 11:53:31.082257 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/dfa93e73-9105-4ad4-a2c2-94cabb8926ba-utilities\") pod \"dfa93e73-9105-4ad4-a2c2-94cabb8926ba\" (UID: \"dfa93e73-9105-4ad4-a2c2-94cabb8926ba\") " Dec 10 11:53:34 crc kubenswrapper[4780]: I1210 11:53:31.084039 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/dfa93e73-9105-4ad4-a2c2-94cabb8926ba-utilities" (OuterVolumeSpecName: "utilities") pod "dfa93e73-9105-4ad4-a2c2-94cabb8926ba" (UID: "dfa93e73-9105-4ad4-a2c2-94cabb8926ba"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 11:53:34 crc kubenswrapper[4780]: I1210 11:53:31.094626 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/dfa93e73-9105-4ad4-a2c2-94cabb8926ba-kube-api-access-zdtpm" (OuterVolumeSpecName: "kube-api-access-zdtpm") pod "dfa93e73-9105-4ad4-a2c2-94cabb8926ba" (UID: "dfa93e73-9105-4ad4-a2c2-94cabb8926ba"). InnerVolumeSpecName "kube-api-access-zdtpm". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:53:34 crc kubenswrapper[4780]: I1210 11:53:31.157640 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/dfa93e73-9105-4ad4-a2c2-94cabb8926ba-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "dfa93e73-9105-4ad4-a2c2-94cabb8926ba" (UID: "dfa93e73-9105-4ad4-a2c2-94cabb8926ba"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 11:53:34 crc kubenswrapper[4780]: I1210 11:53:31.187324 4780 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/dfa93e73-9105-4ad4-a2c2-94cabb8926ba-utilities\") on node \"crc\" DevicePath \"\"" Dec 10 11:53:34 crc kubenswrapper[4780]: I1210 11:53:31.187380 4780 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/dfa93e73-9105-4ad4-a2c2-94cabb8926ba-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 10 11:53:34 crc kubenswrapper[4780]: I1210 11:53:31.187399 4780 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zdtpm\" (UniqueName: \"kubernetes.io/projected/dfa93e73-9105-4ad4-a2c2-94cabb8926ba-kube-api-access-zdtpm\") on node \"crc\" DevicePath \"\"" Dec 10 11:53:34 crc kubenswrapper[4780]: I1210 11:53:31.267000 4780 generic.go:334] "Generic (PLEG): container finished" podID="dfa93e73-9105-4ad4-a2c2-94cabb8926ba" containerID="aaf16a4372cd97d031ed8b8729bacc2ae52b8348f92df061dc4bc8c07df60362" exitCode=0 Dec 10 11:53:34 crc kubenswrapper[4780]: I1210 11:53:31.267075 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-b29w5" event={"ID":"dfa93e73-9105-4ad4-a2c2-94cabb8926ba","Type":"ContainerDied","Data":"aaf16a4372cd97d031ed8b8729bacc2ae52b8348f92df061dc4bc8c07df60362"} Dec 10 11:53:34 crc kubenswrapper[4780]: I1210 11:53:31.267118 4780 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-b29w5" Dec 10 11:53:34 crc kubenswrapper[4780]: I1210 11:53:31.267130 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-b29w5" event={"ID":"dfa93e73-9105-4ad4-a2c2-94cabb8926ba","Type":"ContainerDied","Data":"2a951a11b502501f688fcd74a871fafab42f6a720e5516f46680054c5a3476ea"} Dec 10 11:53:34 crc kubenswrapper[4780]: I1210 11:53:31.267148 4780 scope.go:117] "RemoveContainer" containerID="aaf16a4372cd97d031ed8b8729bacc2ae52b8348f92df061dc4bc8c07df60362" Dec 10 11:53:34 crc kubenswrapper[4780]: I1210 11:53:31.321573 4780 scope.go:117] "RemoveContainer" containerID="f587e9d32c312f4ba00080449e3c082fd042e8ee29ed63eddcc3594624cae38d" Dec 10 11:53:34 crc kubenswrapper[4780]: I1210 11:53:31.331505 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-b29w5"] Dec 10 11:53:34 crc kubenswrapper[4780]: I1210 11:53:31.346039 4780 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-b29w5"] Dec 10 11:53:34 crc kubenswrapper[4780]: I1210 11:53:31.356683 4780 scope.go:117] "RemoveContainer" containerID="f81576792ea1d0bcfe79c27fba5b387c72b320d2f82fe8a7290ff467fe5d6029" Dec 10 11:53:34 crc kubenswrapper[4780]: I1210 11:53:31.438252 4780 scope.go:117] "RemoveContainer" containerID="aaf16a4372cd97d031ed8b8729bacc2ae52b8348f92df061dc4bc8c07df60362" Dec 10 11:53:34 crc kubenswrapper[4780]: E1210 11:53:31.439862 4780 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"aaf16a4372cd97d031ed8b8729bacc2ae52b8348f92df061dc4bc8c07df60362\": container with ID starting with aaf16a4372cd97d031ed8b8729bacc2ae52b8348f92df061dc4bc8c07df60362 not found: ID does not exist" containerID="aaf16a4372cd97d031ed8b8729bacc2ae52b8348f92df061dc4bc8c07df60362" Dec 10 11:53:34 crc kubenswrapper[4780]: I1210 11:53:31.439960 
4780 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"aaf16a4372cd97d031ed8b8729bacc2ae52b8348f92df061dc4bc8c07df60362"} err="failed to get container status \"aaf16a4372cd97d031ed8b8729bacc2ae52b8348f92df061dc4bc8c07df60362\": rpc error: code = NotFound desc = could not find container \"aaf16a4372cd97d031ed8b8729bacc2ae52b8348f92df061dc4bc8c07df60362\": container with ID starting with aaf16a4372cd97d031ed8b8729bacc2ae52b8348f92df061dc4bc8c07df60362 not found: ID does not exist" Dec 10 11:53:34 crc kubenswrapper[4780]: I1210 11:53:31.440003 4780 scope.go:117] "RemoveContainer" containerID="f587e9d32c312f4ba00080449e3c082fd042e8ee29ed63eddcc3594624cae38d" Dec 10 11:53:34 crc kubenswrapper[4780]: E1210 11:53:31.440729 4780 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f587e9d32c312f4ba00080449e3c082fd042e8ee29ed63eddcc3594624cae38d\": container with ID starting with f587e9d32c312f4ba00080449e3c082fd042e8ee29ed63eddcc3594624cae38d not found: ID does not exist" containerID="f587e9d32c312f4ba00080449e3c082fd042e8ee29ed63eddcc3594624cae38d" Dec 10 11:53:34 crc kubenswrapper[4780]: I1210 11:53:31.440774 4780 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f587e9d32c312f4ba00080449e3c082fd042e8ee29ed63eddcc3594624cae38d"} err="failed to get container status \"f587e9d32c312f4ba00080449e3c082fd042e8ee29ed63eddcc3594624cae38d\": rpc error: code = NotFound desc = could not find container \"f587e9d32c312f4ba00080449e3c082fd042e8ee29ed63eddcc3594624cae38d\": container with ID starting with f587e9d32c312f4ba00080449e3c082fd042e8ee29ed63eddcc3594624cae38d not found: ID does not exist" Dec 10 11:53:34 crc kubenswrapper[4780]: I1210 11:53:31.440809 4780 scope.go:117] "RemoveContainer" containerID="f81576792ea1d0bcfe79c27fba5b387c72b320d2f82fe8a7290ff467fe5d6029" Dec 10 11:53:34 crc kubenswrapper[4780]: E1210 11:53:31.441368 4780 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f81576792ea1d0bcfe79c27fba5b387c72b320d2f82fe8a7290ff467fe5d6029\": container with ID starting with f81576792ea1d0bcfe79c27fba5b387c72b320d2f82fe8a7290ff467fe5d6029 not found: ID does not exist" containerID="f81576792ea1d0bcfe79c27fba5b387c72b320d2f82fe8a7290ff467fe5d6029" Dec 10 11:53:34 crc kubenswrapper[4780]: I1210 11:53:31.441401 4780 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f81576792ea1d0bcfe79c27fba5b387c72b320d2f82fe8a7290ff467fe5d6029"} err="failed to get container status \"f81576792ea1d0bcfe79c27fba5b387c72b320d2f82fe8a7290ff467fe5d6029\": rpc error: code = NotFound desc = could not find container \"f81576792ea1d0bcfe79c27fba5b387c72b320d2f82fe8a7290ff467fe5d6029\": container with ID starting with f81576792ea1d0bcfe79c27fba5b387c72b320d2f82fe8a7290ff467fe5d6029 not found: ID does not exist" Dec 10 11:53:34 crc kubenswrapper[4780]: E1210 11:53:31.965130 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 11:53:34 crc kubenswrapper[4780]: I1210 11:53:31.976721 4780 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" 
podUID="dfa93e73-9105-4ad4-a2c2-94cabb8926ba" path="/var/lib/kubelet/pods/dfa93e73-9105-4ad4-a2c2-94cabb8926ba/volumes" Dec 10 11:53:38 crc kubenswrapper[4780]: I1210 11:53:38.960725 4780 scope.go:117] "RemoveContainer" containerID="ef7d6e0e4daf35faca4d81cd35d964d90bb2353686969b711ed29b95292d5f74" Dec 10 11:53:38 crc kubenswrapper[4780]: E1210 11:53:38.965782 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xhdr5_openshift-machine-config-operator(6bf1dca1-b191-4796-b326-baac53e84045)\"" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" podUID="6bf1dca1-b191-4796-b326-baac53e84045" Dec 10 11:53:40 crc kubenswrapper[4780]: E1210 11:53:40.963083 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 11:53:45 crc kubenswrapper[4780]: I1210 11:53:45.490422 4780 generic.go:334] "Generic (PLEG): container finished" podID="305fe5d9-cca1-44f3-9ec9-034bde2b5434" containerID="830d2b3b4da925b56dd41d042a3c932ff94172dd9211742d21d9a48f86b2bcf2" exitCode=2 Dec 10 11:53:45 crc kubenswrapper[4780]: I1210 11:53:45.490511 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-2jkww" event={"ID":"305fe5d9-cca1-44f3-9ec9-034bde2b5434","Type":"ContainerDied","Data":"830d2b3b4da925b56dd41d042a3c932ff94172dd9211742d21d9a48f86b2bcf2"} Dec 10 11:53:45 crc kubenswrapper[4780]: E1210 11:53:45.972571 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 11:53:47 crc kubenswrapper[4780]: I1210 11:53:47.212303 4780 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-2jkww" Dec 10 11:53:47 crc kubenswrapper[4780]: I1210 11:53:47.398495 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/305fe5d9-cca1-44f3-9ec9-034bde2b5434-inventory\") pod \"305fe5d9-cca1-44f3-9ec9-034bde2b5434\" (UID: \"305fe5d9-cca1-44f3-9ec9-034bde2b5434\") " Dec 10 11:53:47 crc kubenswrapper[4780]: I1210 11:53:47.398643 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wh72d\" (UniqueName: \"kubernetes.io/projected/305fe5d9-cca1-44f3-9ec9-034bde2b5434-kube-api-access-wh72d\") pod \"305fe5d9-cca1-44f3-9ec9-034bde2b5434\" (UID: \"305fe5d9-cca1-44f3-9ec9-034bde2b5434\") " Dec 10 11:53:47 crc kubenswrapper[4780]: I1210 11:53:47.398771 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/305fe5d9-cca1-44f3-9ec9-034bde2b5434-ssh-key\") pod \"305fe5d9-cca1-44f3-9ec9-034bde2b5434\" (UID: \"305fe5d9-cca1-44f3-9ec9-034bde2b5434\") " Dec 10 11:53:47 crc kubenswrapper[4780]: I1210 11:53:47.409452 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/305fe5d9-cca1-44f3-9ec9-034bde2b5434-kube-api-access-wh72d" (OuterVolumeSpecName: "kube-api-access-wh72d") pod "305fe5d9-cca1-44f3-9ec9-034bde2b5434" (UID: "305fe5d9-cca1-44f3-9ec9-034bde2b5434"). InnerVolumeSpecName "kube-api-access-wh72d". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:53:47 crc kubenswrapper[4780]: I1210 11:53:47.447743 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/305fe5d9-cca1-44f3-9ec9-034bde2b5434-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "305fe5d9-cca1-44f3-9ec9-034bde2b5434" (UID: "305fe5d9-cca1-44f3-9ec9-034bde2b5434"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:53:47 crc kubenswrapper[4780]: I1210 11:53:47.450275 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/305fe5d9-cca1-44f3-9ec9-034bde2b5434-inventory" (OuterVolumeSpecName: "inventory") pod "305fe5d9-cca1-44f3-9ec9-034bde2b5434" (UID: "305fe5d9-cca1-44f3-9ec9-034bde2b5434"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:53:47 crc kubenswrapper[4780]: I1210 11:53:47.505373 4780 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/305fe5d9-cca1-44f3-9ec9-034bde2b5434-inventory\") on node \"crc\" DevicePath \"\"" Dec 10 11:53:47 crc kubenswrapper[4780]: I1210 11:53:47.505431 4780 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wh72d\" (UniqueName: \"kubernetes.io/projected/305fe5d9-cca1-44f3-9ec9-034bde2b5434-kube-api-access-wh72d\") on node \"crc\" DevicePath \"\"" Dec 10 11:53:47 crc kubenswrapper[4780]: I1210 11:53:47.505452 4780 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/305fe5d9-cca1-44f3-9ec9-034bde2b5434-ssh-key\") on node \"crc\" DevicePath \"\"" Dec 10 11:53:47 crc kubenswrapper[4780]: I1210 11:53:47.528177 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-2jkww" event={"ID":"305fe5d9-cca1-44f3-9ec9-034bde2b5434","Type":"ContainerDied","Data":"b466a2755bc4666c870b87d45b0c9327eba71dc374ff5a124705d0605ef8ae80"} Dec 10 11:53:47 crc kubenswrapper[4780]: I1210 11:53:47.528238 4780 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-2jkww" Dec 10 11:53:47 crc kubenswrapper[4780]: I1210 11:53:47.528256 4780 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b466a2755bc4666c870b87d45b0c9327eba71dc374ff5a124705d0605ef8ae80" Dec 10 11:53:49 crc kubenswrapper[4780]: I1210 11:53:49.959617 4780 scope.go:117] "RemoveContainer" containerID="ef7d6e0e4daf35faca4d81cd35d964d90bb2353686969b711ed29b95292d5f74" Dec 10 11:53:49 crc kubenswrapper[4780]: E1210 11:53:49.960752 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xhdr5_openshift-machine-config-operator(6bf1dca1-b191-4796-b326-baac53e84045)\"" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" podUID="6bf1dca1-b191-4796-b326-baac53e84045" Dec 10 11:53:54 crc kubenswrapper[4780]: E1210 11:53:54.963783 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 11:53:57 crc kubenswrapper[4780]: E1210 11:53:57.964485 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 11:54:04 crc kubenswrapper[4780]: I1210 11:54:04.960722 4780 scope.go:117] "RemoveContainer" containerID="ef7d6e0e4daf35faca4d81cd35d964d90bb2353686969b711ed29b95292d5f74" Dec 10 11:54:05 crc kubenswrapper[4780]: E1210 11:54:05.996807 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image 
\\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 11:54:06 crc kubenswrapper[4780]: I1210 11:54:06.884062 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" event={"ID":"6bf1dca1-b191-4796-b326-baac53e84045","Type":"ContainerStarted","Data":"79c82738d4445cbb6863cb284580498cbe6daf8f8365d829871cc33ad576fcd9"} Dec 10 11:54:09 crc kubenswrapper[4780]: E1210 11:54:09.963012 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 11:54:18 crc kubenswrapper[4780]: E1210 11:54:18.974442 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 11:54:22 crc kubenswrapper[4780]: E1210 11:54:22.966017 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 11:54:29 crc kubenswrapper[4780]: E1210 11:54:29.963383 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 11:54:35 crc kubenswrapper[4780]: E1210 11:54:35.143791 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 11:54:44 crc kubenswrapper[4780]: E1210 11:54:44.962714 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 11:54:47 crc kubenswrapper[4780]: E1210 11:54:47.961931 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 11:54:58 crc kubenswrapper[4780]: E1210 11:54:58.964332 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling 
image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 11:54:59 crc kubenswrapper[4780]: E1210 11:54:59.962527 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 11:55:04 crc kubenswrapper[4780]: I1210 11:55:04.054337 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/download-cache-edpm-deployment-openstack-edpm-ipam-p6drx"] Dec 10 11:55:04 crc kubenswrapper[4780]: E1210 11:55:04.055827 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="305fe5d9-cca1-44f3-9ec9-034bde2b5434" containerName="download-cache-edpm-deployment-openstack-edpm-ipam" Dec 10 11:55:04 crc kubenswrapper[4780]: I1210 11:55:04.055847 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="305fe5d9-cca1-44f3-9ec9-034bde2b5434" containerName="download-cache-edpm-deployment-openstack-edpm-ipam" Dec 10 11:55:04 crc kubenswrapper[4780]: E1210 11:55:04.055869 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="276fb168-4a0c-4b05-9d2e-f7b1c68ae605" containerName="extract-content" Dec 10 11:55:04 crc kubenswrapper[4780]: I1210 11:55:04.055876 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="276fb168-4a0c-4b05-9d2e-f7b1c68ae605" containerName="extract-content" Dec 10 11:55:04 crc kubenswrapper[4780]: E1210 11:55:04.055891 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dfa93e73-9105-4ad4-a2c2-94cabb8926ba" containerName="extract-content" Dec 10 11:55:04 crc kubenswrapper[4780]: I1210 11:55:04.055900 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="dfa93e73-9105-4ad4-a2c2-94cabb8926ba" containerName="extract-content" Dec 10 11:55:04 crc kubenswrapper[4780]: E1210 11:55:04.055965 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="276fb168-4a0c-4b05-9d2e-f7b1c68ae605" containerName="registry-server" Dec 10 11:55:04 crc kubenswrapper[4780]: I1210 11:55:04.055972 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="276fb168-4a0c-4b05-9d2e-f7b1c68ae605" containerName="registry-server" Dec 10 11:55:04 crc kubenswrapper[4780]: E1210 11:55:04.055996 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="276fb168-4a0c-4b05-9d2e-f7b1c68ae605" containerName="extract-utilities" Dec 10 11:55:04 crc kubenswrapper[4780]: I1210 11:55:04.056005 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="276fb168-4a0c-4b05-9d2e-f7b1c68ae605" containerName="extract-utilities" Dec 10 11:55:04 crc kubenswrapper[4780]: E1210 11:55:04.056015 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dfa93e73-9105-4ad4-a2c2-94cabb8926ba" containerName="registry-server" Dec 10 11:55:04 crc kubenswrapper[4780]: I1210 11:55:04.056023 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="dfa93e73-9105-4ad4-a2c2-94cabb8926ba" containerName="registry-server" Dec 10 11:55:04 crc kubenswrapper[4780]: E1210 11:55:04.056052 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dfa93e73-9105-4ad4-a2c2-94cabb8926ba" containerName="extract-utilities" Dec 10 11:55:04 crc kubenswrapper[4780]: I1210 11:55:04.056061 4780 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="dfa93e73-9105-4ad4-a2c2-94cabb8926ba" containerName="extract-utilities" Dec 10 11:55:04 crc kubenswrapper[4780]: I1210 11:55:04.056365 4780 memory_manager.go:354] "RemoveStaleState removing state" podUID="276fb168-4a0c-4b05-9d2e-f7b1c68ae605" containerName="registry-server" Dec 10 11:55:04 crc kubenswrapper[4780]: I1210 11:55:04.056400 4780 memory_manager.go:354] "RemoveStaleState removing state" podUID="305fe5d9-cca1-44f3-9ec9-034bde2b5434" containerName="download-cache-edpm-deployment-openstack-edpm-ipam" Dec 10 11:55:04 crc kubenswrapper[4780]: I1210 11:55:04.056431 4780 memory_manager.go:354] "RemoveStaleState removing state" podUID="dfa93e73-9105-4ad4-a2c2-94cabb8926ba" containerName="registry-server" Dec 10 11:55:04 crc kubenswrapper[4780]: I1210 11:55:04.058073 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-p6drx" Dec 10 11:55:04 crc kubenswrapper[4780]: I1210 11:55:04.063021 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Dec 10 11:55:04 crc kubenswrapper[4780]: I1210 11:55:04.064458 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Dec 10 11:55:04 crc kubenswrapper[4780]: I1210 11:55:04.065113 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Dec 10 11:55:04 crc kubenswrapper[4780]: I1210 11:55:04.065655 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-p2qrb" Dec 10 11:55:04 crc kubenswrapper[4780]: I1210 11:55:04.074573 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/download-cache-edpm-deployment-openstack-edpm-ipam-p6drx"] Dec 10 11:55:04 crc kubenswrapper[4780]: I1210 11:55:04.174322 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/fa1fb8b2-2d6f-489e-8d87-3c88d49262b9-inventory\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-p6drx\" (UID: \"fa1fb8b2-2d6f-489e-8d87-3c88d49262b9\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-p6drx" Dec 10 11:55:04 crc kubenswrapper[4780]: I1210 11:55:04.175321 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/fa1fb8b2-2d6f-489e-8d87-3c88d49262b9-ssh-key\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-p6drx\" (UID: \"fa1fb8b2-2d6f-489e-8d87-3c88d49262b9\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-p6drx" Dec 10 11:55:04 crc kubenswrapper[4780]: I1210 11:55:04.176099 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xhzf7\" (UniqueName: \"kubernetes.io/projected/fa1fb8b2-2d6f-489e-8d87-3c88d49262b9-kube-api-access-xhzf7\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-p6drx\" (UID: \"fa1fb8b2-2d6f-489e-8d87-3c88d49262b9\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-p6drx" Dec 10 11:55:04 crc kubenswrapper[4780]: I1210 11:55:04.281346 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/fa1fb8b2-2d6f-489e-8d87-3c88d49262b9-ssh-key\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-p6drx\" (UID: 
\"fa1fb8b2-2d6f-489e-8d87-3c88d49262b9\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-p6drx" Dec 10 11:55:04 crc kubenswrapper[4780]: I1210 11:55:04.282327 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xhzf7\" (UniqueName: \"kubernetes.io/projected/fa1fb8b2-2d6f-489e-8d87-3c88d49262b9-kube-api-access-xhzf7\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-p6drx\" (UID: \"fa1fb8b2-2d6f-489e-8d87-3c88d49262b9\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-p6drx" Dec 10 11:55:04 crc kubenswrapper[4780]: I1210 11:55:04.282683 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/fa1fb8b2-2d6f-489e-8d87-3c88d49262b9-inventory\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-p6drx\" (UID: \"fa1fb8b2-2d6f-489e-8d87-3c88d49262b9\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-p6drx" Dec 10 11:55:04 crc kubenswrapper[4780]: I1210 11:55:04.294069 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/fa1fb8b2-2d6f-489e-8d87-3c88d49262b9-inventory\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-p6drx\" (UID: \"fa1fb8b2-2d6f-489e-8d87-3c88d49262b9\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-p6drx" Dec 10 11:55:04 crc kubenswrapper[4780]: I1210 11:55:04.304877 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/fa1fb8b2-2d6f-489e-8d87-3c88d49262b9-ssh-key\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-p6drx\" (UID: \"fa1fb8b2-2d6f-489e-8d87-3c88d49262b9\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-p6drx" Dec 10 11:55:04 crc kubenswrapper[4780]: I1210 11:55:04.307328 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xhzf7\" (UniqueName: \"kubernetes.io/projected/fa1fb8b2-2d6f-489e-8d87-3c88d49262b9-kube-api-access-xhzf7\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-p6drx\" (UID: \"fa1fb8b2-2d6f-489e-8d87-3c88d49262b9\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-p6drx" Dec 10 11:55:04 crc kubenswrapper[4780]: I1210 11:55:04.399772 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-p6drx" Dec 10 11:55:05 crc kubenswrapper[4780]: I1210 11:55:05.082885 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/download-cache-edpm-deployment-openstack-edpm-ipam-p6drx"] Dec 10 11:55:05 crc kubenswrapper[4780]: I1210 11:55:05.143841 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-p6drx" event={"ID":"fa1fb8b2-2d6f-489e-8d87-3c88d49262b9","Type":"ContainerStarted","Data":"ff18133d17d7fdef10f0c35c2599de769c427de47dbd71eadda5e5ea0b1ce3d8"} Dec 10 11:55:08 crc kubenswrapper[4780]: I1210 11:55:08.195899 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-p6drx" event={"ID":"fa1fb8b2-2d6f-489e-8d87-3c88d49262b9","Type":"ContainerStarted","Data":"71adf83129d018112b6ac0b92f898aebad54c9c24ce8231afd7f53ea879de1d0"} Dec 10 11:55:08 crc kubenswrapper[4780]: I1210 11:55:08.232086 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-p6drx" podStartSLOduration=1.920410286 podStartE2EDuration="4.232056246s" podCreationTimestamp="2025-12-10 11:55:04 +0000 UTC" firstStartedPulling="2025-12-10 11:55:05.105056773 +0000 UTC m=+4209.958450216" lastFinishedPulling="2025-12-10 11:55:07.416702733 +0000 UTC m=+4212.270096176" observedRunningTime="2025-12-10 11:55:08.222285989 +0000 UTC m=+4213.075679432" watchObservedRunningTime="2025-12-10 11:55:08.232056246 +0000 UTC m=+4213.085449689" Dec 10 11:55:13 crc kubenswrapper[4780]: E1210 11:55:13.109072 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 11:55:13 crc kubenswrapper[4780]: E1210 11:55:13.110122 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 11:55:24 crc kubenswrapper[4780]: E1210 11:55:24.962741 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 11:55:25 crc kubenswrapper[4780]: E1210 11:55:25.985610 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 11:55:37 crc kubenswrapper[4780]: E1210 11:55:37.964203 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" 
pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 11:55:40 crc kubenswrapper[4780]: E1210 11:55:40.963221 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 11:55:51 crc kubenswrapper[4780]: E1210 11:55:51.967629 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 11:55:53 crc kubenswrapper[4780]: E1210 11:55:53.963340 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 11:56:04 crc kubenswrapper[4780]: E1210 11:56:04.963189 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 11:56:05 crc kubenswrapper[4780]: E1210 11:56:05.979345 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 11:56:19 crc kubenswrapper[4780]: E1210 11:56:19.976543 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 11:56:20 crc kubenswrapper[4780]: E1210 11:56:20.963850 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 11:56:27 crc kubenswrapper[4780]: I1210 11:56:27.475814 4780 patch_prober.go:28] interesting pod/machine-config-daemon-xhdr5 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 10 11:56:27 crc kubenswrapper[4780]: I1210 11:56:27.476742 4780 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" podUID="6bf1dca1-b191-4796-b326-baac53e84045" containerName="machine-config-daemon" probeResult="failure" 
output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 10 11:56:34 crc kubenswrapper[4780]: E1210 11:56:34.963255 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 11:56:34 crc kubenswrapper[4780]: E1210 11:56:34.963869 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 11:56:45 crc kubenswrapper[4780]: I1210 11:56:45.398690 4780 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-monitoring/prometheus-k8s-0" podUID="bb29164d-ab20-4069-88fd-3e44aaf2548e" containerName="prometheus" probeResult="failure" output="command timed out" Dec 10 11:56:45 crc kubenswrapper[4780]: I1210 11:56:45.398760 4780 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-monitoring/prometheus-k8s-0" podUID="bb29164d-ab20-4069-88fd-3e44aaf2548e" containerName="prometheus" probeResult="failure" output="command timed out" Dec 10 11:56:46 crc kubenswrapper[4780]: E1210 11:56:46.126687 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 11:56:49 crc kubenswrapper[4780]: E1210 11:56:49.962748 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 11:56:57 crc kubenswrapper[4780]: I1210 11:56:57.475683 4780 patch_prober.go:28] interesting pod/machine-config-daemon-xhdr5 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 10 11:56:57 crc kubenswrapper[4780]: I1210 11:56:57.476617 4780 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" podUID="6bf1dca1-b191-4796-b326-baac53e84045" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 10 11:57:00 crc kubenswrapper[4780]: E1210 11:57:00.963729 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 11:57:00 crc kubenswrapper[4780]: E1210 11:57:00.963780 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed 
to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 11:57:06 crc kubenswrapper[4780]: I1210 11:57:06.029043 4780 trace.go:236] Trace[1352073288]: "Calculate volume metrics of wal for pod openshift-logging/logging-loki-ingester-0" (10-Dec-2025 11:57:04.227) (total time: 1801ms): Dec 10 11:57:06 crc kubenswrapper[4780]: Trace[1352073288]: [1.801273968s] [1.801273968s] END Dec 10 11:57:06 crc kubenswrapper[4780]: I1210 11:57:06.049450 4780 trace.go:236] Trace[1723411511]: "Calculate volume metrics of storage for pod openshift-logging/logging-loki-compactor-0" (10-Dec-2025 11:56:43.300) (total time: 22749ms): Dec 10 11:57:06 crc kubenswrapper[4780]: Trace[1723411511]: [22.749356415s] [22.749356415s] END Dec 10 11:57:11 crc kubenswrapper[4780]: I1210 11:57:11.965599 4780 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Dec 10 11:57:12 crc kubenswrapper[4780]: E1210 11:57:12.093728 4780 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested" Dec 10 11:57:12 crc kubenswrapper[4780]: E1210 11:57:12.093824 4780 kuberuntime_image.go:55] "Failed to pull image" err="initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested" Dec 10 11:57:12 crc kubenswrapper[4780]: E1210 11:57:12.094144 4780 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:heat-db-sync,Image:quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested,Command:[/bin/bash],Args:[-c /usr/bin/heat-manage --config-dir /etc/heat/heat.conf.d db_sync],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:true,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/heat/heat.conf.d/00-default.conf,SubPath:00-default.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:false,MountPath:/etc/heat/heat.conf.d/01-custom.conf,SubPath:01-custom.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/my.cnf,SubPath:my.cnf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-fh6ms,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42418,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:*42418,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod heat-db-sync-nd4t7_openstack(4ba2892c-316e-4819-a33c-d7b2b6803553): ErrImagePull: initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" logger="UnhandledError" Dec 10 11:57:12 crc kubenswrapper[4780]: E1210 11:57:12.095831 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ErrImagePull: \"initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 11:57:13 crc kubenswrapper[4780]: E1210 11:57:13.963569 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 11:57:24 crc kubenswrapper[4780]: E1210 11:57:24.963526 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 11:57:27 crc kubenswrapper[4780]: I1210 11:57:27.476210 4780 patch_prober.go:28] interesting pod/machine-config-daemon-xhdr5 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 10 11:57:27 crc kubenswrapper[4780]: I1210 11:57:27.476903 4780 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" podUID="6bf1dca1-b191-4796-b326-baac53e84045" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 10 11:57:27 crc kubenswrapper[4780]: I1210 11:57:27.477045 4780 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" Dec 10 11:57:27 crc kubenswrapper[4780]: I1210 11:57:27.479202 4780 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"79c82738d4445cbb6863cb284580498cbe6daf8f8365d829871cc33ad576fcd9"} pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Dec 10 11:57:27 crc kubenswrapper[4780]: I1210 11:57:27.479295 4780 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" podUID="6bf1dca1-b191-4796-b326-baac53e84045" containerName="machine-config-daemon" containerID="cri-o://79c82738d4445cbb6863cb284580498cbe6daf8f8365d829871cc33ad576fcd9" gracePeriod=600 Dec 10 11:57:28 crc kubenswrapper[4780]: I1210 11:57:28.839522 4780 generic.go:334] "Generic (PLEG): container finished" podID="6bf1dca1-b191-4796-b326-baac53e84045" containerID="79c82738d4445cbb6863cb284580498cbe6daf8f8365d829871cc33ad576fcd9" exitCode=0 Dec 10 11:57:28 crc kubenswrapper[4780]: I1210 11:57:28.839632 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" event={"ID":"6bf1dca1-b191-4796-b326-baac53e84045","Type":"ContainerDied","Data":"79c82738d4445cbb6863cb284580498cbe6daf8f8365d829871cc33ad576fcd9"} Dec 10 11:57:28 crc kubenswrapper[4780]: I1210 11:57:28.841074 4780 scope.go:117] "RemoveContainer" containerID="ef7d6e0e4daf35faca4d81cd35d964d90bb2353686969b711ed29b95292d5f74" Dec 10 11:57:29 crc kubenswrapper[4780]: E1210 11:57:29.092446 4780 log.go:32] 
"PullImage from image service failed" err="rpc error: code = Unknown desc = initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested" Dec 10 11:57:29 crc kubenswrapper[4780]: E1210 11:57:29.092565 4780 kuberuntime_image.go:55] "Failed to pull image" err="initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested" Dec 10 11:57:29 crc kubenswrapper[4780]: E1210 11:57:29.092825 4780 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:ceilometer-central-agent,Image:quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n5d9hcfh66bh66bh89h5cdh97h57ch598h68h5b5h689h56chc5h96h58ch687h5dfh5ddh645h68bhcchcdh56ch56fh9fh654hd4h8dhb9h74h59cq,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/var/lib/openstack/bin,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/openstack/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:ceilometer-central-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-gf2w8,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[/usr/bin/python3 /var/lib/openstack/bin/centralhealth.py],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:300,TimeoutSeconds:5,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod 
ceilometer-0_openstack(317b5b7c-bb08-4441-a2ef-8c2d7390ada6): ErrImagePull: initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" logger="UnhandledError" Dec 10 11:57:29 crc kubenswrapper[4780]: E1210 11:57:29.095062 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ErrImagePull: \"initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 11:57:29 crc kubenswrapper[4780]: I1210 11:57:29.942357 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" event={"ID":"6bf1dca1-b191-4796-b326-baac53e84045","Type":"ContainerStarted","Data":"94a435be940f553f43abb3eb093a0737bc400f1f1f1a27fc525901a73de5cb74"} Dec 10 11:57:36 crc kubenswrapper[4780]: E1210 11:57:36.963574 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 11:57:44 crc kubenswrapper[4780]: E1210 11:57:44.966193 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 11:57:49 crc kubenswrapper[4780]: E1210 11:57:49.961431 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 11:57:56 crc kubenswrapper[4780]: E1210 11:57:56.077902 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 11:58:00 crc kubenswrapper[4780]: E1210 11:58:00.962596 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 11:58:09 crc kubenswrapper[4780]: E1210 11:58:09.963384 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" 
with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 11:58:12 crc kubenswrapper[4780]: E1210 11:58:12.965257 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 11:58:20 crc kubenswrapper[4780]: E1210 11:58:20.963175 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 11:58:23 crc kubenswrapper[4780]: E1210 11:58:23.965582 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 11:58:34 crc kubenswrapper[4780]: E1210 11:58:34.964114 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 11:58:36 crc kubenswrapper[4780]: E1210 11:58:36.962756 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 11:58:47 crc kubenswrapper[4780]: I1210 11:58:47.410342 4780 scope.go:117] "RemoveContainer" containerID="e1c75122c76f44b42d456a86f8cc05b84bdc6c5f28c846321af36478618d4866" Dec 10 11:58:47 crc kubenswrapper[4780]: I1210 11:58:47.447589 4780 scope.go:117] "RemoveContainer" containerID="5c0f5f1e293fe20da3d13cd3d2d59933efbda2ef8cedc54096a5acaf09754dcf" Dec 10 11:58:47 crc kubenswrapper[4780]: I1210 11:58:47.516288 4780 scope.go:117] "RemoveContainer" containerID="9a546bc8b030a10149504dbe2f0e49c3d1e9bc628c76670eace9a37fa4355a60" Dec 10 11:58:48 crc kubenswrapper[4780]: E1210 11:58:48.961812 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 11:58:49 crc kubenswrapper[4780]: E1210 11:58:49.962621 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" 
podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 11:59:03 crc kubenswrapper[4780]: E1210 11:59:03.023528 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 11:59:03 crc kubenswrapper[4780]: E1210 11:59:03.024077 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 11:59:13 crc kubenswrapper[4780]: E1210 11:59:13.964990 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 11:59:15 crc kubenswrapper[4780]: E1210 11:59:15.976796 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 11:59:24 crc kubenswrapper[4780]: E1210 11:59:24.963328 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 11:59:28 crc kubenswrapper[4780]: E1210 11:59:28.963828 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 11:59:39 crc kubenswrapper[4780]: E1210 11:59:39.964763 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 11:59:42 crc kubenswrapper[4780]: E1210 11:59:42.962304 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 11:59:54 crc kubenswrapper[4780]: E1210 11:59:54.964559 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image 
\\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 11:59:57 crc kubenswrapper[4780]: I1210 11:59:57.476116 4780 patch_prober.go:28] interesting pod/machine-config-daemon-xhdr5 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 10 11:59:57 crc kubenswrapper[4780]: I1210 11:59:57.477250 4780 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" podUID="6bf1dca1-b191-4796-b326-baac53e84045" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 10 11:59:57 crc kubenswrapper[4780]: E1210 11:59:57.963419 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 12:00:00 crc kubenswrapper[4780]: I1210 12:00:00.187677 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29422800-hnr6z"] Dec 10 12:00:00 crc kubenswrapper[4780]: I1210 12:00:00.190077 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29422800-hnr6z" Dec 10 12:00:00 crc kubenswrapper[4780]: I1210 12:00:00.192720 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Dec 10 12:00:00 crc kubenswrapper[4780]: I1210 12:00:00.196111 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Dec 10 12:00:00 crc kubenswrapper[4780]: I1210 12:00:00.224046 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29422800-hnr6z"] Dec 10 12:00:00 crc kubenswrapper[4780]: I1210 12:00:00.299308 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-x6nnr\" (UniqueName: \"kubernetes.io/projected/95e4fd49-638f-4adf-bd4e-f83fc4fe6876-kube-api-access-x6nnr\") pod \"collect-profiles-29422800-hnr6z\" (UID: \"95e4fd49-638f-4adf-bd4e-f83fc4fe6876\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29422800-hnr6z" Dec 10 12:00:00 crc kubenswrapper[4780]: I1210 12:00:00.299426 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/95e4fd49-638f-4adf-bd4e-f83fc4fe6876-config-volume\") pod \"collect-profiles-29422800-hnr6z\" (UID: \"95e4fd49-638f-4adf-bd4e-f83fc4fe6876\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29422800-hnr6z" Dec 10 12:00:00 crc kubenswrapper[4780]: I1210 12:00:00.299453 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/95e4fd49-638f-4adf-bd4e-f83fc4fe6876-secret-volume\") pod \"collect-profiles-29422800-hnr6z\" (UID: 
\"95e4fd49-638f-4adf-bd4e-f83fc4fe6876\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29422800-hnr6z" Dec 10 12:00:00 crc kubenswrapper[4780]: I1210 12:00:00.402597 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-x6nnr\" (UniqueName: \"kubernetes.io/projected/95e4fd49-638f-4adf-bd4e-f83fc4fe6876-kube-api-access-x6nnr\") pod \"collect-profiles-29422800-hnr6z\" (UID: \"95e4fd49-638f-4adf-bd4e-f83fc4fe6876\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29422800-hnr6z" Dec 10 12:00:00 crc kubenswrapper[4780]: I1210 12:00:00.402729 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/95e4fd49-638f-4adf-bd4e-f83fc4fe6876-config-volume\") pod \"collect-profiles-29422800-hnr6z\" (UID: \"95e4fd49-638f-4adf-bd4e-f83fc4fe6876\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29422800-hnr6z" Dec 10 12:00:00 crc kubenswrapper[4780]: I1210 12:00:00.402764 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/95e4fd49-638f-4adf-bd4e-f83fc4fe6876-secret-volume\") pod \"collect-profiles-29422800-hnr6z\" (UID: \"95e4fd49-638f-4adf-bd4e-f83fc4fe6876\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29422800-hnr6z" Dec 10 12:00:00 crc kubenswrapper[4780]: I1210 12:00:00.404527 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/95e4fd49-638f-4adf-bd4e-f83fc4fe6876-config-volume\") pod \"collect-profiles-29422800-hnr6z\" (UID: \"95e4fd49-638f-4adf-bd4e-f83fc4fe6876\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29422800-hnr6z" Dec 10 12:00:00 crc kubenswrapper[4780]: I1210 12:00:00.412302 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/95e4fd49-638f-4adf-bd4e-f83fc4fe6876-secret-volume\") pod \"collect-profiles-29422800-hnr6z\" (UID: \"95e4fd49-638f-4adf-bd4e-f83fc4fe6876\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29422800-hnr6z" Dec 10 12:00:00 crc kubenswrapper[4780]: I1210 12:00:00.425152 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-x6nnr\" (UniqueName: \"kubernetes.io/projected/95e4fd49-638f-4adf-bd4e-f83fc4fe6876-kube-api-access-x6nnr\") pod \"collect-profiles-29422800-hnr6z\" (UID: \"95e4fd49-638f-4adf-bd4e-f83fc4fe6876\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29422800-hnr6z" Dec 10 12:00:00 crc kubenswrapper[4780]: I1210 12:00:00.525523 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29422800-hnr6z" Dec 10 12:00:01 crc kubenswrapper[4780]: I1210 12:00:01.091322 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29422800-hnr6z"] Dec 10 12:00:01 crc kubenswrapper[4780]: W1210 12:00:01.094450 4780 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod95e4fd49_638f_4adf_bd4e_f83fc4fe6876.slice/crio-cafd0604a0f40bcd19fc0cdc73232108f5e4497890e713cf10c13dcd4fdd326a WatchSource:0}: Error finding container cafd0604a0f40bcd19fc0cdc73232108f5e4497890e713cf10c13dcd4fdd326a: Status 404 returned error can't find the container with id cafd0604a0f40bcd19fc0cdc73232108f5e4497890e713cf10c13dcd4fdd326a Dec 10 12:00:02 crc kubenswrapper[4780]: I1210 12:00:02.097089 4780 generic.go:334] "Generic (PLEG): container finished" podID="95e4fd49-638f-4adf-bd4e-f83fc4fe6876" containerID="592cb3e63065665d08c7f07b03a325ed2bd3d0063bf8d60bc3bd0a44f731af21" exitCode=0 Dec 10 12:00:02 crc kubenswrapper[4780]: I1210 12:00:02.097229 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29422800-hnr6z" event={"ID":"95e4fd49-638f-4adf-bd4e-f83fc4fe6876","Type":"ContainerDied","Data":"592cb3e63065665d08c7f07b03a325ed2bd3d0063bf8d60bc3bd0a44f731af21"} Dec 10 12:00:02 crc kubenswrapper[4780]: I1210 12:00:02.097564 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29422800-hnr6z" event={"ID":"95e4fd49-638f-4adf-bd4e-f83fc4fe6876","Type":"ContainerStarted","Data":"cafd0604a0f40bcd19fc0cdc73232108f5e4497890e713cf10c13dcd4fdd326a"} Dec 10 12:00:03 crc kubenswrapper[4780]: I1210 12:00:03.799048 4780 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29422800-hnr6z" Dec 10 12:00:03 crc kubenswrapper[4780]: I1210 12:00:03.964853 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x6nnr\" (UniqueName: \"kubernetes.io/projected/95e4fd49-638f-4adf-bd4e-f83fc4fe6876-kube-api-access-x6nnr\") pod \"95e4fd49-638f-4adf-bd4e-f83fc4fe6876\" (UID: \"95e4fd49-638f-4adf-bd4e-f83fc4fe6876\") " Dec 10 12:00:03 crc kubenswrapper[4780]: I1210 12:00:03.965563 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/95e4fd49-638f-4adf-bd4e-f83fc4fe6876-secret-volume\") pod \"95e4fd49-638f-4adf-bd4e-f83fc4fe6876\" (UID: \"95e4fd49-638f-4adf-bd4e-f83fc4fe6876\") " Dec 10 12:00:03 crc kubenswrapper[4780]: I1210 12:00:03.965832 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/95e4fd49-638f-4adf-bd4e-f83fc4fe6876-config-volume\") pod \"95e4fd49-638f-4adf-bd4e-f83fc4fe6876\" (UID: \"95e4fd49-638f-4adf-bd4e-f83fc4fe6876\") " Dec 10 12:00:03 crc kubenswrapper[4780]: I1210 12:00:03.966991 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/95e4fd49-638f-4adf-bd4e-f83fc4fe6876-config-volume" (OuterVolumeSpecName: "config-volume") pod "95e4fd49-638f-4adf-bd4e-f83fc4fe6876" (UID: "95e4fd49-638f-4adf-bd4e-f83fc4fe6876"). InnerVolumeSpecName "config-volume". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 12:00:03 crc kubenswrapper[4780]: I1210 12:00:03.974937 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/95e4fd49-638f-4adf-bd4e-f83fc4fe6876-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "95e4fd49-638f-4adf-bd4e-f83fc4fe6876" (UID: "95e4fd49-638f-4adf-bd4e-f83fc4fe6876"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 12:00:03 crc kubenswrapper[4780]: I1210 12:00:03.977890 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/95e4fd49-638f-4adf-bd4e-f83fc4fe6876-kube-api-access-x6nnr" (OuterVolumeSpecName: "kube-api-access-x6nnr") pod "95e4fd49-638f-4adf-bd4e-f83fc4fe6876" (UID: "95e4fd49-638f-4adf-bd4e-f83fc4fe6876"). InnerVolumeSpecName "kube-api-access-x6nnr". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 12:00:04 crc kubenswrapper[4780]: I1210 12:00:04.071668 4780 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/95e4fd49-638f-4adf-bd4e-f83fc4fe6876-secret-volume\") on node \"crc\" DevicePath \"\"" Dec 10 12:00:04 crc kubenswrapper[4780]: I1210 12:00:04.071723 4780 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/95e4fd49-638f-4adf-bd4e-f83fc4fe6876-config-volume\") on node \"crc\" DevicePath \"\"" Dec 10 12:00:04 crc kubenswrapper[4780]: I1210 12:00:04.071740 4780 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x6nnr\" (UniqueName: \"kubernetes.io/projected/95e4fd49-638f-4adf-bd4e-f83fc4fe6876-kube-api-access-x6nnr\") on node \"crc\" DevicePath \"\"" Dec 10 12:00:04 crc kubenswrapper[4780]: I1210 12:00:04.135745 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29422800-hnr6z" event={"ID":"95e4fd49-638f-4adf-bd4e-f83fc4fe6876","Type":"ContainerDied","Data":"cafd0604a0f40bcd19fc0cdc73232108f5e4497890e713cf10c13dcd4fdd326a"} Dec 10 12:00:04 crc kubenswrapper[4780]: I1210 12:00:04.135820 4780 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="cafd0604a0f40bcd19fc0cdc73232108f5e4497890e713cf10c13dcd4fdd326a" Dec 10 12:00:04 crc kubenswrapper[4780]: I1210 12:00:04.136037 4780 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29422800-hnr6z" Dec 10 12:00:04 crc kubenswrapper[4780]: I1210 12:00:04.915855 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29422755-2ck6k"] Dec 10 12:00:04 crc kubenswrapper[4780]: I1210 12:00:04.929821 4780 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29422755-2ck6k"] Dec 10 12:00:05 crc kubenswrapper[4780]: E1210 12:00:05.976829 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 12:00:05 crc kubenswrapper[4780]: I1210 12:00:05.981633 4780 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9c026cff-71ab-4b14-87af-38c8f42ccac1" path="/var/lib/kubelet/pods/9c026cff-71ab-4b14-87af-38c8f42ccac1/volumes" Dec 10 12:00:11 crc kubenswrapper[4780]: E1210 12:00:11.962503 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 12:00:18 crc kubenswrapper[4780]: E1210 12:00:18.961899 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 12:00:25 crc kubenswrapper[4780]: E1210 12:00:25.962882 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 12:00:27 crc kubenswrapper[4780]: I1210 12:00:27.476687 4780 patch_prober.go:28] interesting pod/machine-config-daemon-xhdr5 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 10 12:00:27 crc kubenswrapper[4780]: I1210 12:00:27.476803 4780 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" podUID="6bf1dca1-b191-4796-b326-baac53e84045" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 10 12:00:32 crc kubenswrapper[4780]: E1210 12:00:32.962898 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 12:00:36 crc kubenswrapper[4780]: E1210 
12:00:36.963015 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 12:00:46 crc kubenswrapper[4780]: E1210 12:00:46.964004 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 12:00:47 crc kubenswrapper[4780]: I1210 12:00:47.633379 4780 scope.go:117] "RemoveContainer" containerID="c2bb03a116004b6fb136a2cfb449d8105933373c20832a5e610ff0f957121103" Dec 10 12:00:48 crc kubenswrapper[4780]: E1210 12:00:48.965800 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 12:00:57 crc kubenswrapper[4780]: I1210 12:00:57.475467 4780 patch_prober.go:28] interesting pod/machine-config-daemon-xhdr5 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 10 12:00:57 crc kubenswrapper[4780]: I1210 12:00:57.476137 4780 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" podUID="6bf1dca1-b191-4796-b326-baac53e84045" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 10 12:00:57 crc kubenswrapper[4780]: I1210 12:00:57.476204 4780 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" Dec 10 12:00:57 crc kubenswrapper[4780]: I1210 12:00:57.477662 4780 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"94a435be940f553f43abb3eb093a0737bc400f1f1f1a27fc525901a73de5cb74"} pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Dec 10 12:00:57 crc kubenswrapper[4780]: I1210 12:00:57.477749 4780 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" podUID="6bf1dca1-b191-4796-b326-baac53e84045" containerName="machine-config-daemon" containerID="cri-o://94a435be940f553f43abb3eb093a0737bc400f1f1f1a27fc525901a73de5cb74" gracePeriod=600 Dec 10 12:00:57 crc kubenswrapper[4780]: E1210 12:00:57.634839 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xhdr5_openshift-machine-config-operator(6bf1dca1-b191-4796-b326-baac53e84045)\"" 
pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" podUID="6bf1dca1-b191-4796-b326-baac53e84045" Dec 10 12:00:58 crc kubenswrapper[4780]: I1210 12:00:58.159678 4780 generic.go:334] "Generic (PLEG): container finished" podID="6bf1dca1-b191-4796-b326-baac53e84045" containerID="94a435be940f553f43abb3eb093a0737bc400f1f1f1a27fc525901a73de5cb74" exitCode=0 Dec 10 12:00:58 crc kubenswrapper[4780]: I1210 12:00:58.159731 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" event={"ID":"6bf1dca1-b191-4796-b326-baac53e84045","Type":"ContainerDied","Data":"94a435be940f553f43abb3eb093a0737bc400f1f1f1a27fc525901a73de5cb74"} Dec 10 12:00:58 crc kubenswrapper[4780]: I1210 12:00:58.159775 4780 scope.go:117] "RemoveContainer" containerID="79c82738d4445cbb6863cb284580498cbe6daf8f8365d829871cc33ad576fcd9" Dec 10 12:00:58 crc kubenswrapper[4780]: I1210 12:00:58.160472 4780 scope.go:117] "RemoveContainer" containerID="94a435be940f553f43abb3eb093a0737bc400f1f1f1a27fc525901a73de5cb74" Dec 10 12:00:58 crc kubenswrapper[4780]: E1210 12:00:58.160906 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xhdr5_openshift-machine-config-operator(6bf1dca1-b191-4796-b326-baac53e84045)\"" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" podUID="6bf1dca1-b191-4796-b326-baac53e84045" Dec 10 12:00:59 crc kubenswrapper[4780]: E1210 12:00:59.963953 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 12:01:00 crc kubenswrapper[4780]: I1210 12:01:00.171950 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-cron-29422801-mp9nl"] Dec 10 12:01:00 crc kubenswrapper[4780]: E1210 12:01:00.173132 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="95e4fd49-638f-4adf-bd4e-f83fc4fe6876" containerName="collect-profiles" Dec 10 12:01:00 crc kubenswrapper[4780]: I1210 12:01:00.173178 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="95e4fd49-638f-4adf-bd4e-f83fc4fe6876" containerName="collect-profiles" Dec 10 12:01:00 crc kubenswrapper[4780]: I1210 12:01:00.173597 4780 memory_manager.go:354] "RemoveStaleState removing state" podUID="95e4fd49-638f-4adf-bd4e-f83fc4fe6876" containerName="collect-profiles" Dec 10 12:01:00 crc kubenswrapper[4780]: I1210 12:01:00.175202 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-cron-29422801-mp9nl" Dec 10 12:01:00 crc kubenswrapper[4780]: I1210 12:01:00.197015 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b1af2bed-664f-49ec-b2f3-7b835900ee5f-combined-ca-bundle\") pod \"keystone-cron-29422801-mp9nl\" (UID: \"b1af2bed-664f-49ec-b2f3-7b835900ee5f\") " pod="openstack/keystone-cron-29422801-mp9nl" Dec 10 12:01:00 crc kubenswrapper[4780]: I1210 12:01:00.197103 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/b1af2bed-664f-49ec-b2f3-7b835900ee5f-fernet-keys\") pod \"keystone-cron-29422801-mp9nl\" (UID: \"b1af2bed-664f-49ec-b2f3-7b835900ee5f\") " pod="openstack/keystone-cron-29422801-mp9nl" Dec 10 12:01:00 crc kubenswrapper[4780]: I1210 12:01:00.197424 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b1af2bed-664f-49ec-b2f3-7b835900ee5f-config-data\") pod \"keystone-cron-29422801-mp9nl\" (UID: \"b1af2bed-664f-49ec-b2f3-7b835900ee5f\") " pod="openstack/keystone-cron-29422801-mp9nl" Dec 10 12:01:00 crc kubenswrapper[4780]: I1210 12:01:00.197454 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pf6l9\" (UniqueName: \"kubernetes.io/projected/b1af2bed-664f-49ec-b2f3-7b835900ee5f-kube-api-access-pf6l9\") pod \"keystone-cron-29422801-mp9nl\" (UID: \"b1af2bed-664f-49ec-b2f3-7b835900ee5f\") " pod="openstack/keystone-cron-29422801-mp9nl" Dec 10 12:01:00 crc kubenswrapper[4780]: I1210 12:01:00.235387 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-cron-29422801-mp9nl"] Dec 10 12:01:00 crc kubenswrapper[4780]: I1210 12:01:00.300865 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b1af2bed-664f-49ec-b2f3-7b835900ee5f-combined-ca-bundle\") pod \"keystone-cron-29422801-mp9nl\" (UID: \"b1af2bed-664f-49ec-b2f3-7b835900ee5f\") " pod="openstack/keystone-cron-29422801-mp9nl" Dec 10 12:01:00 crc kubenswrapper[4780]: I1210 12:01:00.300948 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/b1af2bed-664f-49ec-b2f3-7b835900ee5f-fernet-keys\") pod \"keystone-cron-29422801-mp9nl\" (UID: \"b1af2bed-664f-49ec-b2f3-7b835900ee5f\") " pod="openstack/keystone-cron-29422801-mp9nl" Dec 10 12:01:00 crc kubenswrapper[4780]: I1210 12:01:00.301170 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b1af2bed-664f-49ec-b2f3-7b835900ee5f-config-data\") pod \"keystone-cron-29422801-mp9nl\" (UID: \"b1af2bed-664f-49ec-b2f3-7b835900ee5f\") " pod="openstack/keystone-cron-29422801-mp9nl" Dec 10 12:01:00 crc kubenswrapper[4780]: I1210 12:01:00.301199 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pf6l9\" (UniqueName: \"kubernetes.io/projected/b1af2bed-664f-49ec-b2f3-7b835900ee5f-kube-api-access-pf6l9\") pod \"keystone-cron-29422801-mp9nl\" (UID: \"b1af2bed-664f-49ec-b2f3-7b835900ee5f\") " pod="openstack/keystone-cron-29422801-mp9nl" Dec 10 12:01:00 crc kubenswrapper[4780]: I1210 12:01:00.310800 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for 
volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/b1af2bed-664f-49ec-b2f3-7b835900ee5f-fernet-keys\") pod \"keystone-cron-29422801-mp9nl\" (UID: \"b1af2bed-664f-49ec-b2f3-7b835900ee5f\") " pod="openstack/keystone-cron-29422801-mp9nl" Dec 10 12:01:00 crc kubenswrapper[4780]: I1210 12:01:00.311225 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b1af2bed-664f-49ec-b2f3-7b835900ee5f-combined-ca-bundle\") pod \"keystone-cron-29422801-mp9nl\" (UID: \"b1af2bed-664f-49ec-b2f3-7b835900ee5f\") " pod="openstack/keystone-cron-29422801-mp9nl" Dec 10 12:01:00 crc kubenswrapper[4780]: I1210 12:01:00.312394 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b1af2bed-664f-49ec-b2f3-7b835900ee5f-config-data\") pod \"keystone-cron-29422801-mp9nl\" (UID: \"b1af2bed-664f-49ec-b2f3-7b835900ee5f\") " pod="openstack/keystone-cron-29422801-mp9nl" Dec 10 12:01:00 crc kubenswrapper[4780]: I1210 12:01:00.323466 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pf6l9\" (UniqueName: \"kubernetes.io/projected/b1af2bed-664f-49ec-b2f3-7b835900ee5f-kube-api-access-pf6l9\") pod \"keystone-cron-29422801-mp9nl\" (UID: \"b1af2bed-664f-49ec-b2f3-7b835900ee5f\") " pod="openstack/keystone-cron-29422801-mp9nl" Dec 10 12:01:00 crc kubenswrapper[4780]: I1210 12:01:00.507048 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-cron-29422801-mp9nl" Dec 10 12:01:01 crc kubenswrapper[4780]: I1210 12:01:01.098493 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-cron-29422801-mp9nl"] Dec 10 12:01:01 crc kubenswrapper[4780]: I1210 12:01:01.439732 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29422801-mp9nl" event={"ID":"b1af2bed-664f-49ec-b2f3-7b835900ee5f","Type":"ContainerStarted","Data":"de26a82801372e04480ffdf02f8082680d697303e4717c102f903f7185135803"} Dec 10 12:01:02 crc kubenswrapper[4780]: I1210 12:01:02.457520 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29422801-mp9nl" event={"ID":"b1af2bed-664f-49ec-b2f3-7b835900ee5f","Type":"ContainerStarted","Data":"c07b3a990775cd500d9c7b2ec7ae3d1c6b2768f0df458e1e622dd4cf55fa28ee"} Dec 10 12:01:02 crc kubenswrapper[4780]: I1210 12:01:02.481409 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-cron-29422801-mp9nl" podStartSLOduration=2.481384487 podStartE2EDuration="2.481384487s" podCreationTimestamp="2025-12-10 12:01:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 12:01:02.48109836 +0000 UTC m=+4567.334491813" watchObservedRunningTime="2025-12-10 12:01:02.481384487 +0000 UTC m=+4567.334777930" Dec 10 12:01:03 crc kubenswrapper[4780]: E1210 12:01:03.962388 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 12:01:08 crc kubenswrapper[4780]: I1210 12:01:08.551671 4780 generic.go:334] "Generic (PLEG): container finished" podID="b1af2bed-664f-49ec-b2f3-7b835900ee5f" 
containerID="c07b3a990775cd500d9c7b2ec7ae3d1c6b2768f0df458e1e622dd4cf55fa28ee" exitCode=0 Dec 10 12:01:08 crc kubenswrapper[4780]: I1210 12:01:08.551783 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29422801-mp9nl" event={"ID":"b1af2bed-664f-49ec-b2f3-7b835900ee5f","Type":"ContainerDied","Data":"c07b3a990775cd500d9c7b2ec7ae3d1c6b2768f0df458e1e622dd4cf55fa28ee"} Dec 10 12:01:10 crc kubenswrapper[4780]: I1210 12:01:10.113476 4780 scope.go:117] "RemoveContainer" containerID="94a435be940f553f43abb3eb093a0737bc400f1f1f1a27fc525901a73de5cb74" Dec 10 12:01:10 crc kubenswrapper[4780]: E1210 12:01:10.114456 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xhdr5_openshift-machine-config-operator(6bf1dca1-b191-4796-b326-baac53e84045)\"" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" podUID="6bf1dca1-b191-4796-b326-baac53e84045" Dec 10 12:01:10 crc kubenswrapper[4780]: I1210 12:01:10.385518 4780 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-cron-29422801-mp9nl" Dec 10 12:01:10 crc kubenswrapper[4780]: I1210 12:01:10.521800 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/b1af2bed-664f-49ec-b2f3-7b835900ee5f-fernet-keys\") pod \"b1af2bed-664f-49ec-b2f3-7b835900ee5f\" (UID: \"b1af2bed-664f-49ec-b2f3-7b835900ee5f\") " Dec 10 12:01:10 crc kubenswrapper[4780]: I1210 12:01:10.521870 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b1af2bed-664f-49ec-b2f3-7b835900ee5f-combined-ca-bundle\") pod \"b1af2bed-664f-49ec-b2f3-7b835900ee5f\" (UID: \"b1af2bed-664f-49ec-b2f3-7b835900ee5f\") " Dec 10 12:01:10 crc kubenswrapper[4780]: I1210 12:01:10.522096 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pf6l9\" (UniqueName: \"kubernetes.io/projected/b1af2bed-664f-49ec-b2f3-7b835900ee5f-kube-api-access-pf6l9\") pod \"b1af2bed-664f-49ec-b2f3-7b835900ee5f\" (UID: \"b1af2bed-664f-49ec-b2f3-7b835900ee5f\") " Dec 10 12:01:10 crc kubenswrapper[4780]: I1210 12:01:10.522416 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b1af2bed-664f-49ec-b2f3-7b835900ee5f-config-data\") pod \"b1af2bed-664f-49ec-b2f3-7b835900ee5f\" (UID: \"b1af2bed-664f-49ec-b2f3-7b835900ee5f\") " Dec 10 12:01:10 crc kubenswrapper[4780]: I1210 12:01:10.532105 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b1af2bed-664f-49ec-b2f3-7b835900ee5f-kube-api-access-pf6l9" (OuterVolumeSpecName: "kube-api-access-pf6l9") pod "b1af2bed-664f-49ec-b2f3-7b835900ee5f" (UID: "b1af2bed-664f-49ec-b2f3-7b835900ee5f"). InnerVolumeSpecName "kube-api-access-pf6l9". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 12:01:10 crc kubenswrapper[4780]: I1210 12:01:10.532887 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b1af2bed-664f-49ec-b2f3-7b835900ee5f-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "b1af2bed-664f-49ec-b2f3-7b835900ee5f" (UID: "b1af2bed-664f-49ec-b2f3-7b835900ee5f"). InnerVolumeSpecName "fernet-keys". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 12:01:10 crc kubenswrapper[4780]: I1210 12:01:10.569835 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b1af2bed-664f-49ec-b2f3-7b835900ee5f-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "b1af2bed-664f-49ec-b2f3-7b835900ee5f" (UID: "b1af2bed-664f-49ec-b2f3-7b835900ee5f"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 12:01:10 crc kubenswrapper[4780]: I1210 12:01:10.589230 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29422801-mp9nl" event={"ID":"b1af2bed-664f-49ec-b2f3-7b835900ee5f","Type":"ContainerDied","Data":"de26a82801372e04480ffdf02f8082680d697303e4717c102f903f7185135803"} Dec 10 12:01:10 crc kubenswrapper[4780]: I1210 12:01:10.589295 4780 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="de26a82801372e04480ffdf02f8082680d697303e4717c102f903f7185135803" Dec 10 12:01:10 crc kubenswrapper[4780]: I1210 12:01:10.589300 4780 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-cron-29422801-mp9nl" Dec 10 12:01:10 crc kubenswrapper[4780]: I1210 12:01:10.627913 4780 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/b1af2bed-664f-49ec-b2f3-7b835900ee5f-fernet-keys\") on node \"crc\" DevicePath \"\"" Dec 10 12:01:10 crc kubenswrapper[4780]: I1210 12:01:10.627977 4780 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b1af2bed-664f-49ec-b2f3-7b835900ee5f-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 10 12:01:10 crc kubenswrapper[4780]: I1210 12:01:10.627995 4780 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pf6l9\" (UniqueName: \"kubernetes.io/projected/b1af2bed-664f-49ec-b2f3-7b835900ee5f-kube-api-access-pf6l9\") on node \"crc\" DevicePath \"\"" Dec 10 12:01:10 crc kubenswrapper[4780]: I1210 12:01:10.637415 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b1af2bed-664f-49ec-b2f3-7b835900ee5f-config-data" (OuterVolumeSpecName: "config-data") pod "b1af2bed-664f-49ec-b2f3-7b835900ee5f" (UID: "b1af2bed-664f-49ec-b2f3-7b835900ee5f"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 12:01:10 crc kubenswrapper[4780]: I1210 12:01:10.731231 4780 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b1af2bed-664f-49ec-b2f3-7b835900ee5f-config-data\") on node \"crc\" DevicePath \"\"" Dec 10 12:01:15 crc kubenswrapper[4780]: E1210 12:01:15.963568 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 12:01:15 crc kubenswrapper[4780]: E1210 12:01:15.965223 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 12:01:23 crc kubenswrapper[4780]: I1210 12:01:23.961110 4780 scope.go:117] "RemoveContainer" containerID="94a435be940f553f43abb3eb093a0737bc400f1f1f1a27fc525901a73de5cb74" Dec 10 12:01:23 crc kubenswrapper[4780]: E1210 12:01:23.962123 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xhdr5_openshift-machine-config-operator(6bf1dca1-b191-4796-b326-baac53e84045)\"" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" podUID="6bf1dca1-b191-4796-b326-baac53e84045" Dec 10 12:01:28 crc kubenswrapper[4780]: E1210 12:01:28.962818 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 12:01:28 crc kubenswrapper[4780]: E1210 12:01:28.963308 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 12:01:37 crc kubenswrapper[4780]: I1210 12:01:37.827619 4780 generic.go:334] "Generic (PLEG): container finished" podID="fa1fb8b2-2d6f-489e-8d87-3c88d49262b9" containerID="71adf83129d018112b6ac0b92f898aebad54c9c24ce8231afd7f53ea879de1d0" exitCode=2 Dec 10 12:01:37 crc kubenswrapper[4780]: I1210 12:01:37.827731 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-p6drx" event={"ID":"fa1fb8b2-2d6f-489e-8d87-3c88d49262b9","Type":"ContainerDied","Data":"71adf83129d018112b6ac0b92f898aebad54c9c24ce8231afd7f53ea879de1d0"} Dec 10 12:01:37 crc kubenswrapper[4780]: I1210 12:01:37.960892 4780 scope.go:117] "RemoveContainer" containerID="94a435be940f553f43abb3eb093a0737bc400f1f1f1a27fc525901a73de5cb74" Dec 10 12:01:37 crc kubenswrapper[4780]: E1210 12:01:37.962028 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" 
with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xhdr5_openshift-machine-config-operator(6bf1dca1-b191-4796-b326-baac53e84045)\"" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" podUID="6bf1dca1-b191-4796-b326-baac53e84045" Dec 10 12:01:39 crc kubenswrapper[4780]: I1210 12:01:39.438166 4780 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-p6drx" Dec 10 12:01:39 crc kubenswrapper[4780]: I1210 12:01:39.505566 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xhzf7\" (UniqueName: \"kubernetes.io/projected/fa1fb8b2-2d6f-489e-8d87-3c88d49262b9-kube-api-access-xhzf7\") pod \"fa1fb8b2-2d6f-489e-8d87-3c88d49262b9\" (UID: \"fa1fb8b2-2d6f-489e-8d87-3c88d49262b9\") " Dec 10 12:01:39 crc kubenswrapper[4780]: I1210 12:01:39.506519 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/fa1fb8b2-2d6f-489e-8d87-3c88d49262b9-ssh-key\") pod \"fa1fb8b2-2d6f-489e-8d87-3c88d49262b9\" (UID: \"fa1fb8b2-2d6f-489e-8d87-3c88d49262b9\") " Dec 10 12:01:39 crc kubenswrapper[4780]: I1210 12:01:39.506904 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/fa1fb8b2-2d6f-489e-8d87-3c88d49262b9-inventory\") pod \"fa1fb8b2-2d6f-489e-8d87-3c88d49262b9\" (UID: \"fa1fb8b2-2d6f-489e-8d87-3c88d49262b9\") " Dec 10 12:01:39 crc kubenswrapper[4780]: I1210 12:01:39.530327 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fa1fb8b2-2d6f-489e-8d87-3c88d49262b9-kube-api-access-xhzf7" (OuterVolumeSpecName: "kube-api-access-xhzf7") pod "fa1fb8b2-2d6f-489e-8d87-3c88d49262b9" (UID: "fa1fb8b2-2d6f-489e-8d87-3c88d49262b9"). InnerVolumeSpecName "kube-api-access-xhzf7". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 12:01:39 crc kubenswrapper[4780]: I1210 12:01:39.548952 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fa1fb8b2-2d6f-489e-8d87-3c88d49262b9-inventory" (OuterVolumeSpecName: "inventory") pod "fa1fb8b2-2d6f-489e-8d87-3c88d49262b9" (UID: "fa1fb8b2-2d6f-489e-8d87-3c88d49262b9"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 12:01:39 crc kubenswrapper[4780]: I1210 12:01:39.560963 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fa1fb8b2-2d6f-489e-8d87-3c88d49262b9-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "fa1fb8b2-2d6f-489e-8d87-3c88d49262b9" (UID: "fa1fb8b2-2d6f-489e-8d87-3c88d49262b9"). InnerVolumeSpecName "ssh-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 12:01:39 crc kubenswrapper[4780]: I1210 12:01:39.610107 4780 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xhzf7\" (UniqueName: \"kubernetes.io/projected/fa1fb8b2-2d6f-489e-8d87-3c88d49262b9-kube-api-access-xhzf7\") on node \"crc\" DevicePath \"\"" Dec 10 12:01:39 crc kubenswrapper[4780]: I1210 12:01:39.610165 4780 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/fa1fb8b2-2d6f-489e-8d87-3c88d49262b9-ssh-key\") on node \"crc\" DevicePath \"\"" Dec 10 12:01:39 crc kubenswrapper[4780]: I1210 12:01:39.610184 4780 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/fa1fb8b2-2d6f-489e-8d87-3c88d49262b9-inventory\") on node \"crc\" DevicePath \"\"" Dec 10 12:01:39 crc kubenswrapper[4780]: I1210 12:01:39.858077 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-p6drx" event={"ID":"fa1fb8b2-2d6f-489e-8d87-3c88d49262b9","Type":"ContainerDied","Data":"ff18133d17d7fdef10f0c35c2599de769c427de47dbd71eadda5e5ea0b1ce3d8"} Dec 10 12:01:39 crc kubenswrapper[4780]: I1210 12:01:39.858184 4780 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ff18133d17d7fdef10f0c35c2599de769c427de47dbd71eadda5e5ea0b1ce3d8" Dec 10 12:01:39 crc kubenswrapper[4780]: I1210 12:01:39.858200 4780 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-p6drx" Dec 10 12:01:41 crc kubenswrapper[4780]: E1210 12:01:41.967191 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 12:01:42 crc kubenswrapper[4780]: E1210 12:01:42.963129 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 12:01:49 crc kubenswrapper[4780]: I1210 12:01:49.959887 4780 scope.go:117] "RemoveContainer" containerID="94a435be940f553f43abb3eb093a0737bc400f1f1f1a27fc525901a73de5cb74" Dec 10 12:01:49 crc kubenswrapper[4780]: E1210 12:01:49.961184 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xhdr5_openshift-machine-config-operator(6bf1dca1-b191-4796-b326-baac53e84045)\"" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" podUID="6bf1dca1-b191-4796-b326-baac53e84045" Dec 10 12:01:53 crc kubenswrapper[4780]: E1210 12:01:53.963555 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 12:01:53 crc kubenswrapper[4780]: E1210 12:01:53.963566 
4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 12:02:04 crc kubenswrapper[4780]: I1210 12:02:04.298965 4780 scope.go:117] "RemoveContainer" containerID="94a435be940f553f43abb3eb093a0737bc400f1f1f1a27fc525901a73de5cb74" Dec 10 12:02:04 crc kubenswrapper[4780]: E1210 12:02:04.300248 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xhdr5_openshift-machine-config-operator(6bf1dca1-b191-4796-b326-baac53e84045)\"" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" podUID="6bf1dca1-b191-4796-b326-baac53e84045" Dec 10 12:02:05 crc kubenswrapper[4780]: E1210 12:02:05.979899 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 12:02:06 crc kubenswrapper[4780]: E1210 12:02:06.962658 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 12:02:18 crc kubenswrapper[4780]: I1210 12:02:18.959903 4780 scope.go:117] "RemoveContainer" containerID="94a435be940f553f43abb3eb093a0737bc400f1f1f1a27fc525901a73de5cb74" Dec 10 12:02:18 crc kubenswrapper[4780]: E1210 12:02:18.961224 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xhdr5_openshift-machine-config-operator(6bf1dca1-b191-4796-b326-baac53e84045)\"" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" podUID="6bf1dca1-b191-4796-b326-baac53e84045" Dec 10 12:02:18 crc kubenswrapper[4780]: I1210 12:02:18.965662 4780 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Dec 10 12:02:19 crc kubenswrapper[4780]: E1210 12:02:19.067829 4780 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested" Dec 10 12:02:19 crc kubenswrapper[4780]: E1210 12:02:19.067905 4780 kuberuntime_image.go:55] "Failed to pull image" err="initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested" Dec 10 12:02:19 crc kubenswrapper[4780]: E1210 12:02:19.068108 4780 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:heat-db-sync,Image:quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested,Command:[/bin/bash],Args:[-c /usr/bin/heat-manage --config-dir /etc/heat/heat.conf.d db_sync],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:true,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/heat/heat.conf.d/00-default.conf,SubPath:00-default.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:false,MountPath:/etc/heat/heat.conf.d/01-custom.conf,SubPath:01-custom.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/my.cnf,SubPath:my.cnf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-fh6ms,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42418,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:*42418,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod heat-db-sync-nd4t7_openstack(4ba2892c-316e-4819-a33c-d7b2b6803553): ErrImagePull: initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine" logger="UnhandledError" Dec 10 12:02:19 crc kubenswrapper[4780]: E1210 12:02:19.069414 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ErrImagePull: \"initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 12:02:19 crc kubenswrapper[4780]: E1210 12:02:19.966654 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 12:02:22 crc kubenswrapper[4780]: I1210 12:02:22.988807 4780 trace.go:236] Trace[190042772]: "Calculate volume metrics of registry-storage for pod openshift-image-registry/image-registry-66df7c8f76-l9h98" (10-Dec-2025 12:02:18.947) (total time: 4041ms): Dec 10 12:02:22 crc kubenswrapper[4780]: Trace[190042772]: [4.041206721s] [4.041206721s] END Dec 10 12:02:31 crc kubenswrapper[4780]: I1210 12:02:31.960771 4780 scope.go:117] "RemoveContainer" containerID="94a435be940f553f43abb3eb093a0737bc400f1f1f1a27fc525901a73de5cb74" Dec 10 12:02:31 crc kubenswrapper[4780]: E1210 12:02:31.961800 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xhdr5_openshift-machine-config-operator(6bf1dca1-b191-4796-b326-baac53e84045)\"" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" podUID="6bf1dca1-b191-4796-b326-baac53e84045" Dec 10 12:02:31 crc kubenswrapper[4780]: E1210 12:02:31.966626 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 12:02:33 crc kubenswrapper[4780]: E1210 12:02:33.088696 4780 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested" Dec 10 12:02:33 crc kubenswrapper[4780]: E1210 12:02:33.090085 4780 kuberuntime_image.go:55] "Failed to pull image" err="initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested" Dec 10 12:02:33 crc kubenswrapper[4780]: E1210 12:02:33.090504 4780 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:ceilometer-central-agent,Image:quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n5d9hcfh66bh66bh89h5cdh97h57ch598h68h5b5h689h56chc5h96h58ch687h5dfh5ddh645h68bhcchcdh56ch56fh9fh654hd4h8dhb9h74h59cq,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/var/lib/openstack/bin,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/openstack/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:ceilometer-central-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-gf2w8,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[/usr/bin/python3 /var/lib/openstack/bin/centralhealth.py],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:300,TimeoutSeconds:5,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ceilometer-0_openstack(317b5b7c-bb08-4441-a2ef-8c2d7390ada6): ErrImagePull: initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" logger="UnhandledError" Dec 10 12:02:33 crc kubenswrapper[4780]: E1210 12:02:33.091970 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ErrImagePull: \"initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 12:02:43 crc kubenswrapper[4780]: E1210 12:02:43.963034 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 12:02:44 crc kubenswrapper[4780]: I1210 12:02:44.960232 4780 scope.go:117] "RemoveContainer" containerID="94a435be940f553f43abb3eb093a0737bc400f1f1f1a27fc525901a73de5cb74" Dec 10 12:02:44 crc kubenswrapper[4780]: E1210 12:02:44.961120 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xhdr5_openshift-machine-config-operator(6bf1dca1-b191-4796-b326-baac53e84045)\"" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" podUID="6bf1dca1-b191-4796-b326-baac53e84045" Dec 10 12:02:46 crc kubenswrapper[4780]: E1210 12:02:46.964184 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 12:02:56 crc kubenswrapper[4780]: E1210 12:02:56.965797 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 12:02:57 crc kubenswrapper[4780]: I1210 12:02:57.960120 4780 scope.go:117] "RemoveContainer" containerID="94a435be940f553f43abb3eb093a0737bc400f1f1f1a27fc525901a73de5cb74" Dec 10 12:02:57 crc kubenswrapper[4780]: E1210 12:02:57.961235 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xhdr5_openshift-machine-config-operator(6bf1dca1-b191-4796-b326-baac53e84045)\"" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" podUID="6bf1dca1-b191-4796-b326-baac53e84045" Dec 10 12:02:59 crc kubenswrapper[4780]: E1210 12:02:59.962722 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 12:03:01 crc kubenswrapper[4780]: I1210 12:03:01.677000 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-lq6w7"] Dec 10 12:03:01 crc kubenswrapper[4780]: E1210 12:03:01.693700 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fa1fb8b2-2d6f-489e-8d87-3c88d49262b9" containerName="download-cache-edpm-deployment-openstack-edpm-ipam" Dec 10 12:03:01 crc kubenswrapper[4780]: I1210 12:03:01.693773 4780 
state_mem.go:107] "Deleted CPUSet assignment" podUID="fa1fb8b2-2d6f-489e-8d87-3c88d49262b9" containerName="download-cache-edpm-deployment-openstack-edpm-ipam" Dec 10 12:03:01 crc kubenswrapper[4780]: E1210 12:03:01.693859 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b1af2bed-664f-49ec-b2f3-7b835900ee5f" containerName="keystone-cron" Dec 10 12:03:01 crc kubenswrapper[4780]: I1210 12:03:01.693875 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="b1af2bed-664f-49ec-b2f3-7b835900ee5f" containerName="keystone-cron" Dec 10 12:03:01 crc kubenswrapper[4780]: I1210 12:03:01.696500 4780 memory_manager.go:354] "RemoveStaleState removing state" podUID="b1af2bed-664f-49ec-b2f3-7b835900ee5f" containerName="keystone-cron" Dec 10 12:03:01 crc kubenswrapper[4780]: I1210 12:03:01.696559 4780 memory_manager.go:354] "RemoveStaleState removing state" podUID="fa1fb8b2-2d6f-489e-8d87-3c88d49262b9" containerName="download-cache-edpm-deployment-openstack-edpm-ipam" Dec 10 12:03:01 crc kubenswrapper[4780]: I1210 12:03:01.706759 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-lq6w7" Dec 10 12:03:01 crc kubenswrapper[4780]: I1210 12:03:01.772488 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-lq6w7"] Dec 10 12:03:01 crc kubenswrapper[4780]: I1210 12:03:01.776568 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/695f12db-5e6f-4838-972f-b463ec79c831-utilities\") pod \"community-operators-lq6w7\" (UID: \"695f12db-5e6f-4838-972f-b463ec79c831\") " pod="openshift-marketplace/community-operators-lq6w7" Dec 10 12:03:01 crc kubenswrapper[4780]: I1210 12:03:01.777024 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/695f12db-5e6f-4838-972f-b463ec79c831-catalog-content\") pod \"community-operators-lq6w7\" (UID: \"695f12db-5e6f-4838-972f-b463ec79c831\") " pod="openshift-marketplace/community-operators-lq6w7" Dec 10 12:03:01 crc kubenswrapper[4780]: I1210 12:03:01.777680 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nqr78\" (UniqueName: \"kubernetes.io/projected/695f12db-5e6f-4838-972f-b463ec79c831-kube-api-access-nqr78\") pod \"community-operators-lq6w7\" (UID: \"695f12db-5e6f-4838-972f-b463ec79c831\") " pod="openshift-marketplace/community-operators-lq6w7" Dec 10 12:03:01 crc kubenswrapper[4780]: I1210 12:03:01.881280 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nqr78\" (UniqueName: \"kubernetes.io/projected/695f12db-5e6f-4838-972f-b463ec79c831-kube-api-access-nqr78\") pod \"community-operators-lq6w7\" (UID: \"695f12db-5e6f-4838-972f-b463ec79c831\") " pod="openshift-marketplace/community-operators-lq6w7" Dec 10 12:03:01 crc kubenswrapper[4780]: I1210 12:03:01.881469 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/695f12db-5e6f-4838-972f-b463ec79c831-utilities\") pod \"community-operators-lq6w7\" (UID: \"695f12db-5e6f-4838-972f-b463ec79c831\") " pod="openshift-marketplace/community-operators-lq6w7" Dec 10 12:03:01 crc kubenswrapper[4780]: I1210 12:03:01.881558 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/695f12db-5e6f-4838-972f-b463ec79c831-catalog-content\") pod \"community-operators-lq6w7\" (UID: \"695f12db-5e6f-4838-972f-b463ec79c831\") " pod="openshift-marketplace/community-operators-lq6w7" Dec 10 12:03:01 crc kubenswrapper[4780]: I1210 12:03:01.882576 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/695f12db-5e6f-4838-972f-b463ec79c831-utilities\") pod \"community-operators-lq6w7\" (UID: \"695f12db-5e6f-4838-972f-b463ec79c831\") " pod="openshift-marketplace/community-operators-lq6w7" Dec 10 12:03:01 crc kubenswrapper[4780]: I1210 12:03:01.882596 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/695f12db-5e6f-4838-972f-b463ec79c831-catalog-content\") pod \"community-operators-lq6w7\" (UID: \"695f12db-5e6f-4838-972f-b463ec79c831\") " pod="openshift-marketplace/community-operators-lq6w7" Dec 10 12:03:01 crc kubenswrapper[4780]: I1210 12:03:01.910675 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nqr78\" (UniqueName: \"kubernetes.io/projected/695f12db-5e6f-4838-972f-b463ec79c831-kube-api-access-nqr78\") pod \"community-operators-lq6w7\" (UID: \"695f12db-5e6f-4838-972f-b463ec79c831\") " pod="openshift-marketplace/community-operators-lq6w7" Dec 10 12:03:02 crc kubenswrapper[4780]: I1210 12:03:02.070857 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-lq6w7" Dec 10 12:03:03 crc kubenswrapper[4780]: I1210 12:03:03.050320 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-lq6w7"] Dec 10 12:03:03 crc kubenswrapper[4780]: W1210 12:03:03.056601 4780 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod695f12db_5e6f_4838_972f_b463ec79c831.slice/crio-7212376adca9514e5fa1351d023c06796bac75a9cafb1eeb4e9d3edc2989b2c7 WatchSource:0}: Error finding container 7212376adca9514e5fa1351d023c06796bac75a9cafb1eeb4e9d3edc2989b2c7: Status 404 returned error can't find the container with id 7212376adca9514e5fa1351d023c06796bac75a9cafb1eeb4e9d3edc2989b2c7 Dec 10 12:03:03 crc kubenswrapper[4780]: I1210 12:03:03.742020 4780 generic.go:334] "Generic (PLEG): container finished" podID="695f12db-5e6f-4838-972f-b463ec79c831" containerID="d04ea6378ee95d85e75684ff7a8a78d32d8e4d0d4d5748e4d93cd8d2b2d87e40" exitCode=0 Dec 10 12:03:03 crc kubenswrapper[4780]: I1210 12:03:03.742398 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-lq6w7" event={"ID":"695f12db-5e6f-4838-972f-b463ec79c831","Type":"ContainerDied","Data":"d04ea6378ee95d85e75684ff7a8a78d32d8e4d0d4d5748e4d93cd8d2b2d87e40"} Dec 10 12:03:03 crc kubenswrapper[4780]: I1210 12:03:03.742445 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-lq6w7" event={"ID":"695f12db-5e6f-4838-972f-b463ec79c831","Type":"ContainerStarted","Data":"7212376adca9514e5fa1351d023c06796bac75a9cafb1eeb4e9d3edc2989b2c7"} Dec 10 12:03:04 crc kubenswrapper[4780]: I1210 12:03:04.759605 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-lq6w7" event={"ID":"695f12db-5e6f-4838-972f-b463ec79c831","Type":"ContainerStarted","Data":"cea2effba2bd7dff241060b459ef095e4ada7110c4b901a7378ce67cb8cd6167"} Dec 
10 12:03:06 crc kubenswrapper[4780]: I1210 12:03:06.802528 4780 generic.go:334] "Generic (PLEG): container finished" podID="695f12db-5e6f-4838-972f-b463ec79c831" containerID="cea2effba2bd7dff241060b459ef095e4ada7110c4b901a7378ce67cb8cd6167" exitCode=0 Dec 10 12:03:06 crc kubenswrapper[4780]: I1210 12:03:06.802993 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-lq6w7" event={"ID":"695f12db-5e6f-4838-972f-b463ec79c831","Type":"ContainerDied","Data":"cea2effba2bd7dff241060b459ef095e4ada7110c4b901a7378ce67cb8cd6167"} Dec 10 12:03:08 crc kubenswrapper[4780]: I1210 12:03:08.834021 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-lq6w7" event={"ID":"695f12db-5e6f-4838-972f-b463ec79c831","Type":"ContainerStarted","Data":"31bdcc0a49f7fcea006798d52d3281034b1343a786b5b3e909276f585cd7e061"} Dec 10 12:03:08 crc kubenswrapper[4780]: I1210 12:03:08.885825 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-lq6w7" podStartSLOduration=3.991516788 podStartE2EDuration="7.885793398s" podCreationTimestamp="2025-12-10 12:03:01 +0000 UTC" firstStartedPulling="2025-12-10 12:03:03.745592758 +0000 UTC m=+4688.598986211" lastFinishedPulling="2025-12-10 12:03:07.639869368 +0000 UTC m=+4692.493262821" observedRunningTime="2025-12-10 12:03:08.88310232 +0000 UTC m=+4693.736495773" watchObservedRunningTime="2025-12-10 12:03:08.885793398 +0000 UTC m=+4693.739186841" Dec 10 12:03:08 crc kubenswrapper[4780]: I1210 12:03:08.960099 4780 scope.go:117] "RemoveContainer" containerID="94a435be940f553f43abb3eb093a0737bc400f1f1f1a27fc525901a73de5cb74" Dec 10 12:03:08 crc kubenswrapper[4780]: E1210 12:03:08.960552 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xhdr5_openshift-machine-config-operator(6bf1dca1-b191-4796-b326-baac53e84045)\"" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" podUID="6bf1dca1-b191-4796-b326-baac53e84045" Dec 10 12:03:08 crc kubenswrapper[4780]: E1210 12:03:08.963071 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 12:03:10 crc kubenswrapper[4780]: E1210 12:03:10.963202 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 12:03:12 crc kubenswrapper[4780]: I1210 12:03:12.071271 4780 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-lq6w7" Dec 10 12:03:12 crc kubenswrapper[4780]: I1210 12:03:12.071737 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-lq6w7" Dec 10 12:03:12 crc kubenswrapper[4780]: I1210 12:03:12.127229 4780 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" 
pod="openshift-marketplace/community-operators-lq6w7" Dec 10 12:03:12 crc kubenswrapper[4780]: I1210 12:03:12.962019 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-lq6w7" Dec 10 12:03:13 crc kubenswrapper[4780]: I1210 12:03:13.041881 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-lq6w7"] Dec 10 12:03:14 crc kubenswrapper[4780]: I1210 12:03:14.961732 4780 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-lq6w7" podUID="695f12db-5e6f-4838-972f-b463ec79c831" containerName="registry-server" containerID="cri-o://31bdcc0a49f7fcea006798d52d3281034b1343a786b5b3e909276f585cd7e061" gracePeriod=2 Dec 10 12:03:19 crc kubenswrapper[4780]: I1210 12:03:19.025874 4780 generic.go:334] "Generic (PLEG): container finished" podID="695f12db-5e6f-4838-972f-b463ec79c831" containerID="31bdcc0a49f7fcea006798d52d3281034b1343a786b5b3e909276f585cd7e061" exitCode=0 Dec 10 12:03:19 crc kubenswrapper[4780]: I1210 12:03:19.026020 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-lq6w7" event={"ID":"695f12db-5e6f-4838-972f-b463ec79c831","Type":"ContainerDied","Data":"31bdcc0a49f7fcea006798d52d3281034b1343a786b5b3e909276f585cd7e061"} Dec 10 12:03:19 crc kubenswrapper[4780]: I1210 12:03:19.026591 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-lq6w7" event={"ID":"695f12db-5e6f-4838-972f-b463ec79c831","Type":"ContainerDied","Data":"7212376adca9514e5fa1351d023c06796bac75a9cafb1eeb4e9d3edc2989b2c7"} Dec 10 12:03:19 crc kubenswrapper[4780]: I1210 12:03:19.026611 4780 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="7212376adca9514e5fa1351d023c06796bac75a9cafb1eeb4e9d3edc2989b2c7" Dec 10 12:03:19 crc kubenswrapper[4780]: I1210 12:03:19.064232 4780 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-lq6w7" Dec 10 12:03:19 crc kubenswrapper[4780]: I1210 12:03:19.227090 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/695f12db-5e6f-4838-972f-b463ec79c831-utilities\") pod \"695f12db-5e6f-4838-972f-b463ec79c831\" (UID: \"695f12db-5e6f-4838-972f-b463ec79c831\") " Dec 10 12:03:19 crc kubenswrapper[4780]: I1210 12:03:19.227389 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/695f12db-5e6f-4838-972f-b463ec79c831-catalog-content\") pod \"695f12db-5e6f-4838-972f-b463ec79c831\" (UID: \"695f12db-5e6f-4838-972f-b463ec79c831\") " Dec 10 12:03:19 crc kubenswrapper[4780]: I1210 12:03:19.227500 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nqr78\" (UniqueName: \"kubernetes.io/projected/695f12db-5e6f-4838-972f-b463ec79c831-kube-api-access-nqr78\") pod \"695f12db-5e6f-4838-972f-b463ec79c831\" (UID: \"695f12db-5e6f-4838-972f-b463ec79c831\") " Dec 10 12:03:19 crc kubenswrapper[4780]: I1210 12:03:19.230510 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/695f12db-5e6f-4838-972f-b463ec79c831-utilities" (OuterVolumeSpecName: "utilities") pod "695f12db-5e6f-4838-972f-b463ec79c831" (UID: "695f12db-5e6f-4838-972f-b463ec79c831"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 12:03:19 crc kubenswrapper[4780]: I1210 12:03:19.253294 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/695f12db-5e6f-4838-972f-b463ec79c831-kube-api-access-nqr78" (OuterVolumeSpecName: "kube-api-access-nqr78") pod "695f12db-5e6f-4838-972f-b463ec79c831" (UID: "695f12db-5e6f-4838-972f-b463ec79c831"). InnerVolumeSpecName "kube-api-access-nqr78". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 12:03:19 crc kubenswrapper[4780]: I1210 12:03:19.313069 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/695f12db-5e6f-4838-972f-b463ec79c831-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "695f12db-5e6f-4838-972f-b463ec79c831" (UID: "695f12db-5e6f-4838-972f-b463ec79c831"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 12:03:19 crc kubenswrapper[4780]: I1210 12:03:19.332809 4780 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/695f12db-5e6f-4838-972f-b463ec79c831-utilities\") on node \"crc\" DevicePath \"\"" Dec 10 12:03:19 crc kubenswrapper[4780]: I1210 12:03:19.332899 4780 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/695f12db-5e6f-4838-972f-b463ec79c831-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 10 12:03:19 crc kubenswrapper[4780]: I1210 12:03:19.333121 4780 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nqr78\" (UniqueName: \"kubernetes.io/projected/695f12db-5e6f-4838-972f-b463ec79c831-kube-api-access-nqr78\") on node \"crc\" DevicePath \"\"" Dec 10 12:03:20 crc kubenswrapper[4780]: I1210 12:03:20.044734 4780 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-lq6w7" Dec 10 12:03:20 crc kubenswrapper[4780]: I1210 12:03:20.104642 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-lq6w7"] Dec 10 12:03:20 crc kubenswrapper[4780]: I1210 12:03:20.123556 4780 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-lq6w7"] Dec 10 12:03:20 crc kubenswrapper[4780]: I1210 12:03:20.961908 4780 scope.go:117] "RemoveContainer" containerID="94a435be940f553f43abb3eb093a0737bc400f1f1f1a27fc525901a73de5cb74" Dec 10 12:03:20 crc kubenswrapper[4780]: E1210 12:03:20.962544 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xhdr5_openshift-machine-config-operator(6bf1dca1-b191-4796-b326-baac53e84045)\"" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" podUID="6bf1dca1-b191-4796-b326-baac53e84045" Dec 10 12:03:21 crc kubenswrapper[4780]: I1210 12:03:21.977448 4780 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="695f12db-5e6f-4838-972f-b463ec79c831" path="/var/lib/kubelet/pods/695f12db-5e6f-4838-972f-b463ec79c831/volumes" Dec 10 12:03:22 crc kubenswrapper[4780]: E1210 12:03:22.961488 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 12:03:24 crc kubenswrapper[4780]: I1210 12:03:24.671549 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-96dks"] Dec 10 12:03:24 crc kubenswrapper[4780]: E1210 12:03:24.673010 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="695f12db-5e6f-4838-972f-b463ec79c831" containerName="registry-server" Dec 10 12:03:24 crc kubenswrapper[4780]: I1210 12:03:24.673034 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="695f12db-5e6f-4838-972f-b463ec79c831" containerName="registry-server" Dec 10 12:03:24 crc kubenswrapper[4780]: E1210 12:03:24.673071 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="695f12db-5e6f-4838-972f-b463ec79c831" containerName="extract-utilities" Dec 10 12:03:24 crc kubenswrapper[4780]: I1210 12:03:24.673082 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="695f12db-5e6f-4838-972f-b463ec79c831" containerName="extract-utilities" Dec 10 12:03:24 crc kubenswrapper[4780]: E1210 12:03:24.673113 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="695f12db-5e6f-4838-972f-b463ec79c831" containerName="extract-content" Dec 10 12:03:24 crc kubenswrapper[4780]: I1210 12:03:24.673122 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="695f12db-5e6f-4838-972f-b463ec79c831" containerName="extract-content" Dec 10 12:03:24 crc kubenswrapper[4780]: I1210 12:03:24.673477 4780 memory_manager.go:354] "RemoveStaleState removing state" podUID="695f12db-5e6f-4838-972f-b463ec79c831" containerName="registry-server" Dec 10 12:03:24 crc kubenswrapper[4780]: I1210 12:03:24.676380 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-96dks" Dec 10 12:03:24 crc kubenswrapper[4780]: I1210 12:03:24.687693 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-96dks"] Dec 10 12:03:24 crc kubenswrapper[4780]: I1210 12:03:24.839740 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8pfsx\" (UniqueName: \"kubernetes.io/projected/359263ed-57ad-4a69-914f-f8badb0fc0f2-kube-api-access-8pfsx\") pod \"redhat-operators-96dks\" (UID: \"359263ed-57ad-4a69-914f-f8badb0fc0f2\") " pod="openshift-marketplace/redhat-operators-96dks" Dec 10 12:03:24 crc kubenswrapper[4780]: I1210 12:03:24.841911 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/359263ed-57ad-4a69-914f-f8badb0fc0f2-utilities\") pod \"redhat-operators-96dks\" (UID: \"359263ed-57ad-4a69-914f-f8badb0fc0f2\") " pod="openshift-marketplace/redhat-operators-96dks" Dec 10 12:03:24 crc kubenswrapper[4780]: I1210 12:03:24.842235 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/359263ed-57ad-4a69-914f-f8badb0fc0f2-catalog-content\") pod \"redhat-operators-96dks\" (UID: \"359263ed-57ad-4a69-914f-f8badb0fc0f2\") " pod="openshift-marketplace/redhat-operators-96dks" Dec 10 12:03:24 crc kubenswrapper[4780]: I1210 12:03:24.945369 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/359263ed-57ad-4a69-914f-f8badb0fc0f2-catalog-content\") pod \"redhat-operators-96dks\" (UID: \"359263ed-57ad-4a69-914f-f8badb0fc0f2\") " pod="openshift-marketplace/redhat-operators-96dks" Dec 10 12:03:24 crc kubenswrapper[4780]: I1210 12:03:24.945621 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8pfsx\" (UniqueName: \"kubernetes.io/projected/359263ed-57ad-4a69-914f-f8badb0fc0f2-kube-api-access-8pfsx\") pod \"redhat-operators-96dks\" (UID: \"359263ed-57ad-4a69-914f-f8badb0fc0f2\") " pod="openshift-marketplace/redhat-operators-96dks" Dec 10 12:03:24 crc kubenswrapper[4780]: I1210 12:03:24.945766 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/359263ed-57ad-4a69-914f-f8badb0fc0f2-utilities\") pod \"redhat-operators-96dks\" (UID: \"359263ed-57ad-4a69-914f-f8badb0fc0f2\") " pod="openshift-marketplace/redhat-operators-96dks" Dec 10 12:03:24 crc kubenswrapper[4780]: I1210 12:03:24.946752 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/359263ed-57ad-4a69-914f-f8badb0fc0f2-catalog-content\") pod \"redhat-operators-96dks\" (UID: \"359263ed-57ad-4a69-914f-f8badb0fc0f2\") " pod="openshift-marketplace/redhat-operators-96dks" Dec 10 12:03:24 crc kubenswrapper[4780]: I1210 12:03:24.946898 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/359263ed-57ad-4a69-914f-f8badb0fc0f2-utilities\") pod \"redhat-operators-96dks\" (UID: \"359263ed-57ad-4a69-914f-f8badb0fc0f2\") " pod="openshift-marketplace/redhat-operators-96dks" Dec 10 12:03:24 crc kubenswrapper[4780]: E1210 12:03:24.962956 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to 
\"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 12:03:24 crc kubenswrapper[4780]: I1210 12:03:24.981958 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8pfsx\" (UniqueName: \"kubernetes.io/projected/359263ed-57ad-4a69-914f-f8badb0fc0f2-kube-api-access-8pfsx\") pod \"redhat-operators-96dks\" (UID: \"359263ed-57ad-4a69-914f-f8badb0fc0f2\") " pod="openshift-marketplace/redhat-operators-96dks" Dec 10 12:03:25 crc kubenswrapper[4780]: I1210 12:03:25.020568 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-96dks" Dec 10 12:03:25 crc kubenswrapper[4780]: I1210 12:03:25.921586 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-96dks"] Dec 10 12:03:26 crc kubenswrapper[4780]: I1210 12:03:26.268026 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-96dks" event={"ID":"359263ed-57ad-4a69-914f-f8badb0fc0f2","Type":"ContainerStarted","Data":"8dd4f1019f387e87f5b1aefb03c762bf158d858773cd5fa5592b54807c229098"} Dec 10 12:03:27 crc kubenswrapper[4780]: I1210 12:03:27.303832 4780 generic.go:334] "Generic (PLEG): container finished" podID="359263ed-57ad-4a69-914f-f8badb0fc0f2" containerID="c62364b1b20cef5946269a2c299bc263fa9a87db546a0660ec069c3a792ab7db" exitCode=0 Dec 10 12:03:27 crc kubenswrapper[4780]: I1210 12:03:27.304589 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-96dks" event={"ID":"359263ed-57ad-4a69-914f-f8badb0fc0f2","Type":"ContainerDied","Data":"c62364b1b20cef5946269a2c299bc263fa9a87db546a0660ec069c3a792ab7db"} Dec 10 12:03:29 crc kubenswrapper[4780]: I1210 12:03:29.371955 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-96dks" event={"ID":"359263ed-57ad-4a69-914f-f8badb0fc0f2","Type":"ContainerStarted","Data":"cbc5b5fae2c0a54fbc4a455233a3397c267f2ceae7bae5b0c1bf2a03dc13e9a8"} Dec 10 12:03:34 crc kubenswrapper[4780]: I1210 12:03:34.036348 4780 scope.go:117] "RemoveContainer" containerID="94a435be940f553f43abb3eb093a0737bc400f1f1f1a27fc525901a73de5cb74" Dec 10 12:03:34 crc kubenswrapper[4780]: E1210 12:03:34.039111 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xhdr5_openshift-machine-config-operator(6bf1dca1-b191-4796-b326-baac53e84045)\"" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" podUID="6bf1dca1-b191-4796-b326-baac53e84045" Dec 10 12:03:34 crc kubenswrapper[4780]: E1210 12:03:34.039875 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 12:03:37 crc kubenswrapper[4780]: E1210 12:03:37.963274 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image 
\\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 12:03:44 crc kubenswrapper[4780]: I1210 12:03:44.959295 4780 scope.go:117] "RemoveContainer" containerID="94a435be940f553f43abb3eb093a0737bc400f1f1f1a27fc525901a73de5cb74" Dec 10 12:03:44 crc kubenswrapper[4780]: E1210 12:03:44.960482 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xhdr5_openshift-machine-config-operator(6bf1dca1-b191-4796-b326-baac53e84045)\"" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" podUID="6bf1dca1-b191-4796-b326-baac53e84045" Dec 10 12:03:47 crc kubenswrapper[4780]: E1210 12:03:47.964754 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 12:03:48 crc kubenswrapper[4780]: E1210 12:03:48.962670 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 12:03:50 crc kubenswrapper[4780]: I1210 12:03:50.401512 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-htlc6"] Dec 10 12:03:50 crc kubenswrapper[4780]: I1210 12:03:50.407889 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-htlc6" Dec 10 12:03:50 crc kubenswrapper[4780]: I1210 12:03:50.421306 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-htlc6"] Dec 10 12:03:50 crc kubenswrapper[4780]: I1210 12:03:50.515012 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xnzls\" (UniqueName: \"kubernetes.io/projected/ee8ce7eb-4885-4e7a-a804-cf2d3b091467-kube-api-access-xnzls\") pod \"certified-operators-htlc6\" (UID: \"ee8ce7eb-4885-4e7a-a804-cf2d3b091467\") " pod="openshift-marketplace/certified-operators-htlc6" Dec 10 12:03:50 crc kubenswrapper[4780]: I1210 12:03:50.515471 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ee8ce7eb-4885-4e7a-a804-cf2d3b091467-catalog-content\") pod \"certified-operators-htlc6\" (UID: \"ee8ce7eb-4885-4e7a-a804-cf2d3b091467\") " pod="openshift-marketplace/certified-operators-htlc6" Dec 10 12:03:50 crc kubenswrapper[4780]: I1210 12:03:50.515693 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ee8ce7eb-4885-4e7a-a804-cf2d3b091467-utilities\") pod \"certified-operators-htlc6\" (UID: \"ee8ce7eb-4885-4e7a-a804-cf2d3b091467\") " pod="openshift-marketplace/certified-operators-htlc6" Dec 10 12:03:50 crc kubenswrapper[4780]: I1210 12:03:50.621200 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ee8ce7eb-4885-4e7a-a804-cf2d3b091467-utilities\") pod \"certified-operators-htlc6\" (UID: \"ee8ce7eb-4885-4e7a-a804-cf2d3b091467\") " pod="openshift-marketplace/certified-operators-htlc6" Dec 10 12:03:50 crc kubenswrapper[4780]: I1210 12:03:50.621905 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xnzls\" (UniqueName: \"kubernetes.io/projected/ee8ce7eb-4885-4e7a-a804-cf2d3b091467-kube-api-access-xnzls\") pod \"certified-operators-htlc6\" (UID: \"ee8ce7eb-4885-4e7a-a804-cf2d3b091467\") " pod="openshift-marketplace/certified-operators-htlc6" Dec 10 12:03:50 crc kubenswrapper[4780]: I1210 12:03:50.622126 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ee8ce7eb-4885-4e7a-a804-cf2d3b091467-catalog-content\") pod \"certified-operators-htlc6\" (UID: \"ee8ce7eb-4885-4e7a-a804-cf2d3b091467\") " pod="openshift-marketplace/certified-operators-htlc6" Dec 10 12:03:50 crc kubenswrapper[4780]: I1210 12:03:50.622736 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ee8ce7eb-4885-4e7a-a804-cf2d3b091467-utilities\") pod \"certified-operators-htlc6\" (UID: \"ee8ce7eb-4885-4e7a-a804-cf2d3b091467\") " pod="openshift-marketplace/certified-operators-htlc6" Dec 10 12:03:50 crc kubenswrapper[4780]: I1210 12:03:50.622749 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ee8ce7eb-4885-4e7a-a804-cf2d3b091467-catalog-content\") pod \"certified-operators-htlc6\" (UID: \"ee8ce7eb-4885-4e7a-a804-cf2d3b091467\") " pod="openshift-marketplace/certified-operators-htlc6" Dec 10 12:03:50 crc kubenswrapper[4780]: I1210 12:03:50.663676 4780 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-xnzls\" (UniqueName: \"kubernetes.io/projected/ee8ce7eb-4885-4e7a-a804-cf2d3b091467-kube-api-access-xnzls\") pod \"certified-operators-htlc6\" (UID: \"ee8ce7eb-4885-4e7a-a804-cf2d3b091467\") " pod="openshift-marketplace/certified-operators-htlc6" Dec 10 12:03:50 crc kubenswrapper[4780]: I1210 12:03:50.828101 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-htlc6" Dec 10 12:03:50 crc kubenswrapper[4780]: I1210 12:03:50.909986 4780 generic.go:334] "Generic (PLEG): container finished" podID="359263ed-57ad-4a69-914f-f8badb0fc0f2" containerID="cbc5b5fae2c0a54fbc4a455233a3397c267f2ceae7bae5b0c1bf2a03dc13e9a8" exitCode=0 Dec 10 12:03:50 crc kubenswrapper[4780]: I1210 12:03:50.911126 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-96dks" event={"ID":"359263ed-57ad-4a69-914f-f8badb0fc0f2","Type":"ContainerDied","Data":"cbc5b5fae2c0a54fbc4a455233a3397c267f2ceae7bae5b0c1bf2a03dc13e9a8"} Dec 10 12:03:52 crc kubenswrapper[4780]: I1210 12:03:52.218803 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-htlc6"] Dec 10 12:03:52 crc kubenswrapper[4780]: W1210 12:03:52.247875 4780 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podee8ce7eb_4885_4e7a_a804_cf2d3b091467.slice/crio-2a2582e6915e1ea882c259eef3b475a7d9231c94d601d3d5a635940d6cad4bd7 WatchSource:0}: Error finding container 2a2582e6915e1ea882c259eef3b475a7d9231c94d601d3d5a635940d6cad4bd7: Status 404 returned error can't find the container with id 2a2582e6915e1ea882c259eef3b475a7d9231c94d601d3d5a635940d6cad4bd7 Dec 10 12:03:52 crc kubenswrapper[4780]: I1210 12:03:52.989880 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-96dks" event={"ID":"359263ed-57ad-4a69-914f-f8badb0fc0f2","Type":"ContainerStarted","Data":"89cb94f67e7a91f58d79bce4ac8c68c3048da5464227a32bef16fa3b234ec001"} Dec 10 12:03:52 crc kubenswrapper[4780]: I1210 12:03:52.993158 4780 generic.go:334] "Generic (PLEG): container finished" podID="ee8ce7eb-4885-4e7a-a804-cf2d3b091467" containerID="92efc2ea858ec359b468372ab6b7f6180cc133212f062dbbc87a2bade90cb283" exitCode=0 Dec 10 12:03:52 crc kubenswrapper[4780]: I1210 12:03:52.993271 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-htlc6" event={"ID":"ee8ce7eb-4885-4e7a-a804-cf2d3b091467","Type":"ContainerDied","Data":"92efc2ea858ec359b468372ab6b7f6180cc133212f062dbbc87a2bade90cb283"} Dec 10 12:03:52 crc kubenswrapper[4780]: I1210 12:03:52.993361 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-htlc6" event={"ID":"ee8ce7eb-4885-4e7a-a804-cf2d3b091467","Type":"ContainerStarted","Data":"2a2582e6915e1ea882c259eef3b475a7d9231c94d601d3d5a635940d6cad4bd7"} Dec 10 12:03:53 crc kubenswrapper[4780]: I1210 12:03:53.021893 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-96dks" podStartSLOduration=4.431394373 podStartE2EDuration="29.02185543s" podCreationTimestamp="2025-12-10 12:03:24 +0000 UTC" firstStartedPulling="2025-12-10 12:03:27.319796964 +0000 UTC m=+4712.173190407" lastFinishedPulling="2025-12-10 12:03:51.910258031 +0000 UTC m=+4736.763651464" observedRunningTime="2025-12-10 12:03:53.017059999 +0000 UTC m=+4737.870453462" 
watchObservedRunningTime="2025-12-10 12:03:53.02185543 +0000 UTC m=+4737.875248883" Dec 10 12:03:55 crc kubenswrapper[4780]: I1210 12:03:55.020702 4780 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-96dks" Dec 10 12:03:55 crc kubenswrapper[4780]: I1210 12:03:55.021174 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-96dks" Dec 10 12:03:55 crc kubenswrapper[4780]: I1210 12:03:55.022261 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-htlc6" event={"ID":"ee8ce7eb-4885-4e7a-a804-cf2d3b091467","Type":"ContainerStarted","Data":"c1c31c2b9522d63d0dfb3255f85e917a3e2c29367fbbeaf519f2c7bb7bf65232"} Dec 10 12:03:56 crc kubenswrapper[4780]: I1210 12:03:56.010062 4780 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/swift-proxy-6579b964d7-f7kj9" podUID="73526536-c600-49b3-b73d-2897a05ce69e" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 502" Dec 10 12:03:56 crc kubenswrapper[4780]: I1210 12:03:56.041523 4780 generic.go:334] "Generic (PLEG): container finished" podID="ee8ce7eb-4885-4e7a-a804-cf2d3b091467" containerID="c1c31c2b9522d63d0dfb3255f85e917a3e2c29367fbbeaf519f2c7bb7bf65232" exitCode=0 Dec 10 12:03:56 crc kubenswrapper[4780]: I1210 12:03:56.041612 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-htlc6" event={"ID":"ee8ce7eb-4885-4e7a-a804-cf2d3b091467","Type":"ContainerDied","Data":"c1c31c2b9522d63d0dfb3255f85e917a3e2c29367fbbeaf519f2c7bb7bf65232"} Dec 10 12:03:56 crc kubenswrapper[4780]: I1210 12:03:56.083040 4780 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-96dks" podUID="359263ed-57ad-4a69-914f-f8badb0fc0f2" containerName="registry-server" probeResult="failure" output=< Dec 10 12:03:56 crc kubenswrapper[4780]: timeout: failed to connect service ":50051" within 1s Dec 10 12:03:56 crc kubenswrapper[4780]: > Dec 10 12:03:56 crc kubenswrapper[4780]: I1210 12:03:56.960896 4780 scope.go:117] "RemoveContainer" containerID="94a435be940f553f43abb3eb093a0737bc400f1f1f1a27fc525901a73de5cb74" Dec 10 12:03:56 crc kubenswrapper[4780]: E1210 12:03:56.961764 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xhdr5_openshift-machine-config-operator(6bf1dca1-b191-4796-b326-baac53e84045)\"" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" podUID="6bf1dca1-b191-4796-b326-baac53e84045" Dec 10 12:03:57 crc kubenswrapper[4780]: I1210 12:03:57.060937 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-htlc6" event={"ID":"ee8ce7eb-4885-4e7a-a804-cf2d3b091467","Type":"ContainerStarted","Data":"ce107799c71c988b340b2f613c8b6429e8085334107eb73e5c42bf4e1eac1cdb"} Dec 10 12:03:57 crc kubenswrapper[4780]: I1210 12:03:57.118116 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-htlc6" podStartSLOduration=3.563166507 podStartE2EDuration="7.118080858s" podCreationTimestamp="2025-12-10 12:03:50 +0000 UTC" firstStartedPulling="2025-12-10 12:03:52.99612981 +0000 UTC m=+4737.849523243" lastFinishedPulling="2025-12-10 12:03:56.551044151 +0000 UTC m=+4741.404437594" 
observedRunningTime="2025-12-10 12:03:57.106966147 +0000 UTC m=+4741.960359590" watchObservedRunningTime="2025-12-10 12:03:57.118080858 +0000 UTC m=+4741.971474301" Dec 10 12:03:59 crc kubenswrapper[4780]: I1210 12:03:59.396666 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-m8phc"] Dec 10 12:03:59 crc kubenswrapper[4780]: I1210 12:03:59.402597 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-m8phc" Dec 10 12:03:59 crc kubenswrapper[4780]: I1210 12:03:59.427543 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-m8phc"] Dec 10 12:03:59 crc kubenswrapper[4780]: I1210 12:03:59.585056 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d48fbd01-21a3-4e1f-851d-46685fde565e-utilities\") pod \"redhat-marketplace-m8phc\" (UID: \"d48fbd01-21a3-4e1f-851d-46685fde565e\") " pod="openshift-marketplace/redhat-marketplace-m8phc" Dec 10 12:03:59 crc kubenswrapper[4780]: I1210 12:03:59.585127 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d48fbd01-21a3-4e1f-851d-46685fde565e-catalog-content\") pod \"redhat-marketplace-m8phc\" (UID: \"d48fbd01-21a3-4e1f-851d-46685fde565e\") " pod="openshift-marketplace/redhat-marketplace-m8phc" Dec 10 12:03:59 crc kubenswrapper[4780]: I1210 12:03:59.585702 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tjmn6\" (UniqueName: \"kubernetes.io/projected/d48fbd01-21a3-4e1f-851d-46685fde565e-kube-api-access-tjmn6\") pod \"redhat-marketplace-m8phc\" (UID: \"d48fbd01-21a3-4e1f-851d-46685fde565e\") " pod="openshift-marketplace/redhat-marketplace-m8phc" Dec 10 12:03:59 crc kubenswrapper[4780]: I1210 12:03:59.689390 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tjmn6\" (UniqueName: \"kubernetes.io/projected/d48fbd01-21a3-4e1f-851d-46685fde565e-kube-api-access-tjmn6\") pod \"redhat-marketplace-m8phc\" (UID: \"d48fbd01-21a3-4e1f-851d-46685fde565e\") " pod="openshift-marketplace/redhat-marketplace-m8phc" Dec 10 12:03:59 crc kubenswrapper[4780]: I1210 12:03:59.690179 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d48fbd01-21a3-4e1f-851d-46685fde565e-utilities\") pod \"redhat-marketplace-m8phc\" (UID: \"d48fbd01-21a3-4e1f-851d-46685fde565e\") " pod="openshift-marketplace/redhat-marketplace-m8phc" Dec 10 12:03:59 crc kubenswrapper[4780]: I1210 12:03:59.690333 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d48fbd01-21a3-4e1f-851d-46685fde565e-catalog-content\") pod \"redhat-marketplace-m8phc\" (UID: \"d48fbd01-21a3-4e1f-851d-46685fde565e\") " pod="openshift-marketplace/redhat-marketplace-m8phc" Dec 10 12:03:59 crc kubenswrapper[4780]: I1210 12:03:59.690883 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d48fbd01-21a3-4e1f-851d-46685fde565e-utilities\") pod \"redhat-marketplace-m8phc\" (UID: \"d48fbd01-21a3-4e1f-851d-46685fde565e\") " pod="openshift-marketplace/redhat-marketplace-m8phc" Dec 10 12:03:59 crc kubenswrapper[4780]: I1210 
12:03:59.691050 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d48fbd01-21a3-4e1f-851d-46685fde565e-catalog-content\") pod \"redhat-marketplace-m8phc\" (UID: \"d48fbd01-21a3-4e1f-851d-46685fde565e\") " pod="openshift-marketplace/redhat-marketplace-m8phc" Dec 10 12:03:59 crc kubenswrapper[4780]: I1210 12:03:59.724805 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tjmn6\" (UniqueName: \"kubernetes.io/projected/d48fbd01-21a3-4e1f-851d-46685fde565e-kube-api-access-tjmn6\") pod \"redhat-marketplace-m8phc\" (UID: \"d48fbd01-21a3-4e1f-851d-46685fde565e\") " pod="openshift-marketplace/redhat-marketplace-m8phc" Dec 10 12:03:59 crc kubenswrapper[4780]: I1210 12:03:59.743528 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-m8phc" Dec 10 12:03:59 crc kubenswrapper[4780]: E1210 12:03:59.963533 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 12:03:59 crc kubenswrapper[4780]: E1210 12:03:59.964762 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 12:04:00 crc kubenswrapper[4780]: I1210 12:04:00.395598 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-m8phc"] Dec 10 12:04:01 crc kubenswrapper[4780]: I1210 12:04:01.079667 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-htlc6" Dec 10 12:04:01 crc kubenswrapper[4780]: I1210 12:04:01.081527 4780 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-htlc6" Dec 10 12:04:01 crc kubenswrapper[4780]: I1210 12:04:01.156590 4780 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-htlc6" Dec 10 12:04:01 crc kubenswrapper[4780]: I1210 12:04:01.159389 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-m8phc" event={"ID":"d48fbd01-21a3-4e1f-851d-46685fde565e","Type":"ContainerStarted","Data":"07e67eeeb0e3b2e2c83a57f3a75ee7f0e8c69de988024d9fc4fbb038f0f78992"} Dec 10 12:04:01 crc kubenswrapper[4780]: I1210 12:04:01.309209 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-htlc6" Dec 10 12:04:02 crc kubenswrapper[4780]: I1210 12:04:02.174061 4780 generic.go:334] "Generic (PLEG): container finished" podID="d48fbd01-21a3-4e1f-851d-46685fde565e" containerID="072022fb48614915ce4b20b3d2956ef9bc08210c3fcda0ae7055e50d903215f0" exitCode=0 Dec 10 12:04:02 crc kubenswrapper[4780]: I1210 12:04:02.174157 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-m8phc" 
event={"ID":"d48fbd01-21a3-4e1f-851d-46685fde565e","Type":"ContainerDied","Data":"072022fb48614915ce4b20b3d2956ef9bc08210c3fcda0ae7055e50d903215f0"} Dec 10 12:04:03 crc kubenswrapper[4780]: I1210 12:04:03.786125 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-htlc6"] Dec 10 12:04:04 crc kubenswrapper[4780]: I1210 12:04:04.227243 4780 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-htlc6" podUID="ee8ce7eb-4885-4e7a-a804-cf2d3b091467" containerName="registry-server" containerID="cri-o://ce107799c71c988b340b2f613c8b6429e8085334107eb73e5c42bf4e1eac1cdb" gracePeriod=2 Dec 10 12:04:04 crc kubenswrapper[4780]: I1210 12:04:04.229644 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-m8phc" event={"ID":"d48fbd01-21a3-4e1f-851d-46685fde565e","Type":"ContainerStarted","Data":"b5d92c25ca06ba7bd525eaff6bf5b71a9831222dd1f9e9b7f5923ae835d93f21"} Dec 10 12:04:05 crc kubenswrapper[4780]: I1210 12:04:05.122200 4780 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-htlc6" Dec 10 12:04:05 crc kubenswrapper[4780]: I1210 12:04:05.252178 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xnzls\" (UniqueName: \"kubernetes.io/projected/ee8ce7eb-4885-4e7a-a804-cf2d3b091467-kube-api-access-xnzls\") pod \"ee8ce7eb-4885-4e7a-a804-cf2d3b091467\" (UID: \"ee8ce7eb-4885-4e7a-a804-cf2d3b091467\") " Dec 10 12:04:05 crc kubenswrapper[4780]: I1210 12:04:05.252596 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ee8ce7eb-4885-4e7a-a804-cf2d3b091467-utilities\") pod \"ee8ce7eb-4885-4e7a-a804-cf2d3b091467\" (UID: \"ee8ce7eb-4885-4e7a-a804-cf2d3b091467\") " Dec 10 12:04:05 crc kubenswrapper[4780]: I1210 12:04:05.252652 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ee8ce7eb-4885-4e7a-a804-cf2d3b091467-catalog-content\") pod \"ee8ce7eb-4885-4e7a-a804-cf2d3b091467\" (UID: \"ee8ce7eb-4885-4e7a-a804-cf2d3b091467\") " Dec 10 12:04:05 crc kubenswrapper[4780]: I1210 12:04:05.253547 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ee8ce7eb-4885-4e7a-a804-cf2d3b091467-utilities" (OuterVolumeSpecName: "utilities") pod "ee8ce7eb-4885-4e7a-a804-cf2d3b091467" (UID: "ee8ce7eb-4885-4e7a-a804-cf2d3b091467"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 12:04:05 crc kubenswrapper[4780]: I1210 12:04:05.254361 4780 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ee8ce7eb-4885-4e7a-a804-cf2d3b091467-utilities\") on node \"crc\" DevicePath \"\"" Dec 10 12:04:05 crc kubenswrapper[4780]: I1210 12:04:05.255680 4780 generic.go:334] "Generic (PLEG): container finished" podID="d48fbd01-21a3-4e1f-851d-46685fde565e" containerID="b5d92c25ca06ba7bd525eaff6bf5b71a9831222dd1f9e9b7f5923ae835d93f21" exitCode=0 Dec 10 12:04:05 crc kubenswrapper[4780]: I1210 12:04:05.255847 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-m8phc" event={"ID":"d48fbd01-21a3-4e1f-851d-46685fde565e","Type":"ContainerDied","Data":"b5d92c25ca06ba7bd525eaff6bf5b71a9831222dd1f9e9b7f5923ae835d93f21"} Dec 10 12:04:05 crc kubenswrapper[4780]: I1210 12:04:05.260750 4780 generic.go:334] "Generic (PLEG): container finished" podID="ee8ce7eb-4885-4e7a-a804-cf2d3b091467" containerID="ce107799c71c988b340b2f613c8b6429e8085334107eb73e5c42bf4e1eac1cdb" exitCode=0 Dec 10 12:04:05 crc kubenswrapper[4780]: I1210 12:04:05.260789 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-htlc6" event={"ID":"ee8ce7eb-4885-4e7a-a804-cf2d3b091467","Type":"ContainerDied","Data":"ce107799c71c988b340b2f613c8b6429e8085334107eb73e5c42bf4e1eac1cdb"} Dec 10 12:04:05 crc kubenswrapper[4780]: I1210 12:04:05.260822 4780 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-htlc6" Dec 10 12:04:05 crc kubenswrapper[4780]: I1210 12:04:05.260846 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-htlc6" event={"ID":"ee8ce7eb-4885-4e7a-a804-cf2d3b091467","Type":"ContainerDied","Data":"2a2582e6915e1ea882c259eef3b475a7d9231c94d601d3d5a635940d6cad4bd7"} Dec 10 12:04:05 crc kubenswrapper[4780]: I1210 12:04:05.260871 4780 scope.go:117] "RemoveContainer" containerID="ce107799c71c988b340b2f613c8b6429e8085334107eb73e5c42bf4e1eac1cdb" Dec 10 12:04:05 crc kubenswrapper[4780]: I1210 12:04:05.279632 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ee8ce7eb-4885-4e7a-a804-cf2d3b091467-kube-api-access-xnzls" (OuterVolumeSpecName: "kube-api-access-xnzls") pod "ee8ce7eb-4885-4e7a-a804-cf2d3b091467" (UID: "ee8ce7eb-4885-4e7a-a804-cf2d3b091467"). InnerVolumeSpecName "kube-api-access-xnzls". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 12:04:05 crc kubenswrapper[4780]: I1210 12:04:05.327637 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ee8ce7eb-4885-4e7a-a804-cf2d3b091467-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "ee8ce7eb-4885-4e7a-a804-cf2d3b091467" (UID: "ee8ce7eb-4885-4e7a-a804-cf2d3b091467"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 12:04:05 crc kubenswrapper[4780]: I1210 12:04:05.358773 4780 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ee8ce7eb-4885-4e7a-a804-cf2d3b091467-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 10 12:04:05 crc kubenswrapper[4780]: I1210 12:04:05.358828 4780 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xnzls\" (UniqueName: \"kubernetes.io/projected/ee8ce7eb-4885-4e7a-a804-cf2d3b091467-kube-api-access-xnzls\") on node \"crc\" DevicePath \"\"" Dec 10 12:04:05 crc kubenswrapper[4780]: I1210 12:04:05.359701 4780 scope.go:117] "RemoveContainer" containerID="c1c31c2b9522d63d0dfb3255f85e917a3e2c29367fbbeaf519f2c7bb7bf65232" Dec 10 12:04:05 crc kubenswrapper[4780]: I1210 12:04:05.412019 4780 scope.go:117] "RemoveContainer" containerID="92efc2ea858ec359b468372ab6b7f6180cc133212f062dbbc87a2bade90cb283" Dec 10 12:04:05 crc kubenswrapper[4780]: I1210 12:04:05.487690 4780 scope.go:117] "RemoveContainer" containerID="ce107799c71c988b340b2f613c8b6429e8085334107eb73e5c42bf4e1eac1cdb" Dec 10 12:04:05 crc kubenswrapper[4780]: E1210 12:04:05.488794 4780 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ce107799c71c988b340b2f613c8b6429e8085334107eb73e5c42bf4e1eac1cdb\": container with ID starting with ce107799c71c988b340b2f613c8b6429e8085334107eb73e5c42bf4e1eac1cdb not found: ID does not exist" containerID="ce107799c71c988b340b2f613c8b6429e8085334107eb73e5c42bf4e1eac1cdb" Dec 10 12:04:05 crc kubenswrapper[4780]: I1210 12:04:05.489130 4780 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ce107799c71c988b340b2f613c8b6429e8085334107eb73e5c42bf4e1eac1cdb"} err="failed to get container status \"ce107799c71c988b340b2f613c8b6429e8085334107eb73e5c42bf4e1eac1cdb\": rpc error: code = NotFound desc = could not find container \"ce107799c71c988b340b2f613c8b6429e8085334107eb73e5c42bf4e1eac1cdb\": container with ID starting with ce107799c71c988b340b2f613c8b6429e8085334107eb73e5c42bf4e1eac1cdb not found: ID does not exist" Dec 10 12:04:05 crc kubenswrapper[4780]: I1210 12:04:05.489182 4780 scope.go:117] "RemoveContainer" containerID="c1c31c2b9522d63d0dfb3255f85e917a3e2c29367fbbeaf519f2c7bb7bf65232" Dec 10 12:04:05 crc kubenswrapper[4780]: E1210 12:04:05.490185 4780 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c1c31c2b9522d63d0dfb3255f85e917a3e2c29367fbbeaf519f2c7bb7bf65232\": container with ID starting with c1c31c2b9522d63d0dfb3255f85e917a3e2c29367fbbeaf519f2c7bb7bf65232 not found: ID does not exist" containerID="c1c31c2b9522d63d0dfb3255f85e917a3e2c29367fbbeaf519f2c7bb7bf65232" Dec 10 12:04:05 crc kubenswrapper[4780]: I1210 12:04:05.490271 4780 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c1c31c2b9522d63d0dfb3255f85e917a3e2c29367fbbeaf519f2c7bb7bf65232"} err="failed to get container status \"c1c31c2b9522d63d0dfb3255f85e917a3e2c29367fbbeaf519f2c7bb7bf65232\": rpc error: code = NotFound desc = could not find container \"c1c31c2b9522d63d0dfb3255f85e917a3e2c29367fbbeaf519f2c7bb7bf65232\": container with ID starting with c1c31c2b9522d63d0dfb3255f85e917a3e2c29367fbbeaf519f2c7bb7bf65232 not found: ID does not exist" Dec 10 12:04:05 crc kubenswrapper[4780]: I1210 12:04:05.490290 4780 scope.go:117] "RemoveContainer" 
containerID="92efc2ea858ec359b468372ab6b7f6180cc133212f062dbbc87a2bade90cb283" Dec 10 12:04:05 crc kubenswrapper[4780]: E1210 12:04:05.490721 4780 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"92efc2ea858ec359b468372ab6b7f6180cc133212f062dbbc87a2bade90cb283\": container with ID starting with 92efc2ea858ec359b468372ab6b7f6180cc133212f062dbbc87a2bade90cb283 not found: ID does not exist" containerID="92efc2ea858ec359b468372ab6b7f6180cc133212f062dbbc87a2bade90cb283" Dec 10 12:04:05 crc kubenswrapper[4780]: I1210 12:04:05.490746 4780 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"92efc2ea858ec359b468372ab6b7f6180cc133212f062dbbc87a2bade90cb283"} err="failed to get container status \"92efc2ea858ec359b468372ab6b7f6180cc133212f062dbbc87a2bade90cb283\": rpc error: code = NotFound desc = could not find container \"92efc2ea858ec359b468372ab6b7f6180cc133212f062dbbc87a2bade90cb283\": container with ID starting with 92efc2ea858ec359b468372ab6b7f6180cc133212f062dbbc87a2bade90cb283 not found: ID does not exist" Dec 10 12:04:05 crc kubenswrapper[4780]: I1210 12:04:05.705960 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-htlc6"] Dec 10 12:04:05 crc kubenswrapper[4780]: I1210 12:04:05.724551 4780 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-htlc6"] Dec 10 12:04:05 crc kubenswrapper[4780]: I1210 12:04:05.977753 4780 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ee8ce7eb-4885-4e7a-a804-cf2d3b091467" path="/var/lib/kubelet/pods/ee8ce7eb-4885-4e7a-a804-cf2d3b091467/volumes" Dec 10 12:04:06 crc kubenswrapper[4780]: I1210 12:04:06.101573 4780 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-96dks" podUID="359263ed-57ad-4a69-914f-f8badb0fc0f2" containerName="registry-server" probeResult="failure" output=< Dec 10 12:04:06 crc kubenswrapper[4780]: timeout: failed to connect service ":50051" within 1s Dec 10 12:04:06 crc kubenswrapper[4780]: > Dec 10 12:04:07 crc kubenswrapper[4780]: I1210 12:04:07.332540 4780 fsHandler.go:133] fs: disk usage and inodes count on following dirs took 1.136099077s: [/var/lib/containers/storage/overlay/13efabe1314d5e2978fbf3d00c17f080786c3a653c8c3bd0b415d6524a6c306c/diff /var/log/pods/openstack_openstack-cell1-galera-0_ee80edf3-0250-44c9-acfb-e2f9a3ce4f4b/galera/0.log]; will not log again for this container unless duration exceeds 2s Dec 10 12:04:07 crc kubenswrapper[4780]: I1210 12:04:07.394674 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-m8phc" event={"ID":"d48fbd01-21a3-4e1f-851d-46685fde565e","Type":"ContainerStarted","Data":"ee147a302a50c102ff01130b84c558955bf3c32ab872f3f3f65aaa04ef0a541f"} Dec 10 12:04:07 crc kubenswrapper[4780]: I1210 12:04:07.508239 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-m8phc" podStartSLOduration=4.947152192 podStartE2EDuration="8.508192548s" podCreationTimestamp="2025-12-10 12:03:59 +0000 UTC" firstStartedPulling="2025-12-10 12:04:02.180231508 +0000 UTC m=+4747.033624951" lastFinishedPulling="2025-12-10 12:04:05.741271864 +0000 UTC m=+4750.594665307" observedRunningTime="2025-12-10 12:04:07.447082465 +0000 UTC m=+4752.300475918" watchObservedRunningTime="2025-12-10 12:04:07.508192548 +0000 UTC m=+4752.361585991" Dec 10 12:04:09 crc 
kubenswrapper[4780]: I1210 12:04:09.744731 4780 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-m8phc" Dec 10 12:04:09 crc kubenswrapper[4780]: I1210 12:04:09.745291 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-m8phc" Dec 10 12:04:09 crc kubenswrapper[4780]: I1210 12:04:09.960330 4780 scope.go:117] "RemoveContainer" containerID="94a435be940f553f43abb3eb093a0737bc400f1f1f1a27fc525901a73de5cb74" Dec 10 12:04:09 crc kubenswrapper[4780]: E1210 12:04:09.961205 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xhdr5_openshift-machine-config-operator(6bf1dca1-b191-4796-b326-baac53e84045)\"" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" podUID="6bf1dca1-b191-4796-b326-baac53e84045" Dec 10 12:04:10 crc kubenswrapper[4780]: I1210 12:04:10.803143 4780 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-marketplace-m8phc" podUID="d48fbd01-21a3-4e1f-851d-46685fde565e" containerName="registry-server" probeResult="failure" output=< Dec 10 12:04:10 crc kubenswrapper[4780]: timeout: failed to connect service ":50051" within 1s Dec 10 12:04:10 crc kubenswrapper[4780]: > Dec 10 12:04:11 crc kubenswrapper[4780]: E1210 12:04:11.966441 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 12:04:12 crc kubenswrapper[4780]: E1210 12:04:12.961876 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 12:04:15 crc kubenswrapper[4780]: I1210 12:04:15.090523 4780 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-96dks" Dec 10 12:04:15 crc kubenswrapper[4780]: I1210 12:04:15.171217 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-96dks" Dec 10 12:04:15 crc kubenswrapper[4780]: I1210 12:04:15.598192 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-96dks"] Dec 10 12:04:16 crc kubenswrapper[4780]: I1210 12:04:16.052206 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/download-cache-edpm-deployment-openstack-edpm-ipam-lmzcj"] Dec 10 12:04:16 crc kubenswrapper[4780]: E1210 12:04:16.053295 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ee8ce7eb-4885-4e7a-a804-cf2d3b091467" containerName="extract-content" Dec 10 12:04:16 crc kubenswrapper[4780]: I1210 12:04:16.053339 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="ee8ce7eb-4885-4e7a-a804-cf2d3b091467" containerName="extract-content" Dec 10 12:04:16 crc kubenswrapper[4780]: E1210 12:04:16.053395 4780 cpu_manager.go:410] "RemoveStaleState: removing container" 
podUID="ee8ce7eb-4885-4e7a-a804-cf2d3b091467" containerName="registry-server" Dec 10 12:04:16 crc kubenswrapper[4780]: I1210 12:04:16.053404 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="ee8ce7eb-4885-4e7a-a804-cf2d3b091467" containerName="registry-server" Dec 10 12:04:16 crc kubenswrapper[4780]: E1210 12:04:16.053442 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ee8ce7eb-4885-4e7a-a804-cf2d3b091467" containerName="extract-utilities" Dec 10 12:04:16 crc kubenswrapper[4780]: I1210 12:04:16.053452 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="ee8ce7eb-4885-4e7a-a804-cf2d3b091467" containerName="extract-utilities" Dec 10 12:04:16 crc kubenswrapper[4780]: I1210 12:04:16.053797 4780 memory_manager.go:354] "RemoveStaleState removing state" podUID="ee8ce7eb-4885-4e7a-a804-cf2d3b091467" containerName="registry-server" Dec 10 12:04:16 crc kubenswrapper[4780]: I1210 12:04:16.055219 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-lmzcj" Dec 10 12:04:16 crc kubenswrapper[4780]: I1210 12:04:16.060280 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Dec 10 12:04:16 crc kubenswrapper[4780]: I1210 12:04:16.060731 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-p2qrb" Dec 10 12:04:16 crc kubenswrapper[4780]: I1210 12:04:16.061101 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Dec 10 12:04:16 crc kubenswrapper[4780]: I1210 12:04:16.061441 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Dec 10 12:04:16 crc kubenswrapper[4780]: I1210 12:04:16.091137 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/download-cache-edpm-deployment-openstack-edpm-ipam-lmzcj"] Dec 10 12:04:16 crc kubenswrapper[4780]: I1210 12:04:16.157689 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/920ea895-bafa-45e4-9005-175e5114e673-ssh-key\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-lmzcj\" (UID: \"920ea895-bafa-45e4-9005-175e5114e673\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-lmzcj" Dec 10 12:04:16 crc kubenswrapper[4780]: I1210 12:04:16.157767 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xglwl\" (UniqueName: \"kubernetes.io/projected/920ea895-bafa-45e4-9005-175e5114e673-kube-api-access-xglwl\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-lmzcj\" (UID: \"920ea895-bafa-45e4-9005-175e5114e673\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-lmzcj" Dec 10 12:04:16 crc kubenswrapper[4780]: I1210 12:04:16.158150 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/920ea895-bafa-45e4-9005-175e5114e673-inventory\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-lmzcj\" (UID: \"920ea895-bafa-45e4-9005-175e5114e673\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-lmzcj" Dec 10 12:04:16 crc kubenswrapper[4780]: I1210 12:04:16.261252 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xglwl\" 
(UniqueName: \"kubernetes.io/projected/920ea895-bafa-45e4-9005-175e5114e673-kube-api-access-xglwl\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-lmzcj\" (UID: \"920ea895-bafa-45e4-9005-175e5114e673\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-lmzcj" Dec 10 12:04:16 crc kubenswrapper[4780]: I1210 12:04:16.261567 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/920ea895-bafa-45e4-9005-175e5114e673-inventory\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-lmzcj\" (UID: \"920ea895-bafa-45e4-9005-175e5114e673\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-lmzcj" Dec 10 12:04:16 crc kubenswrapper[4780]: I1210 12:04:16.261638 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/920ea895-bafa-45e4-9005-175e5114e673-ssh-key\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-lmzcj\" (UID: \"920ea895-bafa-45e4-9005-175e5114e673\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-lmzcj" Dec 10 12:04:16 crc kubenswrapper[4780]: I1210 12:04:16.269748 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/920ea895-bafa-45e4-9005-175e5114e673-ssh-key\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-lmzcj\" (UID: \"920ea895-bafa-45e4-9005-175e5114e673\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-lmzcj" Dec 10 12:04:16 crc kubenswrapper[4780]: I1210 12:04:16.270838 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/920ea895-bafa-45e4-9005-175e5114e673-inventory\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-lmzcj\" (UID: \"920ea895-bafa-45e4-9005-175e5114e673\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-lmzcj" Dec 10 12:04:16 crc kubenswrapper[4780]: I1210 12:04:16.280122 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xglwl\" (UniqueName: \"kubernetes.io/projected/920ea895-bafa-45e4-9005-175e5114e673-kube-api-access-xglwl\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-lmzcj\" (UID: \"920ea895-bafa-45e4-9005-175e5114e673\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-lmzcj" Dec 10 12:04:16 crc kubenswrapper[4780]: I1210 12:04:16.398862 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-lmzcj" Dec 10 12:04:16 crc kubenswrapper[4780]: I1210 12:04:16.756375 4780 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-96dks" podUID="359263ed-57ad-4a69-914f-f8badb0fc0f2" containerName="registry-server" containerID="cri-o://89cb94f67e7a91f58d79bce4ac8c68c3048da5464227a32bef16fa3b234ec001" gracePeriod=2 Dec 10 12:04:17 crc kubenswrapper[4780]: I1210 12:04:17.090376 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/download-cache-edpm-deployment-openstack-edpm-ipam-lmzcj"] Dec 10 12:04:17 crc kubenswrapper[4780]: W1210 12:04:17.114833 4780 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod920ea895_bafa_45e4_9005_175e5114e673.slice/crio-687bf130bea61850a39709cefca9cbd0986221b1d5b4cb3bb6c33c942a70dad0 WatchSource:0}: Error finding container 687bf130bea61850a39709cefca9cbd0986221b1d5b4cb3bb6c33c942a70dad0: Status 404 returned error can't find the container with id 687bf130bea61850a39709cefca9cbd0986221b1d5b4cb3bb6c33c942a70dad0 Dec 10 12:04:17 crc kubenswrapper[4780]: I1210 12:04:17.243058 4780 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-96dks" Dec 10 12:04:17 crc kubenswrapper[4780]: I1210 12:04:17.291735 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8pfsx\" (UniqueName: \"kubernetes.io/projected/359263ed-57ad-4a69-914f-f8badb0fc0f2-kube-api-access-8pfsx\") pod \"359263ed-57ad-4a69-914f-f8badb0fc0f2\" (UID: \"359263ed-57ad-4a69-914f-f8badb0fc0f2\") " Dec 10 12:04:17 crc kubenswrapper[4780]: I1210 12:04:17.292173 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/359263ed-57ad-4a69-914f-f8badb0fc0f2-utilities\") pod \"359263ed-57ad-4a69-914f-f8badb0fc0f2\" (UID: \"359263ed-57ad-4a69-914f-f8badb0fc0f2\") " Dec 10 12:04:17 crc kubenswrapper[4780]: I1210 12:04:17.292324 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/359263ed-57ad-4a69-914f-f8badb0fc0f2-catalog-content\") pod \"359263ed-57ad-4a69-914f-f8badb0fc0f2\" (UID: \"359263ed-57ad-4a69-914f-f8badb0fc0f2\") " Dec 10 12:04:17 crc kubenswrapper[4780]: I1210 12:04:17.294550 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/359263ed-57ad-4a69-914f-f8badb0fc0f2-utilities" (OuterVolumeSpecName: "utilities") pod "359263ed-57ad-4a69-914f-f8badb0fc0f2" (UID: "359263ed-57ad-4a69-914f-f8badb0fc0f2"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 12:04:17 crc kubenswrapper[4780]: I1210 12:04:17.305772 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/359263ed-57ad-4a69-914f-f8badb0fc0f2-kube-api-access-8pfsx" (OuterVolumeSpecName: "kube-api-access-8pfsx") pod "359263ed-57ad-4a69-914f-f8badb0fc0f2" (UID: "359263ed-57ad-4a69-914f-f8badb0fc0f2"). InnerVolumeSpecName "kube-api-access-8pfsx". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 12:04:17 crc kubenswrapper[4780]: I1210 12:04:17.397036 4780 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8pfsx\" (UniqueName: \"kubernetes.io/projected/359263ed-57ad-4a69-914f-f8badb0fc0f2-kube-api-access-8pfsx\") on node \"crc\" DevicePath \"\"" Dec 10 12:04:17 crc kubenswrapper[4780]: I1210 12:04:17.397102 4780 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/359263ed-57ad-4a69-914f-f8badb0fc0f2-utilities\") on node \"crc\" DevicePath \"\"" Dec 10 12:04:17 crc kubenswrapper[4780]: I1210 12:04:17.432075 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/359263ed-57ad-4a69-914f-f8badb0fc0f2-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "359263ed-57ad-4a69-914f-f8badb0fc0f2" (UID: "359263ed-57ad-4a69-914f-f8badb0fc0f2"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 12:04:17 crc kubenswrapper[4780]: I1210 12:04:17.500266 4780 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/359263ed-57ad-4a69-914f-f8badb0fc0f2-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 10 12:04:17 crc kubenswrapper[4780]: I1210 12:04:17.771160 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-lmzcj" event={"ID":"920ea895-bafa-45e4-9005-175e5114e673","Type":"ContainerStarted","Data":"687bf130bea61850a39709cefca9cbd0986221b1d5b4cb3bb6c33c942a70dad0"} Dec 10 12:04:17 crc kubenswrapper[4780]: I1210 12:04:17.775041 4780 generic.go:334] "Generic (PLEG): container finished" podID="359263ed-57ad-4a69-914f-f8badb0fc0f2" containerID="89cb94f67e7a91f58d79bce4ac8c68c3048da5464227a32bef16fa3b234ec001" exitCode=0 Dec 10 12:04:17 crc kubenswrapper[4780]: I1210 12:04:17.775075 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-96dks" event={"ID":"359263ed-57ad-4a69-914f-f8badb0fc0f2","Type":"ContainerDied","Data":"89cb94f67e7a91f58d79bce4ac8c68c3048da5464227a32bef16fa3b234ec001"} Dec 10 12:04:17 crc kubenswrapper[4780]: I1210 12:04:17.775114 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-96dks" event={"ID":"359263ed-57ad-4a69-914f-f8badb0fc0f2","Type":"ContainerDied","Data":"8dd4f1019f387e87f5b1aefb03c762bf158d858773cd5fa5592b54807c229098"} Dec 10 12:04:17 crc kubenswrapper[4780]: I1210 12:04:17.775133 4780 scope.go:117] "RemoveContainer" containerID="89cb94f67e7a91f58d79bce4ac8c68c3048da5464227a32bef16fa3b234ec001" Dec 10 12:04:17 crc kubenswrapper[4780]: I1210 12:04:17.775170 4780 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-96dks" Dec 10 12:04:17 crc kubenswrapper[4780]: I1210 12:04:17.983430 4780 scope.go:117] "RemoveContainer" containerID="cbc5b5fae2c0a54fbc4a455233a3397c267f2ceae7bae5b0c1bf2a03dc13e9a8" Dec 10 12:04:18 crc kubenswrapper[4780]: I1210 12:04:18.041305 4780 scope.go:117] "RemoveContainer" containerID="c62364b1b20cef5946269a2c299bc263fa9a87db546a0660ec069c3a792ab7db" Dec 10 12:04:18 crc kubenswrapper[4780]: I1210 12:04:18.041774 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-96dks"] Dec 10 12:04:18 crc kubenswrapper[4780]: I1210 12:04:18.061047 4780 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-96dks"] Dec 10 12:04:18 crc kubenswrapper[4780]: I1210 12:04:18.081035 4780 scope.go:117] "RemoveContainer" containerID="89cb94f67e7a91f58d79bce4ac8c68c3048da5464227a32bef16fa3b234ec001" Dec 10 12:04:18 crc kubenswrapper[4780]: E1210 12:04:18.082318 4780 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"89cb94f67e7a91f58d79bce4ac8c68c3048da5464227a32bef16fa3b234ec001\": container with ID starting with 89cb94f67e7a91f58d79bce4ac8c68c3048da5464227a32bef16fa3b234ec001 not found: ID does not exist" containerID="89cb94f67e7a91f58d79bce4ac8c68c3048da5464227a32bef16fa3b234ec001" Dec 10 12:04:18 crc kubenswrapper[4780]: I1210 12:04:18.082382 4780 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"89cb94f67e7a91f58d79bce4ac8c68c3048da5464227a32bef16fa3b234ec001"} err="failed to get container status \"89cb94f67e7a91f58d79bce4ac8c68c3048da5464227a32bef16fa3b234ec001\": rpc error: code = NotFound desc = could not find container \"89cb94f67e7a91f58d79bce4ac8c68c3048da5464227a32bef16fa3b234ec001\": container with ID starting with 89cb94f67e7a91f58d79bce4ac8c68c3048da5464227a32bef16fa3b234ec001 not found: ID does not exist" Dec 10 12:04:18 crc kubenswrapper[4780]: I1210 12:04:18.082422 4780 scope.go:117] "RemoveContainer" containerID="cbc5b5fae2c0a54fbc4a455233a3397c267f2ceae7bae5b0c1bf2a03dc13e9a8" Dec 10 12:04:18 crc kubenswrapper[4780]: E1210 12:04:18.083031 4780 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"cbc5b5fae2c0a54fbc4a455233a3397c267f2ceae7bae5b0c1bf2a03dc13e9a8\": container with ID starting with cbc5b5fae2c0a54fbc4a455233a3397c267f2ceae7bae5b0c1bf2a03dc13e9a8 not found: ID does not exist" containerID="cbc5b5fae2c0a54fbc4a455233a3397c267f2ceae7bae5b0c1bf2a03dc13e9a8" Dec 10 12:04:18 crc kubenswrapper[4780]: I1210 12:04:18.083133 4780 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cbc5b5fae2c0a54fbc4a455233a3397c267f2ceae7bae5b0c1bf2a03dc13e9a8"} err="failed to get container status \"cbc5b5fae2c0a54fbc4a455233a3397c267f2ceae7bae5b0c1bf2a03dc13e9a8\": rpc error: code = NotFound desc = could not find container \"cbc5b5fae2c0a54fbc4a455233a3397c267f2ceae7bae5b0c1bf2a03dc13e9a8\": container with ID starting with cbc5b5fae2c0a54fbc4a455233a3397c267f2ceae7bae5b0c1bf2a03dc13e9a8 not found: ID does not exist" Dec 10 12:04:18 crc kubenswrapper[4780]: I1210 12:04:18.083188 4780 scope.go:117] "RemoveContainer" containerID="c62364b1b20cef5946269a2c299bc263fa9a87db546a0660ec069c3a792ab7db" Dec 10 12:04:18 crc kubenswrapper[4780]: E1210 12:04:18.083597 4780 log.go:32] "ContainerStatus from runtime service failed" 
err="rpc error: code = NotFound desc = could not find container \"c62364b1b20cef5946269a2c299bc263fa9a87db546a0660ec069c3a792ab7db\": container with ID starting with c62364b1b20cef5946269a2c299bc263fa9a87db546a0660ec069c3a792ab7db not found: ID does not exist" containerID="c62364b1b20cef5946269a2c299bc263fa9a87db546a0660ec069c3a792ab7db" Dec 10 12:04:18 crc kubenswrapper[4780]: I1210 12:04:18.083633 4780 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c62364b1b20cef5946269a2c299bc263fa9a87db546a0660ec069c3a792ab7db"} err="failed to get container status \"c62364b1b20cef5946269a2c299bc263fa9a87db546a0660ec069c3a792ab7db\": rpc error: code = NotFound desc = could not find container \"c62364b1b20cef5946269a2c299bc263fa9a87db546a0660ec069c3a792ab7db\": container with ID starting with c62364b1b20cef5946269a2c299bc263fa9a87db546a0660ec069c3a792ab7db not found: ID does not exist" Dec 10 12:04:18 crc kubenswrapper[4780]: I1210 12:04:18.795176 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-lmzcj" event={"ID":"920ea895-bafa-45e4-9005-175e5114e673","Type":"ContainerStarted","Data":"9c2933630ac7e66c1cec983c8375e1af511d6a2bc7682be66a3b89cf675bf0dd"} Dec 10 12:04:18 crc kubenswrapper[4780]: I1210 12:04:18.831644 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-lmzcj" podStartSLOduration=2.174724547 podStartE2EDuration="2.831618864s" podCreationTimestamp="2025-12-10 12:04:16 +0000 UTC" firstStartedPulling="2025-12-10 12:04:17.119167535 +0000 UTC m=+4761.972560978" lastFinishedPulling="2025-12-10 12:04:17.776061852 +0000 UTC m=+4762.629455295" observedRunningTime="2025-12-10 12:04:18.817620641 +0000 UTC m=+4763.671014104" watchObservedRunningTime="2025-12-10 12:04:18.831618864 +0000 UTC m=+4763.685012307" Dec 10 12:04:19 crc kubenswrapper[4780]: I1210 12:04:19.811679 4780 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-m8phc" Dec 10 12:04:19 crc kubenswrapper[4780]: I1210 12:04:19.876713 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-m8phc" Dec 10 12:04:20 crc kubenswrapper[4780]: I1210 12:04:20.016107 4780 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="359263ed-57ad-4a69-914f-f8badb0fc0f2" path="/var/lib/kubelet/pods/359263ed-57ad-4a69-914f-f8badb0fc0f2/volumes" Dec 10 12:04:20 crc kubenswrapper[4780]: I1210 12:04:20.944729 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-m8phc"] Dec 10 12:04:21 crc kubenswrapper[4780]: I1210 12:04:21.831908 4780 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-m8phc" podUID="d48fbd01-21a3-4e1f-851d-46685fde565e" containerName="registry-server" containerID="cri-o://ee147a302a50c102ff01130b84c558955bf3c32ab872f3f3f65aaa04ef0a541f" gracePeriod=2 Dec 10 12:04:21 crc kubenswrapper[4780]: I1210 12:04:21.967716 4780 scope.go:117] "RemoveContainer" containerID="94a435be940f553f43abb3eb093a0737bc400f1f1f1a27fc525901a73de5cb74" Dec 10 12:04:21 crc kubenswrapper[4780]: E1210 12:04:21.968211 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-xhdr5_openshift-machine-config-operator(6bf1dca1-b191-4796-b326-baac53e84045)\"" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" podUID="6bf1dca1-b191-4796-b326-baac53e84045" Dec 10 12:04:22 crc kubenswrapper[4780]: I1210 12:04:22.580086 4780 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-m8phc" Dec 10 12:04:22 crc kubenswrapper[4780]: I1210 12:04:22.654757 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d48fbd01-21a3-4e1f-851d-46685fde565e-catalog-content\") pod \"d48fbd01-21a3-4e1f-851d-46685fde565e\" (UID: \"d48fbd01-21a3-4e1f-851d-46685fde565e\") " Dec 10 12:04:22 crc kubenswrapper[4780]: I1210 12:04:22.654877 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d48fbd01-21a3-4e1f-851d-46685fde565e-utilities\") pod \"d48fbd01-21a3-4e1f-851d-46685fde565e\" (UID: \"d48fbd01-21a3-4e1f-851d-46685fde565e\") " Dec 10 12:04:22 crc kubenswrapper[4780]: I1210 12:04:22.655077 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tjmn6\" (UniqueName: \"kubernetes.io/projected/d48fbd01-21a3-4e1f-851d-46685fde565e-kube-api-access-tjmn6\") pod \"d48fbd01-21a3-4e1f-851d-46685fde565e\" (UID: \"d48fbd01-21a3-4e1f-851d-46685fde565e\") " Dec 10 12:04:22 crc kubenswrapper[4780]: I1210 12:04:22.656178 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d48fbd01-21a3-4e1f-851d-46685fde565e-utilities" (OuterVolumeSpecName: "utilities") pod "d48fbd01-21a3-4e1f-851d-46685fde565e" (UID: "d48fbd01-21a3-4e1f-851d-46685fde565e"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 12:04:22 crc kubenswrapper[4780]: I1210 12:04:22.668805 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d48fbd01-21a3-4e1f-851d-46685fde565e-kube-api-access-tjmn6" (OuterVolumeSpecName: "kube-api-access-tjmn6") pod "d48fbd01-21a3-4e1f-851d-46685fde565e" (UID: "d48fbd01-21a3-4e1f-851d-46685fde565e"). InnerVolumeSpecName "kube-api-access-tjmn6". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 12:04:22 crc kubenswrapper[4780]: I1210 12:04:22.677876 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d48fbd01-21a3-4e1f-851d-46685fde565e-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "d48fbd01-21a3-4e1f-851d-46685fde565e" (UID: "d48fbd01-21a3-4e1f-851d-46685fde565e"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 12:04:22 crc kubenswrapper[4780]: I1210 12:04:22.758623 4780 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d48fbd01-21a3-4e1f-851d-46685fde565e-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 10 12:04:22 crc kubenswrapper[4780]: I1210 12:04:22.758705 4780 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d48fbd01-21a3-4e1f-851d-46685fde565e-utilities\") on node \"crc\" DevicePath \"\"" Dec 10 12:04:22 crc kubenswrapper[4780]: I1210 12:04:22.758719 4780 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tjmn6\" (UniqueName: \"kubernetes.io/projected/d48fbd01-21a3-4e1f-851d-46685fde565e-kube-api-access-tjmn6\") on node \"crc\" DevicePath \"\"" Dec 10 12:04:22 crc kubenswrapper[4780]: I1210 12:04:22.846673 4780 generic.go:334] "Generic (PLEG): container finished" podID="d48fbd01-21a3-4e1f-851d-46685fde565e" containerID="ee147a302a50c102ff01130b84c558955bf3c32ab872f3f3f65aaa04ef0a541f" exitCode=0 Dec 10 12:04:22 crc kubenswrapper[4780]: I1210 12:04:22.846724 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-m8phc" event={"ID":"d48fbd01-21a3-4e1f-851d-46685fde565e","Type":"ContainerDied","Data":"ee147a302a50c102ff01130b84c558955bf3c32ab872f3f3f65aaa04ef0a541f"} Dec 10 12:04:22 crc kubenswrapper[4780]: I1210 12:04:22.846745 4780 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-m8phc" Dec 10 12:04:22 crc kubenswrapper[4780]: I1210 12:04:22.846769 4780 scope.go:117] "RemoveContainer" containerID="ee147a302a50c102ff01130b84c558955bf3c32ab872f3f3f65aaa04ef0a541f" Dec 10 12:04:22 crc kubenswrapper[4780]: I1210 12:04:22.846755 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-m8phc" event={"ID":"d48fbd01-21a3-4e1f-851d-46685fde565e","Type":"ContainerDied","Data":"07e67eeeb0e3b2e2c83a57f3a75ee7f0e8c69de988024d9fc4fbb038f0f78992"} Dec 10 12:04:22 crc kubenswrapper[4780]: I1210 12:04:22.886779 4780 scope.go:117] "RemoveContainer" containerID="b5d92c25ca06ba7bd525eaff6bf5b71a9831222dd1f9e9b7f5923ae835d93f21" Dec 10 12:04:22 crc kubenswrapper[4780]: I1210 12:04:22.895078 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-m8phc"] Dec 10 12:04:22 crc kubenswrapper[4780]: I1210 12:04:22.907705 4780 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-m8phc"] Dec 10 12:04:22 crc kubenswrapper[4780]: I1210 12:04:22.915579 4780 scope.go:117] "RemoveContainer" containerID="072022fb48614915ce4b20b3d2956ef9bc08210c3fcda0ae7055e50d903215f0" Dec 10 12:04:22 crc kubenswrapper[4780]: I1210 12:04:22.979840 4780 scope.go:117] "RemoveContainer" containerID="ee147a302a50c102ff01130b84c558955bf3c32ab872f3f3f65aaa04ef0a541f" Dec 10 12:04:22 crc kubenswrapper[4780]: E1210 12:04:22.980624 4780 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ee147a302a50c102ff01130b84c558955bf3c32ab872f3f3f65aaa04ef0a541f\": container with ID starting with ee147a302a50c102ff01130b84c558955bf3c32ab872f3f3f65aaa04ef0a541f not found: ID does not exist" containerID="ee147a302a50c102ff01130b84c558955bf3c32ab872f3f3f65aaa04ef0a541f" Dec 10 12:04:22 crc kubenswrapper[4780]: I1210 12:04:22.980693 4780 
pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ee147a302a50c102ff01130b84c558955bf3c32ab872f3f3f65aaa04ef0a541f"} err="failed to get container status \"ee147a302a50c102ff01130b84c558955bf3c32ab872f3f3f65aaa04ef0a541f\": rpc error: code = NotFound desc = could not find container \"ee147a302a50c102ff01130b84c558955bf3c32ab872f3f3f65aaa04ef0a541f\": container with ID starting with ee147a302a50c102ff01130b84c558955bf3c32ab872f3f3f65aaa04ef0a541f not found: ID does not exist" Dec 10 12:04:22 crc kubenswrapper[4780]: I1210 12:04:22.980750 4780 scope.go:117] "RemoveContainer" containerID="b5d92c25ca06ba7bd525eaff6bf5b71a9831222dd1f9e9b7f5923ae835d93f21" Dec 10 12:04:22 crc kubenswrapper[4780]: E1210 12:04:22.981455 4780 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b5d92c25ca06ba7bd525eaff6bf5b71a9831222dd1f9e9b7f5923ae835d93f21\": container with ID starting with b5d92c25ca06ba7bd525eaff6bf5b71a9831222dd1f9e9b7f5923ae835d93f21 not found: ID does not exist" containerID="b5d92c25ca06ba7bd525eaff6bf5b71a9831222dd1f9e9b7f5923ae835d93f21" Dec 10 12:04:22 crc kubenswrapper[4780]: I1210 12:04:22.981515 4780 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b5d92c25ca06ba7bd525eaff6bf5b71a9831222dd1f9e9b7f5923ae835d93f21"} err="failed to get container status \"b5d92c25ca06ba7bd525eaff6bf5b71a9831222dd1f9e9b7f5923ae835d93f21\": rpc error: code = NotFound desc = could not find container \"b5d92c25ca06ba7bd525eaff6bf5b71a9831222dd1f9e9b7f5923ae835d93f21\": container with ID starting with b5d92c25ca06ba7bd525eaff6bf5b71a9831222dd1f9e9b7f5923ae835d93f21 not found: ID does not exist" Dec 10 12:04:22 crc kubenswrapper[4780]: I1210 12:04:22.981557 4780 scope.go:117] "RemoveContainer" containerID="072022fb48614915ce4b20b3d2956ef9bc08210c3fcda0ae7055e50d903215f0" Dec 10 12:04:22 crc kubenswrapper[4780]: E1210 12:04:22.982237 4780 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"072022fb48614915ce4b20b3d2956ef9bc08210c3fcda0ae7055e50d903215f0\": container with ID starting with 072022fb48614915ce4b20b3d2956ef9bc08210c3fcda0ae7055e50d903215f0 not found: ID does not exist" containerID="072022fb48614915ce4b20b3d2956ef9bc08210c3fcda0ae7055e50d903215f0" Dec 10 12:04:22 crc kubenswrapper[4780]: I1210 12:04:22.982306 4780 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"072022fb48614915ce4b20b3d2956ef9bc08210c3fcda0ae7055e50d903215f0"} err="failed to get container status \"072022fb48614915ce4b20b3d2956ef9bc08210c3fcda0ae7055e50d903215f0\": rpc error: code = NotFound desc = could not find container \"072022fb48614915ce4b20b3d2956ef9bc08210c3fcda0ae7055e50d903215f0\": container with ID starting with 072022fb48614915ce4b20b3d2956ef9bc08210c3fcda0ae7055e50d903215f0 not found: ID does not exist" Dec 10 12:04:24 crc kubenswrapper[4780]: E1210 12:04:24.027749 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 12:04:24 crc kubenswrapper[4780]: I1210 12:04:24.062173 4780 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" 
podUID="d48fbd01-21a3-4e1f-851d-46685fde565e" path="/var/lib/kubelet/pods/d48fbd01-21a3-4e1f-851d-46685fde565e/volumes" Dec 10 12:04:24 crc kubenswrapper[4780]: E1210 12:04:24.961592 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 12:04:35 crc kubenswrapper[4780]: I1210 12:04:35.017538 4780 scope.go:117] "RemoveContainer" containerID="94a435be940f553f43abb3eb093a0737bc400f1f1f1a27fc525901a73de5cb74" Dec 10 12:04:35 crc kubenswrapper[4780]: E1210 12:04:35.018945 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xhdr5_openshift-machine-config-operator(6bf1dca1-b191-4796-b326-baac53e84045)\"" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" podUID="6bf1dca1-b191-4796-b326-baac53e84045" Dec 10 12:04:35 crc kubenswrapper[4780]: E1210 12:04:35.972005 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 12:04:38 crc kubenswrapper[4780]: E1210 12:04:38.961960 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 12:04:46 crc kubenswrapper[4780]: E1210 12:04:46.972160 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 12:04:47 crc kubenswrapper[4780]: I1210 12:04:47.960154 4780 scope.go:117] "RemoveContainer" containerID="94a435be940f553f43abb3eb093a0737bc400f1f1f1a27fc525901a73de5cb74" Dec 10 12:04:47 crc kubenswrapper[4780]: E1210 12:04:47.962337 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xhdr5_openshift-machine-config-operator(6bf1dca1-b191-4796-b326-baac53e84045)\"" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" podUID="6bf1dca1-b191-4796-b326-baac53e84045" Dec 10 12:04:51 crc kubenswrapper[4780]: E1210 12:04:51.963964 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 12:05:01 crc kubenswrapper[4780]: I1210 12:05:01.960257 4780 
scope.go:117] "RemoveContainer" containerID="94a435be940f553f43abb3eb093a0737bc400f1f1f1a27fc525901a73de5cb74" Dec 10 12:05:01 crc kubenswrapper[4780]: E1210 12:05:01.961402 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xhdr5_openshift-machine-config-operator(6bf1dca1-b191-4796-b326-baac53e84045)\"" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" podUID="6bf1dca1-b191-4796-b326-baac53e84045" Dec 10 12:05:01 crc kubenswrapper[4780]: E1210 12:05:01.967098 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 12:05:06 crc kubenswrapper[4780]: E1210 12:05:06.963413 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 12:05:15 crc kubenswrapper[4780]: I1210 12:05:15.987392 4780 scope.go:117] "RemoveContainer" containerID="94a435be940f553f43abb3eb093a0737bc400f1f1f1a27fc525901a73de5cb74" Dec 10 12:05:15 crc kubenswrapper[4780]: E1210 12:05:15.988509 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xhdr5_openshift-machine-config-operator(6bf1dca1-b191-4796-b326-baac53e84045)\"" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" podUID="6bf1dca1-b191-4796-b326-baac53e84045" Dec 10 12:05:15 crc kubenswrapper[4780]: E1210 12:05:15.991407 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 12:05:19 crc kubenswrapper[4780]: E1210 12:05:19.963212 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 12:05:28 crc kubenswrapper[4780]: E1210 12:05:28.961713 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 12:05:30 crc kubenswrapper[4780]: I1210 12:05:30.959329 4780 scope.go:117] "RemoveContainer" containerID="94a435be940f553f43abb3eb093a0737bc400f1f1f1a27fc525901a73de5cb74" Dec 10 12:05:30 crc kubenswrapper[4780]: E1210 12:05:30.959824 4780 
pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xhdr5_openshift-machine-config-operator(6bf1dca1-b191-4796-b326-baac53e84045)\"" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" podUID="6bf1dca1-b191-4796-b326-baac53e84045" Dec 10 12:05:31 crc kubenswrapper[4780]: E1210 12:05:31.963887 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 12:05:41 crc kubenswrapper[4780]: E1210 12:05:41.962834 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 12:05:44 crc kubenswrapper[4780]: I1210 12:05:44.959613 4780 scope.go:117] "RemoveContainer" containerID="94a435be940f553f43abb3eb093a0737bc400f1f1f1a27fc525901a73de5cb74" Dec 10 12:05:44 crc kubenswrapper[4780]: E1210 12:05:44.960883 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xhdr5_openshift-machine-config-operator(6bf1dca1-b191-4796-b326-baac53e84045)\"" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" podUID="6bf1dca1-b191-4796-b326-baac53e84045" Dec 10 12:05:46 crc kubenswrapper[4780]: E1210 12:05:46.962766 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 12:05:56 crc kubenswrapper[4780]: E1210 12:05:56.072711 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 12:05:57 crc kubenswrapper[4780]: E1210 12:05:57.962529 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 12:05:59 crc kubenswrapper[4780]: I1210 12:05:59.959521 4780 scope.go:117] "RemoveContainer" containerID="94a435be940f553f43abb3eb093a0737bc400f1f1f1a27fc525901a73de5cb74" Dec 10 12:06:00 crc kubenswrapper[4780]: I1210 12:06:00.691838 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" 
event={"ID":"6bf1dca1-b191-4796-b326-baac53e84045","Type":"ContainerStarted","Data":"d58cc4151b0bc840795f2245c54950cefcb2fdd18000129400126c1f73af997a"} Dec 10 12:06:07 crc kubenswrapper[4780]: E1210 12:06:07.964565 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 12:06:08 crc kubenswrapper[4780]: E1210 12:06:08.963856 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 12:06:19 crc kubenswrapper[4780]: E1210 12:06:19.963440 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 12:06:20 crc kubenswrapper[4780]: E1210 12:06:20.963553 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 12:06:34 crc kubenswrapper[4780]: E1210 12:06:34.965245 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 12:06:35 crc kubenswrapper[4780]: E1210 12:06:35.969257 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 12:06:46 crc kubenswrapper[4780]: E1210 12:06:46.034036 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 12:06:47 crc kubenswrapper[4780]: E1210 12:06:47.961673 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 12:06:59 crc kubenswrapper[4780]: E1210 12:06:59.961474 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with 
ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 12:07:00 crc kubenswrapper[4780]: E1210 12:07:00.961557 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 12:07:10 crc kubenswrapper[4780]: E1210 12:07:10.962574 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 12:07:11 crc kubenswrapper[4780]: E1210 12:07:11.964431 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 12:07:21 crc kubenswrapper[4780]: I1210 12:07:21.964566 4780 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Dec 10 12:07:22 crc kubenswrapper[4780]: E1210 12:07:22.131711 4780 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested" Dec 10 12:07:22 crc kubenswrapper[4780]: E1210 12:07:22.131785 4780 kuberuntime_image.go:55] "Failed to pull image" err="initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested" Dec 10 12:07:22 crc kubenswrapper[4780]: E1210 12:07:22.132047 4780 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:heat-db-sync,Image:quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested,Command:[/bin/bash],Args:[-c /usr/bin/heat-manage --config-dir /etc/heat/heat.conf.d db_sync],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:true,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/heat/heat.conf.d/00-default.conf,SubPath:00-default.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:false,MountPath:/etc/heat/heat.conf.d/01-custom.conf,SubPath:01-custom.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/my.cnf,SubPath:my.cnf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-fh6ms,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42418,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:*42418,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod heat-db-sync-nd4t7_openstack(4ba2892c-316e-4819-a33c-d7b2b6803553): ErrImagePull: initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" logger="UnhandledError" Dec 10 12:07:22 crc kubenswrapper[4780]: E1210 12:07:22.133663 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ErrImagePull: \"initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 12:07:26 crc kubenswrapper[4780]: E1210 12:07:26.965155 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 12:07:35 crc kubenswrapper[4780]: E1210 12:07:35.972748 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 12:07:39 crc kubenswrapper[4780]: I1210 12:07:39.909035 4780 trace.go:236] Trace[710412244]: "Calculate volume metrics of registry-storage for pod openshift-image-registry/image-registry-66df7c8f76-l9h98" (10-Dec-2025 12:07:38.886) (total time: 1022ms): Dec 10 12:07:39 crc kubenswrapper[4780]: Trace[710412244]: [1.02233225s] [1.02233225s] END Dec 10 12:07:41 crc kubenswrapper[4780]: E1210 12:07:41.083492 4780 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested" Dec 10 12:07:41 crc kubenswrapper[4780]: E1210 12:07:41.083616 4780 kuberuntime_image.go:55] "Failed to pull image" err="initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested" Dec 10 12:07:41 crc kubenswrapper[4780]: E1210 12:07:41.083844 4780 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:ceilometer-central-agent,Image:quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n5d9hcfh66bh66bh89h5cdh97h57ch598h68h5b5h689h56chc5h96h58ch687h5dfh5ddh645h68bhcchcdh56ch56fh9fh654hd4h8dhb9h74h59cq,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/var/lib/openstack/bin,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/openstack/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:ceilometer-central-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-gf2w8,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[/usr/bin/python3 /var/lib/openstack/bin/centralhealth.py],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:300,TimeoutSeconds:5,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ceilometer-0_openstack(317b5b7c-bb08-4441-a2ef-8c2d7390ada6): ErrImagePull: initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" logger="UnhandledError" Dec 10 12:07:41 crc kubenswrapper[4780]: E1210 12:07:41.085178 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ErrImagePull: \"initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 12:07:48 crc kubenswrapper[4780]: E1210 12:07:48.961626 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 12:07:52 crc kubenswrapper[4780]: E1210 12:07:52.968002 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 12:08:00 crc kubenswrapper[4780]: E1210 12:08:00.965115 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 12:08:05 crc kubenswrapper[4780]: E1210 12:08:05.973845 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 12:08:12 crc kubenswrapper[4780]: E1210 12:08:12.962010 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 12:08:19 crc kubenswrapper[4780]: E1210 12:08:19.965428 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 12:08:26 crc kubenswrapper[4780]: E1210 12:08:26.963514 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 12:08:27 crc kubenswrapper[4780]: I1210 12:08:27.475749 4780 patch_prober.go:28] interesting pod/machine-config-daemon-xhdr5 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 10 12:08:27 crc kubenswrapper[4780]: I1210 12:08:27.475835 4780 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" podUID="6bf1dca1-b191-4796-b326-baac53e84045" containerName="machine-config-daemon" 
probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 10 12:08:34 crc kubenswrapper[4780]: E1210 12:08:34.962772 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 12:08:37 crc kubenswrapper[4780]: E1210 12:08:37.964568 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 12:08:49 crc kubenswrapper[4780]: E1210 12:08:49.964601 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 12:08:49 crc kubenswrapper[4780]: E1210 12:08:49.964722 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 12:08:57 crc kubenswrapper[4780]: I1210 12:08:57.476212 4780 patch_prober.go:28] interesting pod/machine-config-daemon-xhdr5 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 10 12:08:57 crc kubenswrapper[4780]: I1210 12:08:57.477186 4780 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" podUID="6bf1dca1-b191-4796-b326-baac53e84045" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 10 12:09:01 crc kubenswrapper[4780]: E1210 12:09:01.963280 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 12:09:03 crc kubenswrapper[4780]: E1210 12:09:03.964853 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 12:09:14 crc kubenswrapper[4780]: E1210 12:09:14.973509 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image 
\\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 12:09:14 crc kubenswrapper[4780]: E1210 12:09:14.989218 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 12:09:27 crc kubenswrapper[4780]: I1210 12:09:27.475857 4780 patch_prober.go:28] interesting pod/machine-config-daemon-xhdr5 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 10 12:09:27 crc kubenswrapper[4780]: I1210 12:09:27.476666 4780 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" podUID="6bf1dca1-b191-4796-b326-baac53e84045" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 10 12:09:27 crc kubenswrapper[4780]: I1210 12:09:27.476733 4780 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" Dec 10 12:09:27 crc kubenswrapper[4780]: I1210 12:09:27.478150 4780 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"d58cc4151b0bc840795f2245c54950cefcb2fdd18000129400126c1f73af997a"} pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Dec 10 12:09:27 crc kubenswrapper[4780]: I1210 12:09:27.478228 4780 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" podUID="6bf1dca1-b191-4796-b326-baac53e84045" containerName="machine-config-daemon" containerID="cri-o://d58cc4151b0bc840795f2245c54950cefcb2fdd18000129400126c1f73af997a" gracePeriod=600 Dec 10 12:09:27 crc kubenswrapper[4780]: E1210 12:09:27.961211 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 12:09:28 crc kubenswrapper[4780]: I1210 12:09:28.532625 4780 generic.go:334] "Generic (PLEG): container finished" podID="6bf1dca1-b191-4796-b326-baac53e84045" containerID="d58cc4151b0bc840795f2245c54950cefcb2fdd18000129400126c1f73af997a" exitCode=0 Dec 10 12:09:28 crc kubenswrapper[4780]: I1210 12:09:28.532721 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" event={"ID":"6bf1dca1-b191-4796-b326-baac53e84045","Type":"ContainerDied","Data":"d58cc4151b0bc840795f2245c54950cefcb2fdd18000129400126c1f73af997a"} Dec 10 12:09:28 crc kubenswrapper[4780]: I1210 12:09:28.533449 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" 
event={"ID":"6bf1dca1-b191-4796-b326-baac53e84045","Type":"ContainerStarted","Data":"780e216eb9a89407d8eede2a2a33f67ab4bb1144b34a671b270f0500ea31a372"} Dec 10 12:09:28 crc kubenswrapper[4780]: I1210 12:09:28.533526 4780 scope.go:117] "RemoveContainer" containerID="94a435be940f553f43abb3eb093a0737bc400f1f1f1a27fc525901a73de5cb74" Dec 10 12:09:29 crc kubenswrapper[4780]: E1210 12:09:29.964760 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 12:09:41 crc kubenswrapper[4780]: E1210 12:09:41.962682 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 12:09:41 crc kubenswrapper[4780]: E1210 12:09:41.963185 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 12:09:48 crc kubenswrapper[4780]: I1210 12:09:48.088583 4780 scope.go:117] "RemoveContainer" containerID="cea2effba2bd7dff241060b459ef095e4ada7110c4b901a7378ce67cb8cd6167" Dec 10 12:09:48 crc kubenswrapper[4780]: I1210 12:09:48.147442 4780 scope.go:117] "RemoveContainer" containerID="31bdcc0a49f7fcea006798d52d3281034b1343a786b5b3e909276f585cd7e061" Dec 10 12:09:48 crc kubenswrapper[4780]: I1210 12:09:48.202233 4780 scope.go:117] "RemoveContainer" containerID="d04ea6378ee95d85e75684ff7a8a78d32d8e4d0d4d5748e4d93cd8d2b2d87e40" Dec 10 12:09:53 crc kubenswrapper[4780]: E1210 12:09:53.962646 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 12:09:56 crc kubenswrapper[4780]: E1210 12:09:56.962399 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 12:10:07 crc kubenswrapper[4780]: E1210 12:10:07.961905 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 12:10:10 crc kubenswrapper[4780]: E1210 12:10:10.961821 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image 
\\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 12:10:20 crc kubenswrapper[4780]: E1210 12:10:20.962240 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 12:10:24 crc kubenswrapper[4780]: E1210 12:10:24.962631 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 12:10:33 crc kubenswrapper[4780]: E1210 12:10:33.964950 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 12:10:36 crc kubenswrapper[4780]: E1210 12:10:36.962635 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 12:10:42 crc kubenswrapper[4780]: I1210 12:10:42.959088 4780 generic.go:334] "Generic (PLEG): container finished" podID="920ea895-bafa-45e4-9005-175e5114e673" containerID="9c2933630ac7e66c1cec983c8375e1af511d6a2bc7682be66a3b89cf675bf0dd" exitCode=2 Dec 10 12:10:42 crc kubenswrapper[4780]: I1210 12:10:42.959168 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-lmzcj" event={"ID":"920ea895-bafa-45e4-9005-175e5114e673","Type":"ContainerDied","Data":"9c2933630ac7e66c1cec983c8375e1af511d6a2bc7682be66a3b89cf675bf0dd"} Dec 10 12:10:44 crc kubenswrapper[4780]: I1210 12:10:44.601194 4780 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-lmzcj" Dec 10 12:10:44 crc kubenswrapper[4780]: I1210 12:10:44.807161 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xglwl\" (UniqueName: \"kubernetes.io/projected/920ea895-bafa-45e4-9005-175e5114e673-kube-api-access-xglwl\") pod \"920ea895-bafa-45e4-9005-175e5114e673\" (UID: \"920ea895-bafa-45e4-9005-175e5114e673\") " Dec 10 12:10:44 crc kubenswrapper[4780]: I1210 12:10:44.807231 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/920ea895-bafa-45e4-9005-175e5114e673-inventory\") pod \"920ea895-bafa-45e4-9005-175e5114e673\" (UID: \"920ea895-bafa-45e4-9005-175e5114e673\") " Dec 10 12:10:44 crc kubenswrapper[4780]: I1210 12:10:44.807271 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/920ea895-bafa-45e4-9005-175e5114e673-ssh-key\") pod \"920ea895-bafa-45e4-9005-175e5114e673\" (UID: \"920ea895-bafa-45e4-9005-175e5114e673\") " Dec 10 12:10:44 crc kubenswrapper[4780]: I1210 12:10:44.817366 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/920ea895-bafa-45e4-9005-175e5114e673-kube-api-access-xglwl" (OuterVolumeSpecName: "kube-api-access-xglwl") pod "920ea895-bafa-45e4-9005-175e5114e673" (UID: "920ea895-bafa-45e4-9005-175e5114e673"). InnerVolumeSpecName "kube-api-access-xglwl". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 12:10:44 crc kubenswrapper[4780]: I1210 12:10:44.864603 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/920ea895-bafa-45e4-9005-175e5114e673-inventory" (OuterVolumeSpecName: "inventory") pod "920ea895-bafa-45e4-9005-175e5114e673" (UID: "920ea895-bafa-45e4-9005-175e5114e673"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 12:10:44 crc kubenswrapper[4780]: I1210 12:10:44.865417 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/920ea895-bafa-45e4-9005-175e5114e673-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "920ea895-bafa-45e4-9005-175e5114e673" (UID: "920ea895-bafa-45e4-9005-175e5114e673"). InnerVolumeSpecName "ssh-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 12:10:44 crc kubenswrapper[4780]: I1210 12:10:44.911896 4780 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/920ea895-bafa-45e4-9005-175e5114e673-inventory\") on node \"crc\" DevicePath \"\"" Dec 10 12:10:44 crc kubenswrapper[4780]: I1210 12:10:44.911960 4780 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xglwl\" (UniqueName: \"kubernetes.io/projected/920ea895-bafa-45e4-9005-175e5114e673-kube-api-access-xglwl\") on node \"crc\" DevicePath \"\"" Dec 10 12:10:44 crc kubenswrapper[4780]: I1210 12:10:44.911975 4780 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/920ea895-bafa-45e4-9005-175e5114e673-ssh-key\") on node \"crc\" DevicePath \"\"" Dec 10 12:10:45 crc kubenswrapper[4780]: I1210 12:10:45.025530 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-lmzcj" event={"ID":"920ea895-bafa-45e4-9005-175e5114e673","Type":"ContainerDied","Data":"687bf130bea61850a39709cefca9cbd0986221b1d5b4cb3bb6c33c942a70dad0"} Dec 10 12:10:45 crc kubenswrapper[4780]: I1210 12:10:45.025633 4780 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="687bf130bea61850a39709cefca9cbd0986221b1d5b4cb3bb6c33c942a70dad0" Dec 10 12:10:45 crc kubenswrapper[4780]: I1210 12:10:45.025665 4780 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-lmzcj" Dec 10 12:10:45 crc kubenswrapper[4780]: E1210 12:10:45.972136 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 12:10:48 crc kubenswrapper[4780]: E1210 12:10:48.962450 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 12:11:00 crc kubenswrapper[4780]: E1210 12:11:00.963190 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 12:11:03 crc kubenswrapper[4780]: E1210 12:11:03.963273 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 12:11:13 crc kubenswrapper[4780]: E1210 12:11:13.963194 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" 
pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 12:11:18 crc kubenswrapper[4780]: E1210 12:11:18.964242 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 12:11:27 crc kubenswrapper[4780]: I1210 12:11:27.475563 4780 patch_prober.go:28] interesting pod/machine-config-daemon-xhdr5 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 10 12:11:27 crc kubenswrapper[4780]: I1210 12:11:27.476598 4780 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" podUID="6bf1dca1-b191-4796-b326-baac53e84045" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 10 12:11:29 crc kubenswrapper[4780]: E1210 12:11:29.058214 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 12:11:30 crc kubenswrapper[4780]: E1210 12:11:30.964606 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 12:11:42 crc kubenswrapper[4780]: E1210 12:11:42.962264 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 12:11:43 crc kubenswrapper[4780]: E1210 12:11:43.963720 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 12:11:55 crc kubenswrapper[4780]: E1210 12:11:55.972706 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 12:11:55 crc kubenswrapper[4780]: E1210 12:11:55.972750 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image 
\\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 12:11:57 crc kubenswrapper[4780]: I1210 12:11:57.475888 4780 patch_prober.go:28] interesting pod/machine-config-daemon-xhdr5 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 10 12:11:57 crc kubenswrapper[4780]: I1210 12:11:57.476381 4780 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" podUID="6bf1dca1-b191-4796-b326-baac53e84045" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 10 12:12:07 crc kubenswrapper[4780]: E1210 12:12:07.962609 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 12:12:09 crc kubenswrapper[4780]: E1210 12:12:09.211375 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 12:12:21 crc kubenswrapper[4780]: E1210 12:12:21.963339 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 12:12:24 crc kubenswrapper[4780]: I1210 12:12:24.962467 4780 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Dec 10 12:12:25 crc kubenswrapper[4780]: E1210 12:12:25.096091 4780 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested" Dec 10 12:12:25 crc kubenswrapper[4780]: E1210 12:12:25.096175 4780 kuberuntime_image.go:55] "Failed to pull image" err="initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested" Dec 10 12:12:25 crc kubenswrapper[4780]: E1210 12:12:25.096422 4780 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:heat-db-sync,Image:quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested,Command:[/bin/bash],Args:[-c /usr/bin/heat-manage --config-dir /etc/heat/heat.conf.d db_sync],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:true,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/heat/heat.conf.d/00-default.conf,SubPath:00-default.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:false,MountPath:/etc/heat/heat.conf.d/01-custom.conf,SubPath:01-custom.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/my.cnf,SubPath:my.cnf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-fh6ms,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42418,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:*42418,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod heat-db-sync-nd4t7_openstack(4ba2892c-316e-4819-a33c-d7b2b6803553): ErrImagePull: initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" logger="UnhandledError" Dec 10 12:12:25 crc kubenswrapper[4780]: E1210 12:12:25.098011 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ErrImagePull: \"initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 12:12:27 crc kubenswrapper[4780]: I1210 12:12:27.475872 4780 patch_prober.go:28] interesting pod/machine-config-daemon-xhdr5 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 10 12:12:27 crc kubenswrapper[4780]: I1210 12:12:27.476394 4780 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" podUID="6bf1dca1-b191-4796-b326-baac53e84045" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 10 12:12:27 crc kubenswrapper[4780]: I1210 12:12:27.476475 4780 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" Dec 10 12:12:27 crc kubenswrapper[4780]: I1210 12:12:27.477953 4780 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"780e216eb9a89407d8eede2a2a33f67ab4bb1144b34a671b270f0500ea31a372"} pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Dec 10 12:12:27 crc kubenswrapper[4780]: I1210 12:12:27.478036 4780 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" podUID="6bf1dca1-b191-4796-b326-baac53e84045" containerName="machine-config-daemon" containerID="cri-o://780e216eb9a89407d8eede2a2a33f67ab4bb1144b34a671b270f0500ea31a372" gracePeriod=600 Dec 10 12:12:28 crc kubenswrapper[4780]: I1210 12:12:28.415585 4780 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/openstack-cell1-galera-0" podUID="ee80edf3-0250-44c9-acfb-e2f9a3ce4f4b" containerName="galera" probeResult="failure" output="command timed out" Dec 10 12:12:28 crc kubenswrapper[4780]: I1210 12:12:28.417091 4780 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/openstack-cell1-galera-0" podUID="ee80edf3-0250-44c9-acfb-e2f9a3ce4f4b" containerName="galera" probeResult="failure" output="command timed out" Dec 10 12:12:28 crc kubenswrapper[4780]: I1210 12:12:28.721370 4780 generic.go:334] "Generic (PLEG): container finished" podID="6bf1dca1-b191-4796-b326-baac53e84045" containerID="780e216eb9a89407d8eede2a2a33f67ab4bb1144b34a671b270f0500ea31a372" exitCode=0 Dec 10 12:12:28 crc kubenswrapper[4780]: I1210 12:12:28.721449 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" event={"ID":"6bf1dca1-b191-4796-b326-baac53e84045","Type":"ContainerDied","Data":"780e216eb9a89407d8eede2a2a33f67ab4bb1144b34a671b270f0500ea31a372"} Dec 10 12:12:28 crc kubenswrapper[4780]: I1210 12:12:28.721522 4780 scope.go:117] "RemoveContainer" containerID="d58cc4151b0bc840795f2245c54950cefcb2fdd18000129400126c1f73af997a" Dec 10 12:12:29 crc kubenswrapper[4780]: E1210 12:12:29.037150 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-xhdr5_openshift-machine-config-operator(6bf1dca1-b191-4796-b326-baac53e84045)\"" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" podUID="6bf1dca1-b191-4796-b326-baac53e84045" Dec 10 12:12:29 crc kubenswrapper[4780]: I1210 12:12:29.740046 4780 scope.go:117] "RemoveContainer" containerID="780e216eb9a89407d8eede2a2a33f67ab4bb1144b34a671b270f0500ea31a372" Dec 10 12:12:29 crc kubenswrapper[4780]: E1210 12:12:29.740713 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xhdr5_openshift-machine-config-operator(6bf1dca1-b191-4796-b326-baac53e84045)\"" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" podUID="6bf1dca1-b191-4796-b326-baac53e84045" Dec 10 12:12:33 crc kubenswrapper[4780]: E1210 12:12:33.962100 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 12:12:35 crc kubenswrapper[4780]: E1210 12:12:35.970642 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 12:12:40 crc kubenswrapper[4780]: I1210 12:12:40.961400 4780 scope.go:117] "RemoveContainer" containerID="780e216eb9a89407d8eede2a2a33f67ab4bb1144b34a671b270f0500ea31a372" Dec 10 12:12:40 crc kubenswrapper[4780]: E1210 12:12:40.962355 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xhdr5_openshift-machine-config-operator(6bf1dca1-b191-4796-b326-baac53e84045)\"" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" podUID="6bf1dca1-b191-4796-b326-baac53e84045" Dec 10 12:12:47 crc kubenswrapper[4780]: E1210 12:12:47.094094 4780 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested" Dec 10 12:12:47 crc kubenswrapper[4780]: E1210 12:12:47.094880 4780 kuberuntime_image.go:55] "Failed to pull image" err="initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested" Dec 10 12:12:47 crc kubenswrapper[4780]: E1210 12:12:47.095130 4780 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:ceilometer-central-agent,Image:quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n5d9hcfh66bh66bh89h5cdh97h57ch598h68h5b5h689h56chc5h96h58ch687h5dfh5ddh645h68bhcchcdh56ch56fh9fh654hd4h8dhb9h74h59cq,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/var/lib/openstack/bin,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/openstack/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:ceilometer-central-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-gf2w8,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[/usr/bin/python3 /var/lib/openstack/bin/centralhealth.py],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:300,TimeoutSeconds:5,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ceilometer-0_openstack(317b5b7c-bb08-4441-a2ef-8c2d7390ada6): ErrImagePull: initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" logger="UnhandledError" Dec 10 12:12:47 crc kubenswrapper[4780]: E1210 12:12:47.096333 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ErrImagePull: \"initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 12:12:47 crc kubenswrapper[4780]: E1210 12:12:47.964198 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 12:12:55 crc kubenswrapper[4780]: I1210 12:12:55.973766 4780 scope.go:117] "RemoveContainer" containerID="780e216eb9a89407d8eede2a2a33f67ab4bb1144b34a671b270f0500ea31a372" Dec 10 12:12:55 crc kubenswrapper[4780]: E1210 12:12:55.975719 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xhdr5_openshift-machine-config-operator(6bf1dca1-b191-4796-b326-baac53e84045)\"" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" podUID="6bf1dca1-b191-4796-b326-baac53e84045" Dec 10 12:12:58 crc kubenswrapper[4780]: E1210 12:12:58.962384 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 12:12:58 crc kubenswrapper[4780]: E1210 12:12:58.962493 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 12:13:09 crc kubenswrapper[4780]: I1210 12:13:09.959406 4780 scope.go:117] "RemoveContainer" containerID="780e216eb9a89407d8eede2a2a33f67ab4bb1144b34a671b270f0500ea31a372" Dec 10 12:13:09 crc kubenswrapper[4780]: E1210 12:13:09.960483 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xhdr5_openshift-machine-config-operator(6bf1dca1-b191-4796-b326-baac53e84045)\"" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" podUID="6bf1dca1-b191-4796-b326-baac53e84045" Dec 10 12:13:12 crc kubenswrapper[4780]: E1210 12:13:12.962441 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 12:13:13 crc kubenswrapper[4780]: E1210 12:13:13.964387 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 12:13:22 crc kubenswrapper[4780]: I1210 12:13:22.387527 4780 kubelet.go:2421] 
"SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-gkkdn"] Dec 10 12:13:22 crc kubenswrapper[4780]: E1210 12:13:22.389100 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d48fbd01-21a3-4e1f-851d-46685fde565e" containerName="extract-content" Dec 10 12:13:22 crc kubenswrapper[4780]: I1210 12:13:22.389130 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="d48fbd01-21a3-4e1f-851d-46685fde565e" containerName="extract-content" Dec 10 12:13:22 crc kubenswrapper[4780]: E1210 12:13:22.389149 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="920ea895-bafa-45e4-9005-175e5114e673" containerName="download-cache-edpm-deployment-openstack-edpm-ipam" Dec 10 12:13:22 crc kubenswrapper[4780]: I1210 12:13:22.389160 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="920ea895-bafa-45e4-9005-175e5114e673" containerName="download-cache-edpm-deployment-openstack-edpm-ipam" Dec 10 12:13:22 crc kubenswrapper[4780]: E1210 12:13:22.389180 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="359263ed-57ad-4a69-914f-f8badb0fc0f2" containerName="registry-server" Dec 10 12:13:22 crc kubenswrapper[4780]: I1210 12:13:22.389189 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="359263ed-57ad-4a69-914f-f8badb0fc0f2" containerName="registry-server" Dec 10 12:13:22 crc kubenswrapper[4780]: E1210 12:13:22.389204 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="359263ed-57ad-4a69-914f-f8badb0fc0f2" containerName="extract-content" Dec 10 12:13:22 crc kubenswrapper[4780]: I1210 12:13:22.389212 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="359263ed-57ad-4a69-914f-f8badb0fc0f2" containerName="extract-content" Dec 10 12:13:22 crc kubenswrapper[4780]: E1210 12:13:22.389229 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d48fbd01-21a3-4e1f-851d-46685fde565e" containerName="registry-server" Dec 10 12:13:22 crc kubenswrapper[4780]: I1210 12:13:22.389237 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="d48fbd01-21a3-4e1f-851d-46685fde565e" containerName="registry-server" Dec 10 12:13:22 crc kubenswrapper[4780]: E1210 12:13:22.389262 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="359263ed-57ad-4a69-914f-f8badb0fc0f2" containerName="extract-utilities" Dec 10 12:13:22 crc kubenswrapper[4780]: I1210 12:13:22.389270 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="359263ed-57ad-4a69-914f-f8badb0fc0f2" containerName="extract-utilities" Dec 10 12:13:22 crc kubenswrapper[4780]: E1210 12:13:22.389307 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d48fbd01-21a3-4e1f-851d-46685fde565e" containerName="extract-utilities" Dec 10 12:13:22 crc kubenswrapper[4780]: I1210 12:13:22.389313 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="d48fbd01-21a3-4e1f-851d-46685fde565e" containerName="extract-utilities" Dec 10 12:13:22 crc kubenswrapper[4780]: I1210 12:13:22.389613 4780 memory_manager.go:354] "RemoveStaleState removing state" podUID="359263ed-57ad-4a69-914f-f8badb0fc0f2" containerName="registry-server" Dec 10 12:13:22 crc kubenswrapper[4780]: I1210 12:13:22.389635 4780 memory_manager.go:354] "RemoveStaleState removing state" podUID="920ea895-bafa-45e4-9005-175e5114e673" containerName="download-cache-edpm-deployment-openstack-edpm-ipam" Dec 10 12:13:22 crc kubenswrapper[4780]: I1210 12:13:22.389648 4780 memory_manager.go:354] "RemoveStaleState removing state" podUID="d48fbd01-21a3-4e1f-851d-46685fde565e" 
containerName="registry-server" Dec 10 12:13:22 crc kubenswrapper[4780]: I1210 12:13:22.392248 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-gkkdn" Dec 10 12:13:22 crc kubenswrapper[4780]: I1210 12:13:22.417527 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-gkkdn"] Dec 10 12:13:22 crc kubenswrapper[4780]: I1210 12:13:22.434526 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pjcv9\" (UniqueName: \"kubernetes.io/projected/050d2aa9-9c42-4112-8861-57018b12ec11-kube-api-access-pjcv9\") pod \"community-operators-gkkdn\" (UID: \"050d2aa9-9c42-4112-8861-57018b12ec11\") " pod="openshift-marketplace/community-operators-gkkdn" Dec 10 12:13:22 crc kubenswrapper[4780]: I1210 12:13:22.435033 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/050d2aa9-9c42-4112-8861-57018b12ec11-utilities\") pod \"community-operators-gkkdn\" (UID: \"050d2aa9-9c42-4112-8861-57018b12ec11\") " pod="openshift-marketplace/community-operators-gkkdn" Dec 10 12:13:22 crc kubenswrapper[4780]: I1210 12:13:22.435239 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/050d2aa9-9c42-4112-8861-57018b12ec11-catalog-content\") pod \"community-operators-gkkdn\" (UID: \"050d2aa9-9c42-4112-8861-57018b12ec11\") " pod="openshift-marketplace/community-operators-gkkdn" Dec 10 12:13:22 crc kubenswrapper[4780]: I1210 12:13:22.543031 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pjcv9\" (UniqueName: \"kubernetes.io/projected/050d2aa9-9c42-4112-8861-57018b12ec11-kube-api-access-pjcv9\") pod \"community-operators-gkkdn\" (UID: \"050d2aa9-9c42-4112-8861-57018b12ec11\") " pod="openshift-marketplace/community-operators-gkkdn" Dec 10 12:13:22 crc kubenswrapper[4780]: I1210 12:13:22.543200 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/050d2aa9-9c42-4112-8861-57018b12ec11-utilities\") pod \"community-operators-gkkdn\" (UID: \"050d2aa9-9c42-4112-8861-57018b12ec11\") " pod="openshift-marketplace/community-operators-gkkdn" Dec 10 12:13:22 crc kubenswrapper[4780]: I1210 12:13:22.543263 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/050d2aa9-9c42-4112-8861-57018b12ec11-catalog-content\") pod \"community-operators-gkkdn\" (UID: \"050d2aa9-9c42-4112-8861-57018b12ec11\") " pod="openshift-marketplace/community-operators-gkkdn" Dec 10 12:13:22 crc kubenswrapper[4780]: I1210 12:13:22.543999 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/050d2aa9-9c42-4112-8861-57018b12ec11-catalog-content\") pod \"community-operators-gkkdn\" (UID: \"050d2aa9-9c42-4112-8861-57018b12ec11\") " pod="openshift-marketplace/community-operators-gkkdn" Dec 10 12:13:22 crc kubenswrapper[4780]: I1210 12:13:22.544039 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/050d2aa9-9c42-4112-8861-57018b12ec11-utilities\") pod \"community-operators-gkkdn\" (UID: \"050d2aa9-9c42-4112-8861-57018b12ec11\") " 
pod="openshift-marketplace/community-operators-gkkdn" Dec 10 12:13:22 crc kubenswrapper[4780]: I1210 12:13:22.922565 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pjcv9\" (UniqueName: \"kubernetes.io/projected/050d2aa9-9c42-4112-8861-57018b12ec11-kube-api-access-pjcv9\") pod \"community-operators-gkkdn\" (UID: \"050d2aa9-9c42-4112-8861-57018b12ec11\") " pod="openshift-marketplace/community-operators-gkkdn" Dec 10 12:13:22 crc kubenswrapper[4780]: I1210 12:13:22.964181 4780 scope.go:117] "RemoveContainer" containerID="780e216eb9a89407d8eede2a2a33f67ab4bb1144b34a671b270f0500ea31a372" Dec 10 12:13:22 crc kubenswrapper[4780]: E1210 12:13:22.964705 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xhdr5_openshift-machine-config-operator(6bf1dca1-b191-4796-b326-baac53e84045)\"" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" podUID="6bf1dca1-b191-4796-b326-baac53e84045" Dec 10 12:13:23 crc kubenswrapper[4780]: I1210 12:13:23.048187 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-gkkdn" Dec 10 12:13:23 crc kubenswrapper[4780]: I1210 12:13:23.585039 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-gkkdn"] Dec 10 12:13:23 crc kubenswrapper[4780]: I1210 12:13:23.616445 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-gkkdn" event={"ID":"050d2aa9-9c42-4112-8861-57018b12ec11","Type":"ContainerStarted","Data":"582a2aae42aa5f30939c8ad2db6ad3ad50534b85b162e6db18c0bb16e60b8afa"} Dec 10 12:13:24 crc kubenswrapper[4780]: I1210 12:13:24.632074 4780 generic.go:334] "Generic (PLEG): container finished" podID="050d2aa9-9c42-4112-8861-57018b12ec11" containerID="a9d53ec6a6779ca7cfea1b4a81855110d5a19f1783a7d9e5158543f11155c72f" exitCode=0 Dec 10 12:13:24 crc kubenswrapper[4780]: I1210 12:13:24.632208 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-gkkdn" event={"ID":"050d2aa9-9c42-4112-8861-57018b12ec11","Type":"ContainerDied","Data":"a9d53ec6a6779ca7cfea1b4a81855110d5a19f1783a7d9e5158543f11155c72f"} Dec 10 12:13:24 crc kubenswrapper[4780]: E1210 12:13:24.961119 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 12:13:26 crc kubenswrapper[4780]: I1210 12:13:26.672759 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-gkkdn" event={"ID":"050d2aa9-9c42-4112-8861-57018b12ec11","Type":"ContainerStarted","Data":"5f1e81825de4a572ccb14b787377b1ed9d61a757b2ecd03b5739b76194a8c11d"} Dec 10 12:13:26 crc kubenswrapper[4780]: E1210 12:13:26.961331 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 12:13:27 crc 
kubenswrapper[4780]: I1210 12:13:27.688584 4780 generic.go:334] "Generic (PLEG): container finished" podID="050d2aa9-9c42-4112-8861-57018b12ec11" containerID="5f1e81825de4a572ccb14b787377b1ed9d61a757b2ecd03b5739b76194a8c11d" exitCode=0 Dec 10 12:13:27 crc kubenswrapper[4780]: I1210 12:13:27.689001 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-gkkdn" event={"ID":"050d2aa9-9c42-4112-8861-57018b12ec11","Type":"ContainerDied","Data":"5f1e81825de4a572ccb14b787377b1ed9d61a757b2ecd03b5739b76194a8c11d"} Dec 10 12:13:29 crc kubenswrapper[4780]: I1210 12:13:29.716318 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-gkkdn" event={"ID":"050d2aa9-9c42-4112-8861-57018b12ec11","Type":"ContainerStarted","Data":"9a4146785147b0d095aff58027c8b587013eb37950b42e612ecd9a946e0eaeaf"} Dec 10 12:13:29 crc kubenswrapper[4780]: I1210 12:13:29.748108 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-gkkdn" podStartSLOduration=3.954304454 podStartE2EDuration="7.748074422s" podCreationTimestamp="2025-12-10 12:13:22 +0000 UTC" firstStartedPulling="2025-12-10 12:13:24.635574393 +0000 UTC m=+5309.488967836" lastFinishedPulling="2025-12-10 12:13:28.429344361 +0000 UTC m=+5313.282737804" observedRunningTime="2025-12-10 12:13:29.736289634 +0000 UTC m=+5314.589683067" watchObservedRunningTime="2025-12-10 12:13:29.748074422 +0000 UTC m=+5314.601467875" Dec 10 12:13:33 crc kubenswrapper[4780]: I1210 12:13:33.048469 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-gkkdn" Dec 10 12:13:33 crc kubenswrapper[4780]: I1210 12:13:33.050726 4780 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-gkkdn" Dec 10 12:13:33 crc kubenswrapper[4780]: I1210 12:13:33.107989 4780 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-gkkdn" Dec 10 12:13:33 crc kubenswrapper[4780]: I1210 12:13:33.938515 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-gkkdn" Dec 10 12:13:34 crc kubenswrapper[4780]: I1210 12:13:34.012377 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-gkkdn"] Dec 10 12:13:35 crc kubenswrapper[4780]: I1210 12:13:35.923252 4780 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-gkkdn" podUID="050d2aa9-9c42-4112-8861-57018b12ec11" containerName="registry-server" containerID="cri-o://9a4146785147b0d095aff58027c8b587013eb37950b42e612ecd9a946e0eaeaf" gracePeriod=2 Dec 10 12:13:36 crc kubenswrapper[4780]: I1210 12:13:36.774366 4780 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-gkkdn" Dec 10 12:13:36 crc kubenswrapper[4780]: I1210 12:13:36.922321 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/050d2aa9-9c42-4112-8861-57018b12ec11-catalog-content\") pod \"050d2aa9-9c42-4112-8861-57018b12ec11\" (UID: \"050d2aa9-9c42-4112-8861-57018b12ec11\") " Dec 10 12:13:36 crc kubenswrapper[4780]: I1210 12:13:36.922378 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/050d2aa9-9c42-4112-8861-57018b12ec11-utilities\") pod \"050d2aa9-9c42-4112-8861-57018b12ec11\" (UID: \"050d2aa9-9c42-4112-8861-57018b12ec11\") " Dec 10 12:13:36 crc kubenswrapper[4780]: I1210 12:13:36.922465 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pjcv9\" (UniqueName: \"kubernetes.io/projected/050d2aa9-9c42-4112-8861-57018b12ec11-kube-api-access-pjcv9\") pod \"050d2aa9-9c42-4112-8861-57018b12ec11\" (UID: \"050d2aa9-9c42-4112-8861-57018b12ec11\") " Dec 10 12:13:36 crc kubenswrapper[4780]: I1210 12:13:36.924352 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/050d2aa9-9c42-4112-8861-57018b12ec11-utilities" (OuterVolumeSpecName: "utilities") pod "050d2aa9-9c42-4112-8861-57018b12ec11" (UID: "050d2aa9-9c42-4112-8861-57018b12ec11"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 12:13:36 crc kubenswrapper[4780]: I1210 12:13:36.944314 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-gkkdn" event={"ID":"050d2aa9-9c42-4112-8861-57018b12ec11","Type":"ContainerDied","Data":"9a4146785147b0d095aff58027c8b587013eb37950b42e612ecd9a946e0eaeaf"} Dec 10 12:13:36 crc kubenswrapper[4780]: I1210 12:13:36.944447 4780 scope.go:117] "RemoveContainer" containerID="9a4146785147b0d095aff58027c8b587013eb37950b42e612ecd9a946e0eaeaf" Dec 10 12:13:36 crc kubenswrapper[4780]: I1210 12:13:36.944313 4780 generic.go:334] "Generic (PLEG): container finished" podID="050d2aa9-9c42-4112-8861-57018b12ec11" containerID="9a4146785147b0d095aff58027c8b587013eb37950b42e612ecd9a946e0eaeaf" exitCode=0 Dec 10 12:13:36 crc kubenswrapper[4780]: I1210 12:13:36.944488 4780 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-gkkdn" Dec 10 12:13:36 crc kubenswrapper[4780]: I1210 12:13:36.944526 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-gkkdn" event={"ID":"050d2aa9-9c42-4112-8861-57018b12ec11","Type":"ContainerDied","Data":"582a2aae42aa5f30939c8ad2db6ad3ad50534b85b162e6db18c0bb16e60b8afa"} Dec 10 12:13:36 crc kubenswrapper[4780]: I1210 12:13:36.946790 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/050d2aa9-9c42-4112-8861-57018b12ec11-kube-api-access-pjcv9" (OuterVolumeSpecName: "kube-api-access-pjcv9") pod "050d2aa9-9c42-4112-8861-57018b12ec11" (UID: "050d2aa9-9c42-4112-8861-57018b12ec11"). InnerVolumeSpecName "kube-api-access-pjcv9". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 12:13:36 crc kubenswrapper[4780]: I1210 12:13:36.960998 4780 scope.go:117] "RemoveContainer" containerID="780e216eb9a89407d8eede2a2a33f67ab4bb1144b34a671b270f0500ea31a372" Dec 10 12:13:36 crc kubenswrapper[4780]: E1210 12:13:36.961373 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xhdr5_openshift-machine-config-operator(6bf1dca1-b191-4796-b326-baac53e84045)\"" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" podUID="6bf1dca1-b191-4796-b326-baac53e84045" Dec 10 12:13:37 crc kubenswrapper[4780]: I1210 12:13:37.025234 4780 scope.go:117] "RemoveContainer" containerID="5f1e81825de4a572ccb14b787377b1ed9d61a757b2ecd03b5739b76194a8c11d" Dec 10 12:13:37 crc kubenswrapper[4780]: I1210 12:13:37.026789 4780 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/050d2aa9-9c42-4112-8861-57018b12ec11-utilities\") on node \"crc\" DevicePath \"\"" Dec 10 12:13:37 crc kubenswrapper[4780]: I1210 12:13:37.026827 4780 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pjcv9\" (UniqueName: \"kubernetes.io/projected/050d2aa9-9c42-4112-8861-57018b12ec11-kube-api-access-pjcv9\") on node \"crc\" DevicePath \"\"" Dec 10 12:13:37 crc kubenswrapper[4780]: I1210 12:13:37.060141 4780 scope.go:117] "RemoveContainer" containerID="a9d53ec6a6779ca7cfea1b4a81855110d5a19f1783a7d9e5158543f11155c72f" Dec 10 12:13:37 crc kubenswrapper[4780]: I1210 12:13:37.117439 4780 scope.go:117] "RemoveContainer" containerID="9a4146785147b0d095aff58027c8b587013eb37950b42e612ecd9a946e0eaeaf" Dec 10 12:13:37 crc kubenswrapper[4780]: E1210 12:13:37.118305 4780 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9a4146785147b0d095aff58027c8b587013eb37950b42e612ecd9a946e0eaeaf\": container with ID starting with 9a4146785147b0d095aff58027c8b587013eb37950b42e612ecd9a946e0eaeaf not found: ID does not exist" containerID="9a4146785147b0d095aff58027c8b587013eb37950b42e612ecd9a946e0eaeaf" Dec 10 12:13:37 crc kubenswrapper[4780]: I1210 12:13:37.118376 4780 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9a4146785147b0d095aff58027c8b587013eb37950b42e612ecd9a946e0eaeaf"} err="failed to get container status \"9a4146785147b0d095aff58027c8b587013eb37950b42e612ecd9a946e0eaeaf\": rpc error: code = NotFound desc = could not find container \"9a4146785147b0d095aff58027c8b587013eb37950b42e612ecd9a946e0eaeaf\": container with ID starting with 9a4146785147b0d095aff58027c8b587013eb37950b42e612ecd9a946e0eaeaf not found: ID does not exist" Dec 10 12:13:37 crc kubenswrapper[4780]: I1210 12:13:37.118420 4780 scope.go:117] "RemoveContainer" containerID="5f1e81825de4a572ccb14b787377b1ed9d61a757b2ecd03b5739b76194a8c11d" Dec 10 12:13:37 crc kubenswrapper[4780]: E1210 12:13:37.118978 4780 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5f1e81825de4a572ccb14b787377b1ed9d61a757b2ecd03b5739b76194a8c11d\": container with ID starting with 5f1e81825de4a572ccb14b787377b1ed9d61a757b2ecd03b5739b76194a8c11d not found: ID does not exist" containerID="5f1e81825de4a572ccb14b787377b1ed9d61a757b2ecd03b5739b76194a8c11d" Dec 10 12:13:37 crc kubenswrapper[4780]: I1210 
12:13:37.119019 4780 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5f1e81825de4a572ccb14b787377b1ed9d61a757b2ecd03b5739b76194a8c11d"} err="failed to get container status \"5f1e81825de4a572ccb14b787377b1ed9d61a757b2ecd03b5739b76194a8c11d\": rpc error: code = NotFound desc = could not find container \"5f1e81825de4a572ccb14b787377b1ed9d61a757b2ecd03b5739b76194a8c11d\": container with ID starting with 5f1e81825de4a572ccb14b787377b1ed9d61a757b2ecd03b5739b76194a8c11d not found: ID does not exist" Dec 10 12:13:37 crc kubenswrapper[4780]: I1210 12:13:37.119050 4780 scope.go:117] "RemoveContainer" containerID="a9d53ec6a6779ca7cfea1b4a81855110d5a19f1783a7d9e5158543f11155c72f" Dec 10 12:13:37 crc kubenswrapper[4780]: E1210 12:13:37.119397 4780 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a9d53ec6a6779ca7cfea1b4a81855110d5a19f1783a7d9e5158543f11155c72f\": container with ID starting with a9d53ec6a6779ca7cfea1b4a81855110d5a19f1783a7d9e5158543f11155c72f not found: ID does not exist" containerID="a9d53ec6a6779ca7cfea1b4a81855110d5a19f1783a7d9e5158543f11155c72f" Dec 10 12:13:37 crc kubenswrapper[4780]: I1210 12:13:37.119467 4780 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a9d53ec6a6779ca7cfea1b4a81855110d5a19f1783a7d9e5158543f11155c72f"} err="failed to get container status \"a9d53ec6a6779ca7cfea1b4a81855110d5a19f1783a7d9e5158543f11155c72f\": rpc error: code = NotFound desc = could not find container \"a9d53ec6a6779ca7cfea1b4a81855110d5a19f1783a7d9e5158543f11155c72f\": container with ID starting with a9d53ec6a6779ca7cfea1b4a81855110d5a19f1783a7d9e5158543f11155c72f not found: ID does not exist" Dec 10 12:13:37 crc kubenswrapper[4780]: I1210 12:13:37.120213 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/050d2aa9-9c42-4112-8861-57018b12ec11-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "050d2aa9-9c42-4112-8861-57018b12ec11" (UID: "050d2aa9-9c42-4112-8861-57018b12ec11"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 12:13:37 crc kubenswrapper[4780]: I1210 12:13:37.130452 4780 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/050d2aa9-9c42-4112-8861-57018b12ec11-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 10 12:13:37 crc kubenswrapper[4780]: I1210 12:13:37.293245 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-gkkdn"] Dec 10 12:13:37 crc kubenswrapper[4780]: I1210 12:13:37.306728 4780 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-gkkdn"] Dec 10 12:13:37 crc kubenswrapper[4780]: I1210 12:13:37.975532 4780 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="050d2aa9-9c42-4112-8861-57018b12ec11" path="/var/lib/kubelet/pods/050d2aa9-9c42-4112-8861-57018b12ec11/volumes" Dec 10 12:13:39 crc kubenswrapper[4780]: E1210 12:13:39.962569 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 12:13:40 crc kubenswrapper[4780]: E1210 12:13:40.960344 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 12:13:48 crc kubenswrapper[4780]: I1210 12:13:48.961808 4780 scope.go:117] "RemoveContainer" containerID="780e216eb9a89407d8eede2a2a33f67ab4bb1144b34a671b270f0500ea31a372" Dec 10 12:13:48 crc kubenswrapper[4780]: E1210 12:13:48.963488 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xhdr5_openshift-machine-config-operator(6bf1dca1-b191-4796-b326-baac53e84045)\"" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" podUID="6bf1dca1-b191-4796-b326-baac53e84045" Dec 10 12:13:53 crc kubenswrapper[4780]: E1210 12:13:53.963084 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 12:13:53 crc kubenswrapper[4780]: E1210 12:13:53.963350 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 12:13:59 crc kubenswrapper[4780]: I1210 12:13:59.959238 4780 scope.go:117] "RemoveContainer" containerID="780e216eb9a89407d8eede2a2a33f67ab4bb1144b34a671b270f0500ea31a372" Dec 10 12:13:59 crc kubenswrapper[4780]: E1210 12:13:59.960382 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for 
\"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xhdr5_openshift-machine-config-operator(6bf1dca1-b191-4796-b326-baac53e84045)\"" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" podUID="6bf1dca1-b191-4796-b326-baac53e84045" Dec 10 12:14:05 crc kubenswrapper[4780]: E1210 12:14:05.972028 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 12:14:08 crc kubenswrapper[4780]: E1210 12:14:08.962345 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 12:14:10 crc kubenswrapper[4780]: I1210 12:14:10.959877 4780 scope.go:117] "RemoveContainer" containerID="780e216eb9a89407d8eede2a2a33f67ab4bb1144b34a671b270f0500ea31a372" Dec 10 12:14:10 crc kubenswrapper[4780]: E1210 12:14:10.960407 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xhdr5_openshift-machine-config-operator(6bf1dca1-b191-4796-b326-baac53e84045)\"" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" podUID="6bf1dca1-b191-4796-b326-baac53e84045" Dec 10 12:14:19 crc kubenswrapper[4780]: E1210 12:14:19.962883 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 12:14:19 crc kubenswrapper[4780]: E1210 12:14:19.962883 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 12:14:21 crc kubenswrapper[4780]: I1210 12:14:21.961308 4780 scope.go:117] "RemoveContainer" containerID="780e216eb9a89407d8eede2a2a33f67ab4bb1144b34a671b270f0500ea31a372" Dec 10 12:14:21 crc kubenswrapper[4780]: E1210 12:14:21.964214 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xhdr5_openshift-machine-config-operator(6bf1dca1-b191-4796-b326-baac53e84045)\"" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" podUID="6bf1dca1-b191-4796-b326-baac53e84045" Dec 10 12:14:32 crc kubenswrapper[4780]: I1210 12:14:32.830897 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-nx28d"] Dec 10 12:14:32 crc kubenswrapper[4780]: E1210 12:14:32.834713 4780 cpu_manager.go:410] 
"RemoveStaleState: removing container" podUID="050d2aa9-9c42-4112-8861-57018b12ec11" containerName="registry-server" Dec 10 12:14:32 crc kubenswrapper[4780]: I1210 12:14:32.834756 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="050d2aa9-9c42-4112-8861-57018b12ec11" containerName="registry-server" Dec 10 12:14:32 crc kubenswrapper[4780]: E1210 12:14:32.834781 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="050d2aa9-9c42-4112-8861-57018b12ec11" containerName="extract-content" Dec 10 12:14:32 crc kubenswrapper[4780]: I1210 12:14:32.834789 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="050d2aa9-9c42-4112-8861-57018b12ec11" containerName="extract-content" Dec 10 12:14:32 crc kubenswrapper[4780]: E1210 12:14:32.834810 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="050d2aa9-9c42-4112-8861-57018b12ec11" containerName="extract-utilities" Dec 10 12:14:32 crc kubenswrapper[4780]: I1210 12:14:32.834822 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="050d2aa9-9c42-4112-8861-57018b12ec11" containerName="extract-utilities" Dec 10 12:14:32 crc kubenswrapper[4780]: I1210 12:14:32.835232 4780 memory_manager.go:354] "RemoveStaleState removing state" podUID="050d2aa9-9c42-4112-8861-57018b12ec11" containerName="registry-server" Dec 10 12:14:32 crc kubenswrapper[4780]: I1210 12:14:32.838213 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-nx28d" Dec 10 12:14:32 crc kubenswrapper[4780]: I1210 12:14:32.860383 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-nx28d"] Dec 10 12:14:32 crc kubenswrapper[4780]: I1210 12:14:32.866755 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/613ac526-f118-45dd-85ef-6413eb4abe14-utilities\") pod \"redhat-operators-nx28d\" (UID: \"613ac526-f118-45dd-85ef-6413eb4abe14\") " pod="openshift-marketplace/redhat-operators-nx28d" Dec 10 12:14:32 crc kubenswrapper[4780]: I1210 12:14:32.867125 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f77lt\" (UniqueName: \"kubernetes.io/projected/613ac526-f118-45dd-85ef-6413eb4abe14-kube-api-access-f77lt\") pod \"redhat-operators-nx28d\" (UID: \"613ac526-f118-45dd-85ef-6413eb4abe14\") " pod="openshift-marketplace/redhat-operators-nx28d" Dec 10 12:14:32 crc kubenswrapper[4780]: I1210 12:14:32.867453 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/613ac526-f118-45dd-85ef-6413eb4abe14-catalog-content\") pod \"redhat-operators-nx28d\" (UID: \"613ac526-f118-45dd-85ef-6413eb4abe14\") " pod="openshift-marketplace/redhat-operators-nx28d" Dec 10 12:14:32 crc kubenswrapper[4780]: E1210 12:14:32.964171 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 12:14:32 crc kubenswrapper[4780]: I1210 12:14:32.978222 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/613ac526-f118-45dd-85ef-6413eb4abe14-utilities\") pod 
\"redhat-operators-nx28d\" (UID: \"613ac526-f118-45dd-85ef-6413eb4abe14\") " pod="openshift-marketplace/redhat-operators-nx28d" Dec 10 12:14:32 crc kubenswrapper[4780]: E1210 12:14:32.978346 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 12:14:32 crc kubenswrapper[4780]: I1210 12:14:32.978567 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-f77lt\" (UniqueName: \"kubernetes.io/projected/613ac526-f118-45dd-85ef-6413eb4abe14-kube-api-access-f77lt\") pod \"redhat-operators-nx28d\" (UID: \"613ac526-f118-45dd-85ef-6413eb4abe14\") " pod="openshift-marketplace/redhat-operators-nx28d" Dec 10 12:14:32 crc kubenswrapper[4780]: I1210 12:14:32.979345 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/613ac526-f118-45dd-85ef-6413eb4abe14-utilities\") pod \"redhat-operators-nx28d\" (UID: \"613ac526-f118-45dd-85ef-6413eb4abe14\") " pod="openshift-marketplace/redhat-operators-nx28d" Dec 10 12:14:32 crc kubenswrapper[4780]: I1210 12:14:32.979365 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/613ac526-f118-45dd-85ef-6413eb4abe14-catalog-content\") pod \"redhat-operators-nx28d\" (UID: \"613ac526-f118-45dd-85ef-6413eb4abe14\") " pod="openshift-marketplace/redhat-operators-nx28d" Dec 10 12:14:32 crc kubenswrapper[4780]: I1210 12:14:32.979740 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/613ac526-f118-45dd-85ef-6413eb4abe14-catalog-content\") pod \"redhat-operators-nx28d\" (UID: \"613ac526-f118-45dd-85ef-6413eb4abe14\") " pod="openshift-marketplace/redhat-operators-nx28d" Dec 10 12:14:33 crc kubenswrapper[4780]: I1210 12:14:33.009243 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-f77lt\" (UniqueName: \"kubernetes.io/projected/613ac526-f118-45dd-85ef-6413eb4abe14-kube-api-access-f77lt\") pod \"redhat-operators-nx28d\" (UID: \"613ac526-f118-45dd-85ef-6413eb4abe14\") " pod="openshift-marketplace/redhat-operators-nx28d" Dec 10 12:14:33 crc kubenswrapper[4780]: I1210 12:14:33.185452 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-nx28d" Dec 10 12:14:33 crc kubenswrapper[4780]: I1210 12:14:33.960336 4780 scope.go:117] "RemoveContainer" containerID="780e216eb9a89407d8eede2a2a33f67ab4bb1144b34a671b270f0500ea31a372" Dec 10 12:14:33 crc kubenswrapper[4780]: E1210 12:14:33.965387 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xhdr5_openshift-machine-config-operator(6bf1dca1-b191-4796-b326-baac53e84045)\"" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" podUID="6bf1dca1-b191-4796-b326-baac53e84045" Dec 10 12:14:33 crc kubenswrapper[4780]: I1210 12:14:33.998249 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-nx28d" event={"ID":"613ac526-f118-45dd-85ef-6413eb4abe14","Type":"ContainerStarted","Data":"e83dfd0c492ecefa3013b9467f7848da8efbce298b402306733deda01fac8452"} Dec 10 12:14:33 crc kubenswrapper[4780]: I1210 12:14:33.998303 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-nx28d"] Dec 10 12:14:34 crc kubenswrapper[4780]: I1210 12:14:34.977236 4780 generic.go:334] "Generic (PLEG): container finished" podID="613ac526-f118-45dd-85ef-6413eb4abe14" containerID="2aae143c57c7a7b4a2fbbc94db22d8b70921c315f127b5f14cc9cb38d4b0e16e" exitCode=0 Dec 10 12:14:34 crc kubenswrapper[4780]: I1210 12:14:34.977301 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-nx28d" event={"ID":"613ac526-f118-45dd-85ef-6413eb4abe14","Type":"ContainerDied","Data":"2aae143c57c7a7b4a2fbbc94db22d8b70921c315f127b5f14cc9cb38d4b0e16e"} Dec 10 12:14:37 crc kubenswrapper[4780]: I1210 12:14:37.002976 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-nx28d" event={"ID":"613ac526-f118-45dd-85ef-6413eb4abe14","Type":"ContainerStarted","Data":"e18b8a28e2a0cf12e157332fc81ef53389d5076a73d1e641f6f285a3f2d5efe4"} Dec 10 12:14:44 crc kubenswrapper[4780]: I1210 12:14:44.790420 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-fzkvf"] Dec 10 12:14:44 crc kubenswrapper[4780]: I1210 12:14:44.794037 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-fzkvf" Dec 10 12:14:44 crc kubenswrapper[4780]: I1210 12:14:44.802932 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-fzkvf"] Dec 10 12:14:44 crc kubenswrapper[4780]: I1210 12:14:44.960118 4780 scope.go:117] "RemoveContainer" containerID="780e216eb9a89407d8eede2a2a33f67ab4bb1144b34a671b270f0500ea31a372" Dec 10 12:14:44 crc kubenswrapper[4780]: E1210 12:14:44.960563 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xhdr5_openshift-machine-config-operator(6bf1dca1-b191-4796-b326-baac53e84045)\"" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" podUID="6bf1dca1-b191-4796-b326-baac53e84045" Dec 10 12:14:44 crc kubenswrapper[4780]: I1210 12:14:44.978985 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-srjj9\" (UniqueName: \"kubernetes.io/projected/e889bb4f-2232-42dd-8105-10a90679ddaf-kube-api-access-srjj9\") pod \"redhat-marketplace-fzkvf\" (UID: \"e889bb4f-2232-42dd-8105-10a90679ddaf\") " pod="openshift-marketplace/redhat-marketplace-fzkvf" Dec 10 12:14:44 crc kubenswrapper[4780]: I1210 12:14:44.979324 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e889bb4f-2232-42dd-8105-10a90679ddaf-catalog-content\") pod \"redhat-marketplace-fzkvf\" (UID: \"e889bb4f-2232-42dd-8105-10a90679ddaf\") " pod="openshift-marketplace/redhat-marketplace-fzkvf" Dec 10 12:14:44 crc kubenswrapper[4780]: I1210 12:14:44.979535 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e889bb4f-2232-42dd-8105-10a90679ddaf-utilities\") pod \"redhat-marketplace-fzkvf\" (UID: \"e889bb4f-2232-42dd-8105-10a90679ddaf\") " pod="openshift-marketplace/redhat-marketplace-fzkvf" Dec 10 12:14:45 crc kubenswrapper[4780]: I1210 12:14:45.082011 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e889bb4f-2232-42dd-8105-10a90679ddaf-catalog-content\") pod \"redhat-marketplace-fzkvf\" (UID: \"e889bb4f-2232-42dd-8105-10a90679ddaf\") " pod="openshift-marketplace/redhat-marketplace-fzkvf" Dec 10 12:14:45 crc kubenswrapper[4780]: I1210 12:14:45.082217 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e889bb4f-2232-42dd-8105-10a90679ddaf-utilities\") pod \"redhat-marketplace-fzkvf\" (UID: \"e889bb4f-2232-42dd-8105-10a90679ddaf\") " pod="openshift-marketplace/redhat-marketplace-fzkvf" Dec 10 12:14:45 crc kubenswrapper[4780]: I1210 12:14:45.082587 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-srjj9\" (UniqueName: \"kubernetes.io/projected/e889bb4f-2232-42dd-8105-10a90679ddaf-kube-api-access-srjj9\") pod \"redhat-marketplace-fzkvf\" (UID: \"e889bb4f-2232-42dd-8105-10a90679ddaf\") " pod="openshift-marketplace/redhat-marketplace-fzkvf" Dec 10 12:14:45 crc kubenswrapper[4780]: I1210 12:14:45.082970 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: 
\"kubernetes.io/empty-dir/e889bb4f-2232-42dd-8105-10a90679ddaf-utilities\") pod \"redhat-marketplace-fzkvf\" (UID: \"e889bb4f-2232-42dd-8105-10a90679ddaf\") " pod="openshift-marketplace/redhat-marketplace-fzkvf" Dec 10 12:14:45 crc kubenswrapper[4780]: I1210 12:14:45.082998 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e889bb4f-2232-42dd-8105-10a90679ddaf-catalog-content\") pod \"redhat-marketplace-fzkvf\" (UID: \"e889bb4f-2232-42dd-8105-10a90679ddaf\") " pod="openshift-marketplace/redhat-marketplace-fzkvf" Dec 10 12:14:47 crc kubenswrapper[4780]: E1210 12:14:47.961805 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 12:14:47 crc kubenswrapper[4780]: E1210 12:14:47.963113 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 12:14:56 crc kubenswrapper[4780]: I1210 12:14:56.960442 4780 scope.go:117] "RemoveContainer" containerID="780e216eb9a89407d8eede2a2a33f67ab4bb1144b34a671b270f0500ea31a372" Dec 10 12:14:56 crc kubenswrapper[4780]: E1210 12:14:56.961501 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xhdr5_openshift-machine-config-operator(6bf1dca1-b191-4796-b326-baac53e84045)\"" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" podUID="6bf1dca1-b191-4796-b326-baac53e84045" Dec 10 12:14:58 crc kubenswrapper[4780]: E1210 12:14:58.962329 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 12:15:00 crc kubenswrapper[4780]: I1210 12:15:00.189203 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29422815-9lsjt"] Dec 10 12:15:00 crc kubenswrapper[4780]: I1210 12:15:00.192862 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29422815-9lsjt" Dec 10 12:15:00 crc kubenswrapper[4780]: I1210 12:15:00.196945 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Dec 10 12:15:00 crc kubenswrapper[4780]: I1210 12:15:00.198150 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Dec 10 12:15:00 crc kubenswrapper[4780]: I1210 12:15:00.213126 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29422815-9lsjt"] Dec 10 12:15:00 crc kubenswrapper[4780]: I1210 12:15:00.275559 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/adc9e421-64ca-4c6a-a5ff-7ac43203aa2b-config-volume\") pod \"collect-profiles-29422815-9lsjt\" (UID: \"adc9e421-64ca-4c6a-a5ff-7ac43203aa2b\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29422815-9lsjt" Dec 10 12:15:00 crc kubenswrapper[4780]: I1210 12:15:00.275784 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9sr5j\" (UniqueName: \"kubernetes.io/projected/adc9e421-64ca-4c6a-a5ff-7ac43203aa2b-kube-api-access-9sr5j\") pod \"collect-profiles-29422815-9lsjt\" (UID: \"adc9e421-64ca-4c6a-a5ff-7ac43203aa2b\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29422815-9lsjt" Dec 10 12:15:00 crc kubenswrapper[4780]: I1210 12:15:00.275824 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/adc9e421-64ca-4c6a-a5ff-7ac43203aa2b-secret-volume\") pod \"collect-profiles-29422815-9lsjt\" (UID: \"adc9e421-64ca-4c6a-a5ff-7ac43203aa2b\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29422815-9lsjt" Dec 10 12:15:00 crc kubenswrapper[4780]: I1210 12:15:00.378245 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/adc9e421-64ca-4c6a-a5ff-7ac43203aa2b-config-volume\") pod \"collect-profiles-29422815-9lsjt\" (UID: \"adc9e421-64ca-4c6a-a5ff-7ac43203aa2b\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29422815-9lsjt" Dec 10 12:15:00 crc kubenswrapper[4780]: I1210 12:15:00.378443 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9sr5j\" (UniqueName: \"kubernetes.io/projected/adc9e421-64ca-4c6a-a5ff-7ac43203aa2b-kube-api-access-9sr5j\") pod \"collect-profiles-29422815-9lsjt\" (UID: \"adc9e421-64ca-4c6a-a5ff-7ac43203aa2b\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29422815-9lsjt" Dec 10 12:15:00 crc kubenswrapper[4780]: I1210 12:15:00.378503 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/adc9e421-64ca-4c6a-a5ff-7ac43203aa2b-secret-volume\") pod \"collect-profiles-29422815-9lsjt\" (UID: \"adc9e421-64ca-4c6a-a5ff-7ac43203aa2b\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29422815-9lsjt" Dec 10 12:15:00 crc kubenswrapper[4780]: I1210 12:15:00.379677 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/adc9e421-64ca-4c6a-a5ff-7ac43203aa2b-config-volume\") pod 
\"collect-profiles-29422815-9lsjt\" (UID: \"adc9e421-64ca-4c6a-a5ff-7ac43203aa2b\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29422815-9lsjt" Dec 10 12:15:00 crc kubenswrapper[4780]: E1210 12:15:00.986302 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 12:15:09 crc kubenswrapper[4780]: E1210 12:15:09.984229 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 12:15:10 crc kubenswrapper[4780]: I1210 12:15:10.960070 4780 scope.go:117] "RemoveContainer" containerID="780e216eb9a89407d8eede2a2a33f67ab4bb1144b34a671b270f0500ea31a372" Dec 10 12:15:10 crc kubenswrapper[4780]: E1210 12:15:10.960447 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xhdr5_openshift-machine-config-operator(6bf1dca1-b191-4796-b326-baac53e84045)\"" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" podUID="6bf1dca1-b191-4796-b326-baac53e84045" Dec 10 12:15:13 crc kubenswrapper[4780]: I1210 12:15:13.701979 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-srjj9\" (UniqueName: \"kubernetes.io/projected/e889bb4f-2232-42dd-8105-10a90679ddaf-kube-api-access-srjj9\") pod \"redhat-marketplace-fzkvf\" (UID: \"e889bb4f-2232-42dd-8105-10a90679ddaf\") " pod="openshift-marketplace/redhat-marketplace-fzkvf" Dec 10 12:15:13 crc kubenswrapper[4780]: I1210 12:15:13.703964 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/adc9e421-64ca-4c6a-a5ff-7ac43203aa2b-secret-volume\") pod \"collect-profiles-29422815-9lsjt\" (UID: \"adc9e421-64ca-4c6a-a5ff-7ac43203aa2b\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29422815-9lsjt" Dec 10 12:15:13 crc kubenswrapper[4780]: I1210 12:15:13.711084 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9sr5j\" (UniqueName: \"kubernetes.io/projected/adc9e421-64ca-4c6a-a5ff-7ac43203aa2b-kube-api-access-9sr5j\") pod \"collect-profiles-29422815-9lsjt\" (UID: \"adc9e421-64ca-4c6a-a5ff-7ac43203aa2b\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29422815-9lsjt" Dec 10 12:15:13 crc kubenswrapper[4780]: I1210 12:15:13.731081 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29422815-9lsjt" Dec 10 12:15:13 crc kubenswrapper[4780]: I1210 12:15:13.749443 4780 trace.go:236] Trace[1412447374]: "Calculate volume metrics of storage for pod openshift-logging/logging-loki-index-gateway-0" (10-Dec-2025 12:15:02.803) (total time: 10945ms): Dec 10 12:15:13 crc kubenswrapper[4780]: Trace[1412447374]: [10.945621125s] [10.945621125s] END Dec 10 12:15:13 crc kubenswrapper[4780]: I1210 12:15:13.766503 4780 trace.go:236] Trace[1633707733]: "Calculate volume metrics of storage for pod minio-dev/minio" (10-Dec-2025 12:14:41.856) (total time: 31909ms): Dec 10 12:15:13 crc kubenswrapper[4780]: Trace[1633707733]: [31.909722166s] [31.909722166s] END Dec 10 12:15:13 crc kubenswrapper[4780]: I1210 12:15:13.934882 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-fzkvf" Dec 10 12:15:14 crc kubenswrapper[4780]: I1210 12:15:14.362510 4780 generic.go:334] "Generic (PLEG): container finished" podID="613ac526-f118-45dd-85ef-6413eb4abe14" containerID="e18b8a28e2a0cf12e157332fc81ef53389d5076a73d1e641f6f285a3f2d5efe4" exitCode=0 Dec 10 12:15:14 crc kubenswrapper[4780]: I1210 12:15:14.362614 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-nx28d" event={"ID":"613ac526-f118-45dd-85ef-6413eb4abe14","Type":"ContainerDied","Data":"e18b8a28e2a0cf12e157332fc81ef53389d5076a73d1e641f6f285a3f2d5efe4"} Dec 10 12:15:14 crc kubenswrapper[4780]: I1210 12:15:14.775368 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-fzkvf"] Dec 10 12:15:14 crc kubenswrapper[4780]: W1210 12:15:14.779247 4780 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pode889bb4f_2232_42dd_8105_10a90679ddaf.slice/crio-bbbd9f8ec8a7b604c0819a962732a986394836ce0466d572b4dd86989a7791b0 WatchSource:0}: Error finding container bbbd9f8ec8a7b604c0819a962732a986394836ce0466d572b4dd86989a7791b0: Status 404 returned error can't find the container with id bbbd9f8ec8a7b604c0819a962732a986394836ce0466d572b4dd86989a7791b0 Dec 10 12:15:14 crc kubenswrapper[4780]: W1210 12:15:14.781274 4780 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podadc9e421_64ca_4c6a_a5ff_7ac43203aa2b.slice/crio-27db5ceb0cdd60a29e0a82feb1db4685b1b749e1b5de8ef570e0558abd906f37 WatchSource:0}: Error finding container 27db5ceb0cdd60a29e0a82feb1db4685b1b749e1b5de8ef570e0558abd906f37: Status 404 returned error can't find the container with id 27db5ceb0cdd60a29e0a82feb1db4685b1b749e1b5de8ef570e0558abd906f37 Dec 10 12:15:14 crc kubenswrapper[4780]: I1210 12:15:14.793086 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29422815-9lsjt"] Dec 10 12:15:14 crc kubenswrapper[4780]: E1210 12:15:14.964880 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 12:15:15 crc kubenswrapper[4780]: I1210 12:15:15.401979 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-fzkvf" 
event={"ID":"e889bb4f-2232-42dd-8105-10a90679ddaf","Type":"ContainerStarted","Data":"bbbd9f8ec8a7b604c0819a962732a986394836ce0466d572b4dd86989a7791b0"} Dec 10 12:15:15 crc kubenswrapper[4780]: I1210 12:15:15.410507 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29422815-9lsjt" event={"ID":"adc9e421-64ca-4c6a-a5ff-7ac43203aa2b","Type":"ContainerStarted","Data":"27db5ceb0cdd60a29e0a82feb1db4685b1b749e1b5de8ef570e0558abd906f37"} Dec 10 12:15:16 crc kubenswrapper[4780]: I1210 12:15:16.434892 4780 generic.go:334] "Generic (PLEG): container finished" podID="adc9e421-64ca-4c6a-a5ff-7ac43203aa2b" containerID="569086da9c8a72538fc3f3f6010b86633bc848a7eddd51be47770418bff28672" exitCode=0 Dec 10 12:15:16 crc kubenswrapper[4780]: I1210 12:15:16.435089 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29422815-9lsjt" event={"ID":"adc9e421-64ca-4c6a-a5ff-7ac43203aa2b","Type":"ContainerDied","Data":"569086da9c8a72538fc3f3f6010b86633bc848a7eddd51be47770418bff28672"} Dec 10 12:15:16 crc kubenswrapper[4780]: I1210 12:15:16.443710 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-nx28d" event={"ID":"613ac526-f118-45dd-85ef-6413eb4abe14","Type":"ContainerStarted","Data":"44e67f6d8cbfa9a34729bf3ac6b6de437717469a9fc31ef127c1f30e554e0cd4"} Dec 10 12:15:16 crc kubenswrapper[4780]: I1210 12:15:16.447109 4780 generic.go:334] "Generic (PLEG): container finished" podID="e889bb4f-2232-42dd-8105-10a90679ddaf" containerID="873ae3c621fd9751f617f45ac6d57b1b10f429a44c210fa185de8a7d677f6872" exitCode=0 Dec 10 12:15:16 crc kubenswrapper[4780]: I1210 12:15:16.447335 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-fzkvf" event={"ID":"e889bb4f-2232-42dd-8105-10a90679ddaf","Type":"ContainerDied","Data":"873ae3c621fd9751f617f45ac6d57b1b10f429a44c210fa185de8a7d677f6872"} Dec 10 12:15:16 crc kubenswrapper[4780]: I1210 12:15:16.494992 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-nx28d" podStartSLOduration=4.136864671 podStartE2EDuration="44.494908425s" podCreationTimestamp="2025-12-10 12:14:32 +0000 UTC" firstStartedPulling="2025-12-10 12:14:34.979774743 +0000 UTC m=+5379.833168186" lastFinishedPulling="2025-12-10 12:15:15.337818497 +0000 UTC m=+5420.191211940" observedRunningTime="2025-12-10 12:15:16.486555403 +0000 UTC m=+5421.339948856" watchObservedRunningTime="2025-12-10 12:15:16.494908425 +0000 UTC m=+5421.348301868" Dec 10 12:15:18 crc kubenswrapper[4780]: I1210 12:15:18.007504 4780 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29422815-9lsjt" Dec 10 12:15:18 crc kubenswrapper[4780]: I1210 12:15:18.292954 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/adc9e421-64ca-4c6a-a5ff-7ac43203aa2b-config-volume\") pod \"adc9e421-64ca-4c6a-a5ff-7ac43203aa2b\" (UID: \"adc9e421-64ca-4c6a-a5ff-7ac43203aa2b\") " Dec 10 12:15:18 crc kubenswrapper[4780]: I1210 12:15:18.293216 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9sr5j\" (UniqueName: \"kubernetes.io/projected/adc9e421-64ca-4c6a-a5ff-7ac43203aa2b-kube-api-access-9sr5j\") pod \"adc9e421-64ca-4c6a-a5ff-7ac43203aa2b\" (UID: \"adc9e421-64ca-4c6a-a5ff-7ac43203aa2b\") " Dec 10 12:15:18 crc kubenswrapper[4780]: I1210 12:15:18.296309 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/adc9e421-64ca-4c6a-a5ff-7ac43203aa2b-config-volume" (OuterVolumeSpecName: "config-volume") pod "adc9e421-64ca-4c6a-a5ff-7ac43203aa2b" (UID: "adc9e421-64ca-4c6a-a5ff-7ac43203aa2b"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 12:15:18 crc kubenswrapper[4780]: I1210 12:15:18.310561 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/adc9e421-64ca-4c6a-a5ff-7ac43203aa2b-secret-volume\") pod \"adc9e421-64ca-4c6a-a5ff-7ac43203aa2b\" (UID: \"adc9e421-64ca-4c6a-a5ff-7ac43203aa2b\") " Dec 10 12:15:18 crc kubenswrapper[4780]: I1210 12:15:18.312278 4780 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/adc9e421-64ca-4c6a-a5ff-7ac43203aa2b-config-volume\") on node \"crc\" DevicePath \"\"" Dec 10 12:15:18 crc kubenswrapper[4780]: I1210 12:15:18.328076 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/adc9e421-64ca-4c6a-a5ff-7ac43203aa2b-kube-api-access-9sr5j" (OuterVolumeSpecName: "kube-api-access-9sr5j") pod "adc9e421-64ca-4c6a-a5ff-7ac43203aa2b" (UID: "adc9e421-64ca-4c6a-a5ff-7ac43203aa2b"). InnerVolumeSpecName "kube-api-access-9sr5j". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 12:15:18 crc kubenswrapper[4780]: I1210 12:15:18.339229 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/adc9e421-64ca-4c6a-a5ff-7ac43203aa2b-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "adc9e421-64ca-4c6a-a5ff-7ac43203aa2b" (UID: "adc9e421-64ca-4c6a-a5ff-7ac43203aa2b"). InnerVolumeSpecName "secret-volume". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 12:15:18 crc kubenswrapper[4780]: I1210 12:15:18.415488 4780 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9sr5j\" (UniqueName: \"kubernetes.io/projected/adc9e421-64ca-4c6a-a5ff-7ac43203aa2b-kube-api-access-9sr5j\") on node \"crc\" DevicePath \"\"" Dec 10 12:15:18 crc kubenswrapper[4780]: I1210 12:15:18.415553 4780 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/adc9e421-64ca-4c6a-a5ff-7ac43203aa2b-secret-volume\") on node \"crc\" DevicePath \"\"" Dec 10 12:15:18 crc kubenswrapper[4780]: I1210 12:15:18.480014 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-fzkvf" event={"ID":"e889bb4f-2232-42dd-8105-10a90679ddaf","Type":"ContainerStarted","Data":"6410a1b6a13019a92afdaef4c442ef71d79dc24b574977c06509a870ac37f35b"} Dec 10 12:15:18 crc kubenswrapper[4780]: I1210 12:15:18.482822 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29422815-9lsjt" event={"ID":"adc9e421-64ca-4c6a-a5ff-7ac43203aa2b","Type":"ContainerDied","Data":"27db5ceb0cdd60a29e0a82feb1db4685b1b749e1b5de8ef570e0558abd906f37"} Dec 10 12:15:18 crc kubenswrapper[4780]: I1210 12:15:18.482874 4780 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="27db5ceb0cdd60a29e0a82feb1db4685b1b749e1b5de8ef570e0558abd906f37" Dec 10 12:15:18 crc kubenswrapper[4780]: I1210 12:15:18.482900 4780 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29422815-9lsjt" Dec 10 12:15:19 crc kubenswrapper[4780]: I1210 12:15:19.111298 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29422770-4chsm"] Dec 10 12:15:19 crc kubenswrapper[4780]: I1210 12:15:19.123284 4780 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29422770-4chsm"] Dec 10 12:15:19 crc kubenswrapper[4780]: I1210 12:15:19.976345 4780 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d6dfd0f3-3c4c-450d-917f-51212e8c6809" path="/var/lib/kubelet/pods/d6dfd0f3-3c4c-450d-917f-51212e8c6809/volumes" Dec 10 12:15:22 crc kubenswrapper[4780]: I1210 12:15:22.575884 4780 generic.go:334] "Generic (PLEG): container finished" podID="e889bb4f-2232-42dd-8105-10a90679ddaf" containerID="6410a1b6a13019a92afdaef4c442ef71d79dc24b574977c06509a870ac37f35b" exitCode=0 Dec 10 12:15:22 crc kubenswrapper[4780]: I1210 12:15:22.575965 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-fzkvf" event={"ID":"e889bb4f-2232-42dd-8105-10a90679ddaf","Type":"ContainerDied","Data":"6410a1b6a13019a92afdaef4c442ef71d79dc24b574977c06509a870ac37f35b"} Dec 10 12:15:23 crc kubenswrapper[4780]: I1210 12:15:23.186488 4780 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-nx28d" Dec 10 12:15:23 crc kubenswrapper[4780]: I1210 12:15:23.188648 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-nx28d" Dec 10 12:15:23 crc kubenswrapper[4780]: I1210 12:15:23.594455 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-fzkvf" 
event={"ID":"e889bb4f-2232-42dd-8105-10a90679ddaf","Type":"ContainerStarted","Data":"50f8902f7aa62fe6c3abca6e34d0eac7cfc1639344e2623b61c4c143fc978242"} Dec 10 12:15:23 crc kubenswrapper[4780]: I1210 12:15:23.633476 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-fzkvf" podStartSLOduration=33.013894454 podStartE2EDuration="39.633447952s" podCreationTimestamp="2025-12-10 12:14:44 +0000 UTC" firstStartedPulling="2025-12-10 12:15:16.451080234 +0000 UTC m=+5421.304473677" lastFinishedPulling="2025-12-10 12:15:23.070633732 +0000 UTC m=+5427.924027175" observedRunningTime="2025-12-10 12:15:23.618306278 +0000 UTC m=+5428.471699721" watchObservedRunningTime="2025-12-10 12:15:23.633447952 +0000 UTC m=+5428.486841395" Dec 10 12:15:23 crc kubenswrapper[4780]: I1210 12:15:23.937279 4780 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-fzkvf" Dec 10 12:15:23 crc kubenswrapper[4780]: I1210 12:15:23.937338 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-fzkvf" Dec 10 12:15:23 crc kubenswrapper[4780]: I1210 12:15:23.961193 4780 scope.go:117] "RemoveContainer" containerID="780e216eb9a89407d8eede2a2a33f67ab4bb1144b34a671b270f0500ea31a372" Dec 10 12:15:23 crc kubenswrapper[4780]: E1210 12:15:23.961597 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xhdr5_openshift-machine-config-operator(6bf1dca1-b191-4796-b326-baac53e84045)\"" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" podUID="6bf1dca1-b191-4796-b326-baac53e84045" Dec 10 12:15:23 crc kubenswrapper[4780]: E1210 12:15:23.962400 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 12:15:24 crc kubenswrapper[4780]: I1210 12:15:24.265052 4780 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-nx28d" podUID="613ac526-f118-45dd-85ef-6413eb4abe14" containerName="registry-server" probeResult="failure" output=< Dec 10 12:15:24 crc kubenswrapper[4780]: timeout: failed to connect service ":50051" within 1s Dec 10 12:15:24 crc kubenswrapper[4780]: > Dec 10 12:15:24 crc kubenswrapper[4780]: I1210 12:15:24.997288 4780 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-marketplace-fzkvf" podUID="e889bb4f-2232-42dd-8105-10a90679ddaf" containerName="registry-server" probeResult="failure" output=< Dec 10 12:15:24 crc kubenswrapper[4780]: timeout: failed to connect service ":50051" within 1s Dec 10 12:15:24 crc kubenswrapper[4780]: > Dec 10 12:15:25 crc kubenswrapper[4780]: E1210 12:15:25.970796 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 12:15:33 crc kubenswrapper[4780]: I1210 12:15:33.250890 4780 
kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-nx28d" Dec 10 12:15:33 crc kubenswrapper[4780]: I1210 12:15:33.332724 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-nx28d" Dec 10 12:15:34 crc kubenswrapper[4780]: I1210 12:15:34.004478 4780 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-fzkvf" Dec 10 12:15:34 crc kubenswrapper[4780]: I1210 12:15:34.061002 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-nx28d"] Dec 10 12:15:34 crc kubenswrapper[4780]: I1210 12:15:34.066889 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-fzkvf" Dec 10 12:15:34 crc kubenswrapper[4780]: I1210 12:15:34.910701 4780 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-nx28d" podUID="613ac526-f118-45dd-85ef-6413eb4abe14" containerName="registry-server" containerID="cri-o://44e67f6d8cbfa9a34729bf3ac6b6de437717469a9fc31ef127c1f30e554e0cd4" gracePeriod=2 Dec 10 12:15:35 crc kubenswrapper[4780]: I1210 12:15:35.928982 4780 generic.go:334] "Generic (PLEG): container finished" podID="613ac526-f118-45dd-85ef-6413eb4abe14" containerID="44e67f6d8cbfa9a34729bf3ac6b6de437717469a9fc31ef127c1f30e554e0cd4" exitCode=0 Dec 10 12:15:35 crc kubenswrapper[4780]: I1210 12:15:35.929079 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-nx28d" event={"ID":"613ac526-f118-45dd-85ef-6413eb4abe14","Type":"ContainerDied","Data":"44e67f6d8cbfa9a34729bf3ac6b6de437717469a9fc31ef127c1f30e554e0cd4"} Dec 10 12:15:35 crc kubenswrapper[4780]: E1210 12:15:35.942422 4780 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod613ac526_f118_45dd_85ef_6413eb4abe14.slice/crio-44e67f6d8cbfa9a34729bf3ac6b6de437717469a9fc31ef127c1f30e554e0cd4.scope\": RecentStats: unable to find data in memory cache]" Dec 10 12:15:36 crc kubenswrapper[4780]: I1210 12:15:36.258953 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-fzkvf"] Dec 10 12:15:36 crc kubenswrapper[4780]: I1210 12:15:36.259968 4780 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-fzkvf" podUID="e889bb4f-2232-42dd-8105-10a90679ddaf" containerName="registry-server" containerID="cri-o://50f8902f7aa62fe6c3abca6e34d0eac7cfc1639344e2623b61c4c143fc978242" gracePeriod=2 Dec 10 12:15:36 crc kubenswrapper[4780]: I1210 12:15:36.819861 4780 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-nx28d" Dec 10 12:15:36 crc kubenswrapper[4780]: I1210 12:15:36.868035 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/613ac526-f118-45dd-85ef-6413eb4abe14-catalog-content\") pod \"613ac526-f118-45dd-85ef-6413eb4abe14\" (UID: \"613ac526-f118-45dd-85ef-6413eb4abe14\") " Dec 10 12:15:36 crc kubenswrapper[4780]: I1210 12:15:36.869819 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/613ac526-f118-45dd-85ef-6413eb4abe14-utilities\") pod \"613ac526-f118-45dd-85ef-6413eb4abe14\" (UID: \"613ac526-f118-45dd-85ef-6413eb4abe14\") " Dec 10 12:15:36 crc kubenswrapper[4780]: I1210 12:15:36.870029 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-f77lt\" (UniqueName: \"kubernetes.io/projected/613ac526-f118-45dd-85ef-6413eb4abe14-kube-api-access-f77lt\") pod \"613ac526-f118-45dd-85ef-6413eb4abe14\" (UID: \"613ac526-f118-45dd-85ef-6413eb4abe14\") " Dec 10 12:15:36 crc kubenswrapper[4780]: I1210 12:15:36.871446 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/613ac526-f118-45dd-85ef-6413eb4abe14-utilities" (OuterVolumeSpecName: "utilities") pod "613ac526-f118-45dd-85ef-6413eb4abe14" (UID: "613ac526-f118-45dd-85ef-6413eb4abe14"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 12:15:36 crc kubenswrapper[4780]: I1210 12:15:36.880103 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/613ac526-f118-45dd-85ef-6413eb4abe14-kube-api-access-f77lt" (OuterVolumeSpecName: "kube-api-access-f77lt") pod "613ac526-f118-45dd-85ef-6413eb4abe14" (UID: "613ac526-f118-45dd-85ef-6413eb4abe14"). InnerVolumeSpecName "kube-api-access-f77lt". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 12:15:36 crc kubenswrapper[4780]: I1210 12:15:36.946192 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-nx28d" event={"ID":"613ac526-f118-45dd-85ef-6413eb4abe14","Type":"ContainerDied","Data":"e83dfd0c492ecefa3013b9467f7848da8efbce298b402306733deda01fac8452"} Dec 10 12:15:36 crc kubenswrapper[4780]: I1210 12:15:36.946857 4780 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-nx28d" Dec 10 12:15:36 crc kubenswrapper[4780]: I1210 12:15:36.948516 4780 scope.go:117] "RemoveContainer" containerID="44e67f6d8cbfa9a34729bf3ac6b6de437717469a9fc31ef127c1f30e554e0cd4" Dec 10 12:15:36 crc kubenswrapper[4780]: I1210 12:15:36.953421 4780 generic.go:334] "Generic (PLEG): container finished" podID="e889bb4f-2232-42dd-8105-10a90679ddaf" containerID="50f8902f7aa62fe6c3abca6e34d0eac7cfc1639344e2623b61c4c143fc978242" exitCode=0 Dec 10 12:15:36 crc kubenswrapper[4780]: I1210 12:15:36.953468 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-fzkvf" event={"ID":"e889bb4f-2232-42dd-8105-10a90679ddaf","Type":"ContainerDied","Data":"50f8902f7aa62fe6c3abca6e34d0eac7cfc1639344e2623b61c4c143fc978242"} Dec 10 12:15:36 crc kubenswrapper[4780]: I1210 12:15:36.953500 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-fzkvf" event={"ID":"e889bb4f-2232-42dd-8105-10a90679ddaf","Type":"ContainerDied","Data":"bbbd9f8ec8a7b604c0819a962732a986394836ce0466d572b4dd86989a7791b0"} Dec 10 12:15:36 crc kubenswrapper[4780]: I1210 12:15:36.953515 4780 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="bbbd9f8ec8a7b604c0819a962732a986394836ce0466d572b4dd86989a7791b0" Dec 10 12:15:36 crc kubenswrapper[4780]: I1210 12:15:36.954878 4780 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-fzkvf" Dec 10 12:15:36 crc kubenswrapper[4780]: I1210 12:15:36.959738 4780 scope.go:117] "RemoveContainer" containerID="780e216eb9a89407d8eede2a2a33f67ab4bb1144b34a671b270f0500ea31a372" Dec 10 12:15:36 crc kubenswrapper[4780]: E1210 12:15:36.960334 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xhdr5_openshift-machine-config-operator(6bf1dca1-b191-4796-b326-baac53e84045)\"" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" podUID="6bf1dca1-b191-4796-b326-baac53e84045" Dec 10 12:15:36 crc kubenswrapper[4780]: I1210 12:15:36.976945 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e889bb4f-2232-42dd-8105-10a90679ddaf-catalog-content\") pod \"e889bb4f-2232-42dd-8105-10a90679ddaf\" (UID: \"e889bb4f-2232-42dd-8105-10a90679ddaf\") " Dec 10 12:15:36 crc kubenswrapper[4780]: I1210 12:15:36.977588 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e889bb4f-2232-42dd-8105-10a90679ddaf-utilities\") pod \"e889bb4f-2232-42dd-8105-10a90679ddaf\" (UID: \"e889bb4f-2232-42dd-8105-10a90679ddaf\") " Dec 10 12:15:36 crc kubenswrapper[4780]: I1210 12:15:36.977677 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-srjj9\" (UniqueName: \"kubernetes.io/projected/e889bb4f-2232-42dd-8105-10a90679ddaf-kube-api-access-srjj9\") pod \"e889bb4f-2232-42dd-8105-10a90679ddaf\" (UID: \"e889bb4f-2232-42dd-8105-10a90679ddaf\") " Dec 10 12:15:36 crc kubenswrapper[4780]: I1210 12:15:36.982794 4780 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/613ac526-f118-45dd-85ef-6413eb4abe14-utilities\") on node \"crc\" 
DevicePath \"\"" Dec 10 12:15:36 crc kubenswrapper[4780]: I1210 12:15:36.982843 4780 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-f77lt\" (UniqueName: \"kubernetes.io/projected/613ac526-f118-45dd-85ef-6413eb4abe14-kube-api-access-f77lt\") on node \"crc\" DevicePath \"\"" Dec 10 12:15:36 crc kubenswrapper[4780]: I1210 12:15:36.985138 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e889bb4f-2232-42dd-8105-10a90679ddaf-utilities" (OuterVolumeSpecName: "utilities") pod "e889bb4f-2232-42dd-8105-10a90679ddaf" (UID: "e889bb4f-2232-42dd-8105-10a90679ddaf"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 12:15:36 crc kubenswrapper[4780]: I1210 12:15:36.985812 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e889bb4f-2232-42dd-8105-10a90679ddaf-kube-api-access-srjj9" (OuterVolumeSpecName: "kube-api-access-srjj9") pod "e889bb4f-2232-42dd-8105-10a90679ddaf" (UID: "e889bb4f-2232-42dd-8105-10a90679ddaf"). InnerVolumeSpecName "kube-api-access-srjj9". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 12:15:36 crc kubenswrapper[4780]: I1210 12:15:36.989788 4780 scope.go:117] "RemoveContainer" containerID="e18b8a28e2a0cf12e157332fc81ef53389d5076a73d1e641f6f285a3f2d5efe4" Dec 10 12:15:37 crc kubenswrapper[4780]: I1210 12:15:37.024971 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/613ac526-f118-45dd-85ef-6413eb4abe14-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "613ac526-f118-45dd-85ef-6413eb4abe14" (UID: "613ac526-f118-45dd-85ef-6413eb4abe14"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 12:15:37 crc kubenswrapper[4780]: I1210 12:15:37.034529 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e889bb4f-2232-42dd-8105-10a90679ddaf-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "e889bb4f-2232-42dd-8105-10a90679ddaf" (UID: "e889bb4f-2232-42dd-8105-10a90679ddaf"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 12:15:37 crc kubenswrapper[4780]: I1210 12:15:37.098523 4780 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e889bb4f-2232-42dd-8105-10a90679ddaf-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 10 12:15:37 crc kubenswrapper[4780]: I1210 12:15:37.098599 4780 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/613ac526-f118-45dd-85ef-6413eb4abe14-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 10 12:15:37 crc kubenswrapper[4780]: I1210 12:15:37.098611 4780 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e889bb4f-2232-42dd-8105-10a90679ddaf-utilities\") on node \"crc\" DevicePath \"\"" Dec 10 12:15:37 crc kubenswrapper[4780]: I1210 12:15:37.098623 4780 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-srjj9\" (UniqueName: \"kubernetes.io/projected/e889bb4f-2232-42dd-8105-10a90679ddaf-kube-api-access-srjj9\") on node \"crc\" DevicePath \"\"" Dec 10 12:15:37 crc kubenswrapper[4780]: I1210 12:15:37.117645 4780 scope.go:117] "RemoveContainer" containerID="2aae143c57c7a7b4a2fbbc94db22d8b70921c315f127b5f14cc9cb38d4b0e16e" Dec 10 12:15:37 crc kubenswrapper[4780]: I1210 12:15:37.305163 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-nx28d"] Dec 10 12:15:37 crc kubenswrapper[4780]: I1210 12:15:37.319596 4780 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-nx28d"] Dec 10 12:15:37 crc kubenswrapper[4780]: E1210 12:15:37.962697 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 12:15:37 crc kubenswrapper[4780]: I1210 12:15:37.968401 4780 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-fzkvf" Dec 10 12:15:37 crc kubenswrapper[4780]: I1210 12:15:37.977186 4780 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="613ac526-f118-45dd-85ef-6413eb4abe14" path="/var/lib/kubelet/pods/613ac526-f118-45dd-85ef-6413eb4abe14/volumes" Dec 10 12:15:38 crc kubenswrapper[4780]: I1210 12:15:38.021783 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-fzkvf"] Dec 10 12:15:38 crc kubenswrapper[4780]: I1210 12:15:38.033300 4780 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-fzkvf"] Dec 10 12:15:39 crc kubenswrapper[4780]: I1210 12:15:39.974021 4780 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e889bb4f-2232-42dd-8105-10a90679ddaf" path="/var/lib/kubelet/pods/e889bb4f-2232-42dd-8105-10a90679ddaf/volumes" Dec 10 12:15:40 crc kubenswrapper[4780]: E1210 12:15:40.962554 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 12:15:48 crc kubenswrapper[4780]: I1210 12:15:48.468534 4780 scope.go:117] "RemoveContainer" containerID="489a3ff254e426f632724274689fa2866e1cb92ce421ecb30dc54aa3b1fbe22b" Dec 10 12:15:49 crc kubenswrapper[4780]: I1210 12:15:49.961572 4780 scope.go:117] "RemoveContainer" containerID="780e216eb9a89407d8eede2a2a33f67ab4bb1144b34a671b270f0500ea31a372" Dec 10 12:15:49 crc kubenswrapper[4780]: E1210 12:15:49.962430 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 12:15:49 crc kubenswrapper[4780]: E1210 12:15:49.962692 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xhdr5_openshift-machine-config-operator(6bf1dca1-b191-4796-b326-baac53e84045)\"" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" podUID="6bf1dca1-b191-4796-b326-baac53e84045" Dec 10 12:15:55 crc kubenswrapper[4780]: E1210 12:15:55.978555 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 12:16:00 crc kubenswrapper[4780]: I1210 12:16:00.960596 4780 scope.go:117] "RemoveContainer" containerID="780e216eb9a89407d8eede2a2a33f67ab4bb1144b34a671b270f0500ea31a372" Dec 10 12:16:00 crc kubenswrapper[4780]: E1210 12:16:00.962175 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xhdr5_openshift-machine-config-operator(6bf1dca1-b191-4796-b326-baac53e84045)\"" 
pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" podUID="6bf1dca1-b191-4796-b326-baac53e84045" Dec 10 12:16:02 crc kubenswrapper[4780]: I1210 12:16:02.050273 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/download-cache-edpm-deployment-openstack-edpm-ipam-v4lhh"] Dec 10 12:16:02 crc kubenswrapper[4780]: E1210 12:16:02.052489 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="613ac526-f118-45dd-85ef-6413eb4abe14" containerName="registry-server" Dec 10 12:16:02 crc kubenswrapper[4780]: I1210 12:16:02.052596 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="613ac526-f118-45dd-85ef-6413eb4abe14" containerName="registry-server" Dec 10 12:16:02 crc kubenswrapper[4780]: E1210 12:16:02.052730 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="613ac526-f118-45dd-85ef-6413eb4abe14" containerName="extract-content" Dec 10 12:16:02 crc kubenswrapper[4780]: I1210 12:16:02.052814 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="613ac526-f118-45dd-85ef-6413eb4abe14" containerName="extract-content" Dec 10 12:16:02 crc kubenswrapper[4780]: E1210 12:16:02.052896 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e889bb4f-2232-42dd-8105-10a90679ddaf" containerName="registry-server" Dec 10 12:16:02 crc kubenswrapper[4780]: I1210 12:16:02.052987 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="e889bb4f-2232-42dd-8105-10a90679ddaf" containerName="registry-server" Dec 10 12:16:02 crc kubenswrapper[4780]: E1210 12:16:02.053084 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e889bb4f-2232-42dd-8105-10a90679ddaf" containerName="extract-content" Dec 10 12:16:02 crc kubenswrapper[4780]: I1210 12:16:02.053148 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="e889bb4f-2232-42dd-8105-10a90679ddaf" containerName="extract-content" Dec 10 12:16:02 crc kubenswrapper[4780]: E1210 12:16:02.053220 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="613ac526-f118-45dd-85ef-6413eb4abe14" containerName="extract-utilities" Dec 10 12:16:02 crc kubenswrapper[4780]: I1210 12:16:02.053287 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="613ac526-f118-45dd-85ef-6413eb4abe14" containerName="extract-utilities" Dec 10 12:16:02 crc kubenswrapper[4780]: E1210 12:16:02.053378 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e889bb4f-2232-42dd-8105-10a90679ddaf" containerName="extract-utilities" Dec 10 12:16:02 crc kubenswrapper[4780]: I1210 12:16:02.053441 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="e889bb4f-2232-42dd-8105-10a90679ddaf" containerName="extract-utilities" Dec 10 12:16:02 crc kubenswrapper[4780]: E1210 12:16:02.053506 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="adc9e421-64ca-4c6a-a5ff-7ac43203aa2b" containerName="collect-profiles" Dec 10 12:16:02 crc kubenswrapper[4780]: I1210 12:16:02.053600 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="adc9e421-64ca-4c6a-a5ff-7ac43203aa2b" containerName="collect-profiles" Dec 10 12:16:02 crc kubenswrapper[4780]: I1210 12:16:02.053998 4780 memory_manager.go:354] "RemoveStaleState removing state" podUID="adc9e421-64ca-4c6a-a5ff-7ac43203aa2b" containerName="collect-profiles" Dec 10 12:16:02 crc kubenswrapper[4780]: I1210 12:16:02.054147 4780 memory_manager.go:354] "RemoveStaleState removing state" podUID="e889bb4f-2232-42dd-8105-10a90679ddaf" containerName="registry-server" Dec 10 12:16:02 crc kubenswrapper[4780]: I1210 
12:16:02.054236 4780 memory_manager.go:354] "RemoveStaleState removing state" podUID="613ac526-f118-45dd-85ef-6413eb4abe14" containerName="registry-server" Dec 10 12:16:02 crc kubenswrapper[4780]: I1210 12:16:02.055448 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-v4lhh" Dec 10 12:16:02 crc kubenswrapper[4780]: I1210 12:16:02.059398 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Dec 10 12:16:02 crc kubenswrapper[4780]: I1210 12:16:02.059628 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Dec 10 12:16:02 crc kubenswrapper[4780]: I1210 12:16:02.059771 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Dec 10 12:16:02 crc kubenswrapper[4780]: I1210 12:16:02.060080 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-p2qrb" Dec 10 12:16:02 crc kubenswrapper[4780]: I1210 12:16:02.066517 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/download-cache-edpm-deployment-openstack-edpm-ipam-v4lhh"] Dec 10 12:16:02 crc kubenswrapper[4780]: I1210 12:16:02.129661 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rffkp\" (UniqueName: \"kubernetes.io/projected/30f945df-c349-44c8-8276-542b90117955-kube-api-access-rffkp\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-v4lhh\" (UID: \"30f945df-c349-44c8-8276-542b90117955\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-v4lhh" Dec 10 12:16:02 crc kubenswrapper[4780]: I1210 12:16:02.129907 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/30f945df-c349-44c8-8276-542b90117955-inventory\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-v4lhh\" (UID: \"30f945df-c349-44c8-8276-542b90117955\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-v4lhh" Dec 10 12:16:02 crc kubenswrapper[4780]: I1210 12:16:02.129992 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/30f945df-c349-44c8-8276-542b90117955-ssh-key\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-v4lhh\" (UID: \"30f945df-c349-44c8-8276-542b90117955\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-v4lhh" Dec 10 12:16:02 crc kubenswrapper[4780]: I1210 12:16:02.232641 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rffkp\" (UniqueName: \"kubernetes.io/projected/30f945df-c349-44c8-8276-542b90117955-kube-api-access-rffkp\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-v4lhh\" (UID: \"30f945df-c349-44c8-8276-542b90117955\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-v4lhh" Dec 10 12:16:02 crc kubenswrapper[4780]: I1210 12:16:02.233198 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/30f945df-c349-44c8-8276-542b90117955-inventory\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-v4lhh\" (UID: \"30f945df-c349-44c8-8276-542b90117955\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-v4lhh" Dec 10 
12:16:02 crc kubenswrapper[4780]: I1210 12:16:02.234282 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/30f945df-c349-44c8-8276-542b90117955-ssh-key\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-v4lhh\" (UID: \"30f945df-c349-44c8-8276-542b90117955\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-v4lhh" Dec 10 12:16:02 crc kubenswrapper[4780]: I1210 12:16:02.241714 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/30f945df-c349-44c8-8276-542b90117955-ssh-key\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-v4lhh\" (UID: \"30f945df-c349-44c8-8276-542b90117955\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-v4lhh" Dec 10 12:16:02 crc kubenswrapper[4780]: I1210 12:16:02.241714 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/30f945df-c349-44c8-8276-542b90117955-inventory\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-v4lhh\" (UID: \"30f945df-c349-44c8-8276-542b90117955\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-v4lhh" Dec 10 12:16:02 crc kubenswrapper[4780]: I1210 12:16:02.252594 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rffkp\" (UniqueName: \"kubernetes.io/projected/30f945df-c349-44c8-8276-542b90117955-kube-api-access-rffkp\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-v4lhh\" (UID: \"30f945df-c349-44c8-8276-542b90117955\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-v4lhh" Dec 10 12:16:02 crc kubenswrapper[4780]: I1210 12:16:02.389122 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-v4lhh" Dec 10 12:16:02 crc kubenswrapper[4780]: E1210 12:16:02.963375 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 12:16:03 crc kubenswrapper[4780]: I1210 12:16:03.297275 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/download-cache-edpm-deployment-openstack-edpm-ipam-v4lhh"] Dec 10 12:16:03 crc kubenswrapper[4780]: I1210 12:16:03.346048 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-v4lhh" event={"ID":"30f945df-c349-44c8-8276-542b90117955","Type":"ContainerStarted","Data":"79e5b489874fa77463cbd04b0837f85eb4c9d9ad257e6fcec2b9aa90f844d89c"} Dec 10 12:16:06 crc kubenswrapper[4780]: I1210 12:16:06.389934 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-v4lhh" event={"ID":"30f945df-c349-44c8-8276-542b90117955","Type":"ContainerStarted","Data":"a71e6103819457e17db16eb013919c953492cb78ae69857ca64ac0a2ea548d15"} Dec 10 12:16:06 crc kubenswrapper[4780]: I1210 12:16:06.439148 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-v4lhh" podStartSLOduration=2.6469943049999998 podStartE2EDuration="4.439122005s" podCreationTimestamp="2025-12-10 12:16:02 +0000 UTC" firstStartedPulling="2025-12-10 12:16:03.295336863 +0000 UTC m=+5468.148730306" lastFinishedPulling="2025-12-10 12:16:05.087464563 +0000 UTC m=+5469.940858006" observedRunningTime="2025-12-10 12:16:06.431798129 +0000 UTC m=+5471.285191592" watchObservedRunningTime="2025-12-10 12:16:06.439122005 +0000 UTC m=+5471.292515448" Dec 10 12:16:06 crc kubenswrapper[4780]: E1210 12:16:06.961636 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 12:16:13 crc kubenswrapper[4780]: I1210 12:16:13.959779 4780 scope.go:117] "RemoveContainer" containerID="780e216eb9a89407d8eede2a2a33f67ab4bb1144b34a671b270f0500ea31a372" Dec 10 12:16:13 crc kubenswrapper[4780]: E1210 12:16:13.961348 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xhdr5_openshift-machine-config-operator(6bf1dca1-b191-4796-b326-baac53e84045)\"" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" podUID="6bf1dca1-b191-4796-b326-baac53e84045" Dec 10 12:16:16 crc kubenswrapper[4780]: E1210 12:16:16.964372 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 12:16:21 crc 
kubenswrapper[4780]: E1210 12:16:21.963630 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 12:16:28 crc kubenswrapper[4780]: I1210 12:16:28.026825 4780 scope.go:117] "RemoveContainer" containerID="780e216eb9a89407d8eede2a2a33f67ab4bb1144b34a671b270f0500ea31a372" Dec 10 12:16:28 crc kubenswrapper[4780]: E1210 12:16:28.027945 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xhdr5_openshift-machine-config-operator(6bf1dca1-b191-4796-b326-baac53e84045)\"" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" podUID="6bf1dca1-b191-4796-b326-baac53e84045" Dec 10 12:16:29 crc kubenswrapper[4780]: I1210 12:16:29.237678 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-vx8pw"] Dec 10 12:16:29 crc kubenswrapper[4780]: I1210 12:16:29.245940 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-vx8pw" Dec 10 12:16:29 crc kubenswrapper[4780]: I1210 12:16:29.272727 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-vx8pw"] Dec 10 12:16:29 crc kubenswrapper[4780]: I1210 12:16:29.333336 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5d629502-fd3a-4878-b244-a838d3d93349-catalog-content\") pod \"certified-operators-vx8pw\" (UID: \"5d629502-fd3a-4878-b244-a838d3d93349\") " pod="openshift-marketplace/certified-operators-vx8pw" Dec 10 12:16:29 crc kubenswrapper[4780]: I1210 12:16:29.333447 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5d629502-fd3a-4878-b244-a838d3d93349-utilities\") pod \"certified-operators-vx8pw\" (UID: \"5d629502-fd3a-4878-b244-a838d3d93349\") " pod="openshift-marketplace/certified-operators-vx8pw" Dec 10 12:16:29 crc kubenswrapper[4780]: I1210 12:16:29.333563 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-29tjn\" (UniqueName: \"kubernetes.io/projected/5d629502-fd3a-4878-b244-a838d3d93349-kube-api-access-29tjn\") pod \"certified-operators-vx8pw\" (UID: \"5d629502-fd3a-4878-b244-a838d3d93349\") " pod="openshift-marketplace/certified-operators-vx8pw" Dec 10 12:16:29 crc kubenswrapper[4780]: I1210 12:16:29.436105 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5d629502-fd3a-4878-b244-a838d3d93349-catalog-content\") pod \"certified-operators-vx8pw\" (UID: \"5d629502-fd3a-4878-b244-a838d3d93349\") " pod="openshift-marketplace/certified-operators-vx8pw" Dec 10 12:16:29 crc kubenswrapper[4780]: I1210 12:16:29.436798 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5d629502-fd3a-4878-b244-a838d3d93349-utilities\") pod \"certified-operators-vx8pw\" (UID: 
\"5d629502-fd3a-4878-b244-a838d3d93349\") " pod="openshift-marketplace/certified-operators-vx8pw" Dec 10 12:16:29 crc kubenswrapper[4780]: I1210 12:16:29.436812 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5d629502-fd3a-4878-b244-a838d3d93349-catalog-content\") pod \"certified-operators-vx8pw\" (UID: \"5d629502-fd3a-4878-b244-a838d3d93349\") " pod="openshift-marketplace/certified-operators-vx8pw" Dec 10 12:16:29 crc kubenswrapper[4780]: I1210 12:16:29.437060 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-29tjn\" (UniqueName: \"kubernetes.io/projected/5d629502-fd3a-4878-b244-a838d3d93349-kube-api-access-29tjn\") pod \"certified-operators-vx8pw\" (UID: \"5d629502-fd3a-4878-b244-a838d3d93349\") " pod="openshift-marketplace/certified-operators-vx8pw" Dec 10 12:16:29 crc kubenswrapper[4780]: I1210 12:16:29.437166 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5d629502-fd3a-4878-b244-a838d3d93349-utilities\") pod \"certified-operators-vx8pw\" (UID: \"5d629502-fd3a-4878-b244-a838d3d93349\") " pod="openshift-marketplace/certified-operators-vx8pw" Dec 10 12:16:29 crc kubenswrapper[4780]: I1210 12:16:29.467127 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-29tjn\" (UniqueName: \"kubernetes.io/projected/5d629502-fd3a-4878-b244-a838d3d93349-kube-api-access-29tjn\") pod \"certified-operators-vx8pw\" (UID: \"5d629502-fd3a-4878-b244-a838d3d93349\") " pod="openshift-marketplace/certified-operators-vx8pw" Dec 10 12:16:29 crc kubenswrapper[4780]: I1210 12:16:29.604498 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-vx8pw" Dec 10 12:16:30 crc kubenswrapper[4780]: I1210 12:16:30.257955 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-vx8pw"] Dec 10 12:16:31 crc kubenswrapper[4780]: E1210 12:16:31.092102 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 12:16:31 crc kubenswrapper[4780]: I1210 12:16:31.107346 4780 generic.go:334] "Generic (PLEG): container finished" podID="5d629502-fd3a-4878-b244-a838d3d93349" containerID="488b23082edcb5e11d8ae1e4fea3bc12bed0ca28677cbb7220eb8451544896f9" exitCode=0 Dec 10 12:16:31 crc kubenswrapper[4780]: I1210 12:16:31.107412 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-vx8pw" event={"ID":"5d629502-fd3a-4878-b244-a838d3d93349","Type":"ContainerDied","Data":"488b23082edcb5e11d8ae1e4fea3bc12bed0ca28677cbb7220eb8451544896f9"} Dec 10 12:16:31 crc kubenswrapper[4780]: I1210 12:16:31.107453 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-vx8pw" event={"ID":"5d629502-fd3a-4878-b244-a838d3d93349","Type":"ContainerStarted","Data":"08a453a0d8dc7229b23b1d6ef9941646ad92f5daf8e6c945f13d5be8fb0a1be5"} Dec 10 12:16:33 crc kubenswrapper[4780]: I1210 12:16:33.206543 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-vx8pw" event={"ID":"5d629502-fd3a-4878-b244-a838d3d93349","Type":"ContainerStarted","Data":"46485b1e565f4b817f87461a6c83d405069f148399ab9bdb89f0d716ce9aed18"} Dec 10 12:16:34 crc kubenswrapper[4780]: I1210 12:16:34.231227 4780 generic.go:334] "Generic (PLEG): container finished" podID="5d629502-fd3a-4878-b244-a838d3d93349" containerID="46485b1e565f4b817f87461a6c83d405069f148399ab9bdb89f0d716ce9aed18" exitCode=0 Dec 10 12:16:34 crc kubenswrapper[4780]: I1210 12:16:34.232380 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-vx8pw" event={"ID":"5d629502-fd3a-4878-b244-a838d3d93349","Type":"ContainerDied","Data":"46485b1e565f4b817f87461a6c83d405069f148399ab9bdb89f0d716ce9aed18"} Dec 10 12:16:34 crc kubenswrapper[4780]: E1210 12:16:34.960537 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 12:16:36 crc kubenswrapper[4780]: I1210 12:16:36.261429 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-vx8pw" event={"ID":"5d629502-fd3a-4878-b244-a838d3d93349","Type":"ContainerStarted","Data":"fdbcd12fe5f64a282649e0a1c5b5bead3c5bf29507ec5814ce73474d7fe98e5a"} Dec 10 12:16:36 crc kubenswrapper[4780]: I1210 12:16:36.291227 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-vx8pw" podStartSLOduration=3.516243056 podStartE2EDuration="7.29120046s" podCreationTimestamp="2025-12-10 12:16:29 +0000 UTC" firstStartedPulling="2025-12-10 12:16:31.110913515 +0000 UTC 
m=+5495.964306958" lastFinishedPulling="2025-12-10 12:16:34.885870929 +0000 UTC m=+5499.739264362" observedRunningTime="2025-12-10 12:16:36.290165374 +0000 UTC m=+5501.143558897" watchObservedRunningTime="2025-12-10 12:16:36.29120046 +0000 UTC m=+5501.144593903" Dec 10 12:16:39 crc kubenswrapper[4780]: I1210 12:16:39.605002 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-vx8pw" Dec 10 12:16:39 crc kubenswrapper[4780]: I1210 12:16:39.605836 4780 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-vx8pw" Dec 10 12:16:39 crc kubenswrapper[4780]: I1210 12:16:39.670064 4780 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-vx8pw" Dec 10 12:16:40 crc kubenswrapper[4780]: I1210 12:16:40.384450 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-vx8pw" Dec 10 12:16:40 crc kubenswrapper[4780]: I1210 12:16:40.464536 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-vx8pw"] Dec 10 12:16:42 crc kubenswrapper[4780]: I1210 12:16:42.346784 4780 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-vx8pw" podUID="5d629502-fd3a-4878-b244-a838d3d93349" containerName="registry-server" containerID="cri-o://fdbcd12fe5f64a282649e0a1c5b5bead3c5bf29507ec5814ce73474d7fe98e5a" gracePeriod=2 Dec 10 12:16:42 crc kubenswrapper[4780]: I1210 12:16:42.959630 4780 scope.go:117] "RemoveContainer" containerID="780e216eb9a89407d8eede2a2a33f67ab4bb1144b34a671b270f0500ea31a372" Dec 10 12:16:42 crc kubenswrapper[4780]: E1210 12:16:42.960890 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xhdr5_openshift-machine-config-operator(6bf1dca1-b191-4796-b326-baac53e84045)\"" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" podUID="6bf1dca1-b191-4796-b326-baac53e84045" Dec 10 12:16:42 crc kubenswrapper[4780]: I1210 12:16:42.993778 4780 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-vx8pw" Dec 10 12:16:43 crc kubenswrapper[4780]: I1210 12:16:43.100371 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5d629502-fd3a-4878-b244-a838d3d93349-catalog-content\") pod \"5d629502-fd3a-4878-b244-a838d3d93349\" (UID: \"5d629502-fd3a-4878-b244-a838d3d93349\") " Dec 10 12:16:43 crc kubenswrapper[4780]: I1210 12:16:43.100779 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5d629502-fd3a-4878-b244-a838d3d93349-utilities\") pod \"5d629502-fd3a-4878-b244-a838d3d93349\" (UID: \"5d629502-fd3a-4878-b244-a838d3d93349\") " Dec 10 12:16:43 crc kubenswrapper[4780]: I1210 12:16:43.100862 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-29tjn\" (UniqueName: \"kubernetes.io/projected/5d629502-fd3a-4878-b244-a838d3d93349-kube-api-access-29tjn\") pod \"5d629502-fd3a-4878-b244-a838d3d93349\" (UID: \"5d629502-fd3a-4878-b244-a838d3d93349\") " Dec 10 12:16:43 crc kubenswrapper[4780]: I1210 12:16:43.102001 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5d629502-fd3a-4878-b244-a838d3d93349-utilities" (OuterVolumeSpecName: "utilities") pod "5d629502-fd3a-4878-b244-a838d3d93349" (UID: "5d629502-fd3a-4878-b244-a838d3d93349"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 12:16:43 crc kubenswrapper[4780]: I1210 12:16:43.109440 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5d629502-fd3a-4878-b244-a838d3d93349-kube-api-access-29tjn" (OuterVolumeSpecName: "kube-api-access-29tjn") pod "5d629502-fd3a-4878-b244-a838d3d93349" (UID: "5d629502-fd3a-4878-b244-a838d3d93349"). InnerVolumeSpecName "kube-api-access-29tjn". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 12:16:43 crc kubenswrapper[4780]: I1210 12:16:43.159275 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5d629502-fd3a-4878-b244-a838d3d93349-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "5d629502-fd3a-4878-b244-a838d3d93349" (UID: "5d629502-fd3a-4878-b244-a838d3d93349"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 12:16:43 crc kubenswrapper[4780]: I1210 12:16:43.204970 4780 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5d629502-fd3a-4878-b244-a838d3d93349-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 10 12:16:43 crc kubenswrapper[4780]: I1210 12:16:43.205026 4780 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5d629502-fd3a-4878-b244-a838d3d93349-utilities\") on node \"crc\" DevicePath \"\"" Dec 10 12:16:43 crc kubenswrapper[4780]: I1210 12:16:43.205039 4780 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-29tjn\" (UniqueName: \"kubernetes.io/projected/5d629502-fd3a-4878-b244-a838d3d93349-kube-api-access-29tjn\") on node \"crc\" DevicePath \"\"" Dec 10 12:16:43 crc kubenswrapper[4780]: I1210 12:16:43.363365 4780 generic.go:334] "Generic (PLEG): container finished" podID="5d629502-fd3a-4878-b244-a838d3d93349" containerID="fdbcd12fe5f64a282649e0a1c5b5bead3c5bf29507ec5814ce73474d7fe98e5a" exitCode=0 Dec 10 12:16:43 crc kubenswrapper[4780]: I1210 12:16:43.363431 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-vx8pw" event={"ID":"5d629502-fd3a-4878-b244-a838d3d93349","Type":"ContainerDied","Data":"fdbcd12fe5f64a282649e0a1c5b5bead3c5bf29507ec5814ce73474d7fe98e5a"} Dec 10 12:16:43 crc kubenswrapper[4780]: I1210 12:16:43.363470 4780 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-vx8pw" Dec 10 12:16:43 crc kubenswrapper[4780]: I1210 12:16:43.363498 4780 scope.go:117] "RemoveContainer" containerID="fdbcd12fe5f64a282649e0a1c5b5bead3c5bf29507ec5814ce73474d7fe98e5a" Dec 10 12:16:43 crc kubenswrapper[4780]: I1210 12:16:43.363477 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-vx8pw" event={"ID":"5d629502-fd3a-4878-b244-a838d3d93349","Type":"ContainerDied","Data":"08a453a0d8dc7229b23b1d6ef9941646ad92f5daf8e6c945f13d5be8fb0a1be5"} Dec 10 12:16:43 crc kubenswrapper[4780]: I1210 12:16:43.444964 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-vx8pw"] Dec 10 12:16:43 crc kubenswrapper[4780]: I1210 12:16:43.446882 4780 scope.go:117] "RemoveContainer" containerID="46485b1e565f4b817f87461a6c83d405069f148399ab9bdb89f0d716ce9aed18" Dec 10 12:16:43 crc kubenswrapper[4780]: I1210 12:16:43.462109 4780 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-vx8pw"] Dec 10 12:16:43 crc kubenswrapper[4780]: I1210 12:16:43.979420 4780 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5d629502-fd3a-4878-b244-a838d3d93349" path="/var/lib/kubelet/pods/5d629502-fd3a-4878-b244-a838d3d93349/volumes" Dec 10 12:16:45 crc kubenswrapper[4780]: I1210 12:16:45.026480 4780 scope.go:117] "RemoveContainer" containerID="488b23082edcb5e11d8ae1e4fea3bc12bed0ca28677cbb7220eb8451544896f9" Dec 10 12:16:45 crc kubenswrapper[4780]: I1210 12:16:45.187403 4780 scope.go:117] "RemoveContainer" containerID="fdbcd12fe5f64a282649e0a1c5b5bead3c5bf29507ec5814ce73474d7fe98e5a" Dec 10 12:16:45 crc kubenswrapper[4780]: E1210 12:16:45.193805 4780 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"fdbcd12fe5f64a282649e0a1c5b5bead3c5bf29507ec5814ce73474d7fe98e5a\": container with ID 
starting with fdbcd12fe5f64a282649e0a1c5b5bead3c5bf29507ec5814ce73474d7fe98e5a not found: ID does not exist" containerID="fdbcd12fe5f64a282649e0a1c5b5bead3c5bf29507ec5814ce73474d7fe98e5a" Dec 10 12:16:45 crc kubenswrapper[4780]: I1210 12:16:45.193892 4780 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fdbcd12fe5f64a282649e0a1c5b5bead3c5bf29507ec5814ce73474d7fe98e5a"} err="failed to get container status \"fdbcd12fe5f64a282649e0a1c5b5bead3c5bf29507ec5814ce73474d7fe98e5a\": rpc error: code = NotFound desc = could not find container \"fdbcd12fe5f64a282649e0a1c5b5bead3c5bf29507ec5814ce73474d7fe98e5a\": container with ID starting with fdbcd12fe5f64a282649e0a1c5b5bead3c5bf29507ec5814ce73474d7fe98e5a not found: ID does not exist" Dec 10 12:16:45 crc kubenswrapper[4780]: I1210 12:16:45.193963 4780 scope.go:117] "RemoveContainer" containerID="46485b1e565f4b817f87461a6c83d405069f148399ab9bdb89f0d716ce9aed18" Dec 10 12:16:45 crc kubenswrapper[4780]: E1210 12:16:45.196207 4780 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"46485b1e565f4b817f87461a6c83d405069f148399ab9bdb89f0d716ce9aed18\": container with ID starting with 46485b1e565f4b817f87461a6c83d405069f148399ab9bdb89f0d716ce9aed18 not found: ID does not exist" containerID="46485b1e565f4b817f87461a6c83d405069f148399ab9bdb89f0d716ce9aed18" Dec 10 12:16:45 crc kubenswrapper[4780]: I1210 12:16:45.196245 4780 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"46485b1e565f4b817f87461a6c83d405069f148399ab9bdb89f0d716ce9aed18"} err="failed to get container status \"46485b1e565f4b817f87461a6c83d405069f148399ab9bdb89f0d716ce9aed18\": rpc error: code = NotFound desc = could not find container \"46485b1e565f4b817f87461a6c83d405069f148399ab9bdb89f0d716ce9aed18\": container with ID starting with 46485b1e565f4b817f87461a6c83d405069f148399ab9bdb89f0d716ce9aed18 not found: ID does not exist" Dec 10 12:16:45 crc kubenswrapper[4780]: I1210 12:16:45.196264 4780 scope.go:117] "RemoveContainer" containerID="488b23082edcb5e11d8ae1e4fea3bc12bed0ca28677cbb7220eb8451544896f9" Dec 10 12:16:45 crc kubenswrapper[4780]: E1210 12:16:45.197786 4780 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"488b23082edcb5e11d8ae1e4fea3bc12bed0ca28677cbb7220eb8451544896f9\": container with ID starting with 488b23082edcb5e11d8ae1e4fea3bc12bed0ca28677cbb7220eb8451544896f9 not found: ID does not exist" containerID="488b23082edcb5e11d8ae1e4fea3bc12bed0ca28677cbb7220eb8451544896f9" Dec 10 12:16:45 crc kubenswrapper[4780]: I1210 12:16:45.197855 4780 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"488b23082edcb5e11d8ae1e4fea3bc12bed0ca28677cbb7220eb8451544896f9"} err="failed to get container status \"488b23082edcb5e11d8ae1e4fea3bc12bed0ca28677cbb7220eb8451544896f9\": rpc error: code = NotFound desc = could not find container \"488b23082edcb5e11d8ae1e4fea3bc12bed0ca28677cbb7220eb8451544896f9\": container with ID starting with 488b23082edcb5e11d8ae1e4fea3bc12bed0ca28677cbb7220eb8451544896f9 not found: ID does not exist" Dec 10 12:16:45 crc kubenswrapper[4780]: E1210 12:16:45.981230 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image 
\\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 12:16:49 crc kubenswrapper[4780]: E1210 12:16:49.963150 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 12:16:55 crc kubenswrapper[4780]: I1210 12:16:55.974714 4780 scope.go:117] "RemoveContainer" containerID="780e216eb9a89407d8eede2a2a33f67ab4bb1144b34a671b270f0500ea31a372" Dec 10 12:16:55 crc kubenswrapper[4780]: E1210 12:16:55.979123 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xhdr5_openshift-machine-config-operator(6bf1dca1-b191-4796-b326-baac53e84045)\"" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" podUID="6bf1dca1-b191-4796-b326-baac53e84045" Dec 10 12:16:57 crc kubenswrapper[4780]: E1210 12:16:57.964028 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 12:17:01 crc kubenswrapper[4780]: E1210 12:17:01.962006 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 12:17:06 crc kubenswrapper[4780]: I1210 12:17:06.960370 4780 scope.go:117] "RemoveContainer" containerID="780e216eb9a89407d8eede2a2a33f67ab4bb1144b34a671b270f0500ea31a372" Dec 10 12:17:06 crc kubenswrapper[4780]: E1210 12:17:06.962125 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xhdr5_openshift-machine-config-operator(6bf1dca1-b191-4796-b326-baac53e84045)\"" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" podUID="6bf1dca1-b191-4796-b326-baac53e84045" Dec 10 12:17:08 crc kubenswrapper[4780]: E1210 12:17:08.961391 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 12:17:12 crc kubenswrapper[4780]: E1210 12:17:12.964238 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 12:17:20 crc 
kubenswrapper[4780]: I1210 12:17:20.960512 4780 scope.go:117] "RemoveContainer" containerID="780e216eb9a89407d8eede2a2a33f67ab4bb1144b34a671b270f0500ea31a372" Dec 10 12:17:20 crc kubenswrapper[4780]: E1210 12:17:20.961642 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xhdr5_openshift-machine-config-operator(6bf1dca1-b191-4796-b326-baac53e84045)\"" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" podUID="6bf1dca1-b191-4796-b326-baac53e84045" Dec 10 12:17:23 crc kubenswrapper[4780]: E1210 12:17:23.963540 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 12:17:26 crc kubenswrapper[4780]: I1210 12:17:26.962588 4780 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Dec 10 12:17:27 crc kubenswrapper[4780]: E1210 12:17:27.102123 4780 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested" Dec 10 12:17:27 crc kubenswrapper[4780]: E1210 12:17:27.102229 4780 kuberuntime_image.go:55] "Failed to pull image" err="initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested" Dec 10 12:17:27 crc kubenswrapper[4780]: E1210 12:17:27.102673 4780 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:heat-db-sync,Image:quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested,Command:[/bin/bash],Args:[-c /usr/bin/heat-manage --config-dir /etc/heat/heat.conf.d db_sync],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:true,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/heat/heat.conf.d/00-default.conf,SubPath:00-default.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:false,MountPath:/etc/heat/heat.conf.d/01-custom.conf,SubPath:01-custom.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/my.cnf,SubPath:my.cnf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-fh6ms,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42418,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:*42418,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod heat-db-sync-nd4t7_openstack(4ba2892c-316e-4819-a33c-d7b2b6803553): ErrImagePull: initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" logger="UnhandledError" Dec 10 12:17:27 crc kubenswrapper[4780]: E1210 12:17:27.104027 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ErrImagePull: \"initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 12:17:35 crc kubenswrapper[4780]: I1210 12:17:35.979623 4780 scope.go:117] "RemoveContainer" containerID="780e216eb9a89407d8eede2a2a33f67ab4bb1144b34a671b270f0500ea31a372" Dec 10 12:17:36 crc kubenswrapper[4780]: I1210 12:17:36.908152 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" event={"ID":"6bf1dca1-b191-4796-b326-baac53e84045","Type":"ContainerStarted","Data":"d9aa121d6117957daf94487eb5a96d86e219315a547b12e5697d28ed83b885b7"} Dec 10 12:17:36 crc kubenswrapper[4780]: E1210 12:17:36.964307 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 12:17:37 crc kubenswrapper[4780]: E1210 12:17:37.963078 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 12:17:48 crc kubenswrapper[4780]: E1210 12:17:48.051585 4780 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested" Dec 10 12:17:48 crc kubenswrapper[4780]: E1210 12:17:48.052721 4780 kuberuntime_image.go:55] "Failed to pull image" err="initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested" Dec 10 12:17:48 crc kubenswrapper[4780]: E1210 12:17:48.053181 4780 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:ceilometer-central-agent,Image:quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n5d9hcfh66bh66bh89h5cdh97h57ch598h68h5b5h689h56chc5h96h58ch687h5dfh5ddh645h68bhcchcdh56ch56fh9fh654hd4h8dhb9h74h59cq,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/var/lib/openstack/bin,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/openstack/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:ceilometer-central-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-gf2w8,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[/usr/bin/python3 /var/lib/openstack/bin/centralhealth.py],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:300,TimeoutSeconds:5,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ceilometer-0_openstack(317b5b7c-bb08-4441-a2ef-8c2d7390ada6): ErrImagePull: initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" logger="UnhandledError" Dec 10 12:17:48 crc kubenswrapper[4780]: E1210 12:17:48.054851 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ErrImagePull: \"initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 12:17:48 crc kubenswrapper[4780]: E1210 12:17:48.965188 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 12:18:01 crc kubenswrapper[4780]: E1210 12:18:01.963810 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 12:18:02 crc kubenswrapper[4780]: E1210 12:18:02.961878 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 12:18:16 crc kubenswrapper[4780]: E1210 12:18:16.962147 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 12:18:17 crc kubenswrapper[4780]: E1210 12:18:17.963478 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 12:18:27 crc kubenswrapper[4780]: E1210 12:18:27.965272 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 12:18:29 crc kubenswrapper[4780]: E1210 12:18:29.962257 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 12:18:38 crc kubenswrapper[4780]: E1210 12:18:38.962073 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 12:18:43 crc kubenswrapper[4780]: E1210 12:18:43.964380 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image 
\\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 12:18:50 crc kubenswrapper[4780]: E1210 12:18:50.968489 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 12:18:58 crc kubenswrapper[4780]: E1210 12:18:58.963900 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 12:19:03 crc kubenswrapper[4780]: E1210 12:19:03.961761 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 12:19:12 crc kubenswrapper[4780]: E1210 12:19:12.962213 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 12:19:15 crc kubenswrapper[4780]: E1210 12:19:15.980319 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 12:19:23 crc kubenswrapper[4780]: E1210 12:19:23.963141 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 12:19:28 crc kubenswrapper[4780]: E1210 12:19:28.966422 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 12:19:36 crc kubenswrapper[4780]: E1210 12:19:36.965513 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 12:19:40 crc kubenswrapper[4780]: E1210 12:19:40.962308 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for 
\"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 12:19:49 crc kubenswrapper[4780]: E1210 12:19:49.963831 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 12:19:53 crc kubenswrapper[4780]: E1210 12:19:53.963935 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 12:19:57 crc kubenswrapper[4780]: I1210 12:19:57.477327 4780 patch_prober.go:28] interesting pod/machine-config-daemon-xhdr5 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 10 12:19:57 crc kubenswrapper[4780]: I1210 12:19:57.478150 4780 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" podUID="6bf1dca1-b191-4796-b326-baac53e84045" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 10 12:20:01 crc kubenswrapper[4780]: E1210 12:20:01.963335 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 12:20:04 crc kubenswrapper[4780]: E1210 12:20:04.961163 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 12:20:16 crc kubenswrapper[4780]: E1210 12:20:16.962725 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 12:20:19 crc kubenswrapper[4780]: E1210 12:20:19.968421 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 12:20:27 crc kubenswrapper[4780]: I1210 12:20:27.476116 4780 patch_prober.go:28] interesting pod/machine-config-daemon-xhdr5 
container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 10 12:20:27 crc kubenswrapper[4780]: I1210 12:20:27.476728 4780 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" podUID="6bf1dca1-b191-4796-b326-baac53e84045" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 10 12:20:30 crc kubenswrapper[4780]: E1210 12:20:30.962670 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 12:20:32 crc kubenswrapper[4780]: E1210 12:20:32.962903 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 12:20:42 crc kubenswrapper[4780]: E1210 12:20:42.964365 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 12:20:47 crc kubenswrapper[4780]: E1210 12:20:47.962981 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 12:20:57 crc kubenswrapper[4780]: I1210 12:20:57.475560 4780 patch_prober.go:28] interesting pod/machine-config-daemon-xhdr5 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 10 12:20:57 crc kubenswrapper[4780]: I1210 12:20:57.476070 4780 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" podUID="6bf1dca1-b191-4796-b326-baac53e84045" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 10 12:20:57 crc kubenswrapper[4780]: I1210 12:20:57.476143 4780 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" Dec 10 12:20:57 crc kubenswrapper[4780]: I1210 12:20:57.477303 4780 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"d9aa121d6117957daf94487eb5a96d86e219315a547b12e5697d28ed83b885b7"} pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" containerMessage="Container 
machine-config-daemon failed liveness probe, will be restarted" Dec 10 12:20:57 crc kubenswrapper[4780]: I1210 12:20:57.477364 4780 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" podUID="6bf1dca1-b191-4796-b326-baac53e84045" containerName="machine-config-daemon" containerID="cri-o://d9aa121d6117957daf94487eb5a96d86e219315a547b12e5697d28ed83b885b7" gracePeriod=600 Dec 10 12:20:57 crc kubenswrapper[4780]: E1210 12:20:57.961946 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 12:20:58 crc kubenswrapper[4780]: I1210 12:20:58.326827 4780 generic.go:334] "Generic (PLEG): container finished" podID="6bf1dca1-b191-4796-b326-baac53e84045" containerID="d9aa121d6117957daf94487eb5a96d86e219315a547b12e5697d28ed83b885b7" exitCode=0 Dec 10 12:20:58 crc kubenswrapper[4780]: I1210 12:20:58.327257 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" event={"ID":"6bf1dca1-b191-4796-b326-baac53e84045","Type":"ContainerDied","Data":"d9aa121d6117957daf94487eb5a96d86e219315a547b12e5697d28ed83b885b7"} Dec 10 12:20:58 crc kubenswrapper[4780]: I1210 12:20:58.327361 4780 scope.go:117] "RemoveContainer" containerID="780e216eb9a89407d8eede2a2a33f67ab4bb1144b34a671b270f0500ea31a372" Dec 10 12:20:59 crc kubenswrapper[4780]: I1210 12:20:59.345073 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" event={"ID":"6bf1dca1-b191-4796-b326-baac53e84045","Type":"ContainerStarted","Data":"0c9e8a8764b3e192f255d986c37b68fa82367c56c8fc86fca7a6f21654558176"} Dec 10 12:21:01 crc kubenswrapper[4780]: E1210 12:21:01.962604 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 12:21:12 crc kubenswrapper[4780]: E1210 12:21:12.965258 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 12:21:15 crc kubenswrapper[4780]: E1210 12:21:15.978118 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 12:21:25 crc kubenswrapper[4780]: E1210 12:21:25.965218 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" 
podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 12:21:30 crc kubenswrapper[4780]: E1210 12:21:30.964769 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 12:21:39 crc kubenswrapper[4780]: E1210 12:21:39.962347 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 12:21:40 crc kubenswrapper[4780]: I1210 12:21:40.721343 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-tjsq4/must-gather-d5bhw"] Dec 10 12:21:40 crc kubenswrapper[4780]: E1210 12:21:40.722868 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5d629502-fd3a-4878-b244-a838d3d93349" containerName="extract-utilities" Dec 10 12:21:40 crc kubenswrapper[4780]: I1210 12:21:40.723007 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="5d629502-fd3a-4878-b244-a838d3d93349" containerName="extract-utilities" Dec 10 12:21:40 crc kubenswrapper[4780]: E1210 12:21:40.723137 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5d629502-fd3a-4878-b244-a838d3d93349" containerName="registry-server" Dec 10 12:21:40 crc kubenswrapper[4780]: I1210 12:21:40.723221 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="5d629502-fd3a-4878-b244-a838d3d93349" containerName="registry-server" Dec 10 12:21:40 crc kubenswrapper[4780]: E1210 12:21:40.723300 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5d629502-fd3a-4878-b244-a838d3d93349" containerName="extract-content" Dec 10 12:21:40 crc kubenswrapper[4780]: I1210 12:21:40.723367 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="5d629502-fd3a-4878-b244-a838d3d93349" containerName="extract-content" Dec 10 12:21:40 crc kubenswrapper[4780]: I1210 12:21:40.723787 4780 memory_manager.go:354] "RemoveStaleState removing state" podUID="5d629502-fd3a-4878-b244-a838d3d93349" containerName="registry-server" Dec 10 12:21:40 crc kubenswrapper[4780]: I1210 12:21:40.730266 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-tjsq4/must-gather-d5bhw" Dec 10 12:21:40 crc kubenswrapper[4780]: I1210 12:21:40.738366 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-must-gather-tjsq4"/"openshift-service-ca.crt" Dec 10 12:21:40 crc kubenswrapper[4780]: I1210 12:21:40.739783 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-must-gather-tjsq4"/"kube-root-ca.crt" Dec 10 12:21:40 crc kubenswrapper[4780]: I1210 12:21:40.763147 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-psn2m\" (UniqueName: \"kubernetes.io/projected/981adbf3-17b2-4e35-a59b-1f7ffd7b8bd9-kube-api-access-psn2m\") pod \"must-gather-d5bhw\" (UID: \"981adbf3-17b2-4e35-a59b-1f7ffd7b8bd9\") " pod="openshift-must-gather-tjsq4/must-gather-d5bhw" Dec 10 12:21:40 crc kubenswrapper[4780]: I1210 12:21:40.763630 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-must-gather-tjsq4"/"default-dockercfg-qvbxb" Dec 10 12:21:40 crc kubenswrapper[4780]: I1210 12:21:40.765100 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/981adbf3-17b2-4e35-a59b-1f7ffd7b8bd9-must-gather-output\") pod \"must-gather-d5bhw\" (UID: \"981adbf3-17b2-4e35-a59b-1f7ffd7b8bd9\") " pod="openshift-must-gather-tjsq4/must-gather-d5bhw" Dec 10 12:21:40 crc kubenswrapper[4780]: I1210 12:21:40.796168 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-must-gather-tjsq4/must-gather-d5bhw"] Dec 10 12:21:40 crc kubenswrapper[4780]: I1210 12:21:40.869879 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/981adbf3-17b2-4e35-a59b-1f7ffd7b8bd9-must-gather-output\") pod \"must-gather-d5bhw\" (UID: \"981adbf3-17b2-4e35-a59b-1f7ffd7b8bd9\") " pod="openshift-must-gather-tjsq4/must-gather-d5bhw" Dec 10 12:21:40 crc kubenswrapper[4780]: I1210 12:21:40.870031 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-psn2m\" (UniqueName: \"kubernetes.io/projected/981adbf3-17b2-4e35-a59b-1f7ffd7b8bd9-kube-api-access-psn2m\") pod \"must-gather-d5bhw\" (UID: \"981adbf3-17b2-4e35-a59b-1f7ffd7b8bd9\") " pod="openshift-must-gather-tjsq4/must-gather-d5bhw" Dec 10 12:21:40 crc kubenswrapper[4780]: I1210 12:21:40.871076 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/981adbf3-17b2-4e35-a59b-1f7ffd7b8bd9-must-gather-output\") pod \"must-gather-d5bhw\" (UID: \"981adbf3-17b2-4e35-a59b-1f7ffd7b8bd9\") " pod="openshift-must-gather-tjsq4/must-gather-d5bhw" Dec 10 12:21:40 crc kubenswrapper[4780]: I1210 12:21:40.916134 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-psn2m\" (UniqueName: \"kubernetes.io/projected/981adbf3-17b2-4e35-a59b-1f7ffd7b8bd9-kube-api-access-psn2m\") pod \"must-gather-d5bhw\" (UID: \"981adbf3-17b2-4e35-a59b-1f7ffd7b8bd9\") " pod="openshift-must-gather-tjsq4/must-gather-d5bhw" Dec 10 12:21:41 crc kubenswrapper[4780]: I1210 12:21:41.063234 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-tjsq4/must-gather-d5bhw" Dec 10 12:21:41 crc kubenswrapper[4780]: I1210 12:21:41.658291 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-must-gather-tjsq4/must-gather-d5bhw"] Dec 10 12:21:41 crc kubenswrapper[4780]: E1210 12:21:41.963816 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 12:21:41 crc kubenswrapper[4780]: I1210 12:21:41.983776 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-tjsq4/must-gather-d5bhw" event={"ID":"981adbf3-17b2-4e35-a59b-1f7ffd7b8bd9","Type":"ContainerStarted","Data":"f4b9354dd5c8c1ba621095dfe085dbe6e1160f8a819b5a802eccb9e80805ba90"} Dec 10 12:21:48 crc kubenswrapper[4780]: I1210 12:21:48.784598 4780 scope.go:117] "RemoveContainer" containerID="6410a1b6a13019a92afdaef4c442ef71d79dc24b574977c06509a870ac37f35b" Dec 10 12:21:52 crc kubenswrapper[4780]: E1210 12:21:52.963433 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 12:21:54 crc kubenswrapper[4780]: I1210 12:21:54.254944 4780 scope.go:117] "RemoveContainer" containerID="50f8902f7aa62fe6c3abca6e34d0eac7cfc1639344e2623b61c4c143fc978242" Dec 10 12:21:54 crc kubenswrapper[4780]: I1210 12:21:54.306314 4780 scope.go:117] "RemoveContainer" containerID="873ae3c621fd9751f617f45ac6d57b1b10f429a44c210fa185de8a7d677f6872" Dec 10 12:21:54 crc kubenswrapper[4780]: E1210 12:21:54.963414 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 12:21:55 crc kubenswrapper[4780]: I1210 12:21:55.475872 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-tjsq4/must-gather-d5bhw" event={"ID":"981adbf3-17b2-4e35-a59b-1f7ffd7b8bd9","Type":"ContainerStarted","Data":"3f40242e21888b3af54cab5c11d7153ef7dd9a71241bbfb7ceb91f2a3f4affce"} Dec 10 12:21:55 crc kubenswrapper[4780]: I1210 12:21:55.475952 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-tjsq4/must-gather-d5bhw" event={"ID":"981adbf3-17b2-4e35-a59b-1f7ffd7b8bd9","Type":"ContainerStarted","Data":"98a930fdba8a79e7c49aaf5cd7d49cb1217dd0fa947fc63c58fb7ad4a805a31a"} Dec 10 12:21:55 crc kubenswrapper[4780]: I1210 12:21:55.517822 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-must-gather-tjsq4/must-gather-d5bhw" podStartSLOduration=2.82703516 podStartE2EDuration="15.517795963s" podCreationTimestamp="2025-12-10 12:21:40 +0000 UTC" firstStartedPulling="2025-12-10 12:21:41.726444524 +0000 UTC m=+5806.579837967" lastFinishedPulling="2025-12-10 12:21:54.417205337 +0000 UTC m=+5819.270598770" observedRunningTime="2025-12-10 12:21:55.504040664 +0000 UTC m=+5820.357434107" watchObservedRunningTime="2025-12-10 
12:21:55.517795963 +0000 UTC m=+5820.371189406" Dec 10 12:22:03 crc kubenswrapper[4780]: I1210 12:22:03.557065 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-tjsq4/crc-debug-xt4xs"] Dec 10 12:22:03 crc kubenswrapper[4780]: I1210 12:22:03.560437 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-tjsq4/crc-debug-xt4xs" Dec 10 12:22:03 crc kubenswrapper[4780]: I1210 12:22:03.700189 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gblm8\" (UniqueName: \"kubernetes.io/projected/a81db9b5-c517-47f7-b874-0814650d6be2-kube-api-access-gblm8\") pod \"crc-debug-xt4xs\" (UID: \"a81db9b5-c517-47f7-b874-0814650d6be2\") " pod="openshift-must-gather-tjsq4/crc-debug-xt4xs" Dec 10 12:22:03 crc kubenswrapper[4780]: I1210 12:22:03.700947 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/a81db9b5-c517-47f7-b874-0814650d6be2-host\") pod \"crc-debug-xt4xs\" (UID: \"a81db9b5-c517-47f7-b874-0814650d6be2\") " pod="openshift-must-gather-tjsq4/crc-debug-xt4xs" Dec 10 12:22:03 crc kubenswrapper[4780]: I1210 12:22:03.803716 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gblm8\" (UniqueName: \"kubernetes.io/projected/a81db9b5-c517-47f7-b874-0814650d6be2-kube-api-access-gblm8\") pod \"crc-debug-xt4xs\" (UID: \"a81db9b5-c517-47f7-b874-0814650d6be2\") " pod="openshift-must-gather-tjsq4/crc-debug-xt4xs" Dec 10 12:22:03 crc kubenswrapper[4780]: I1210 12:22:03.803854 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/a81db9b5-c517-47f7-b874-0814650d6be2-host\") pod \"crc-debug-xt4xs\" (UID: \"a81db9b5-c517-47f7-b874-0814650d6be2\") " pod="openshift-must-gather-tjsq4/crc-debug-xt4xs" Dec 10 12:22:03 crc kubenswrapper[4780]: I1210 12:22:03.804141 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/a81db9b5-c517-47f7-b874-0814650d6be2-host\") pod \"crc-debug-xt4xs\" (UID: \"a81db9b5-c517-47f7-b874-0814650d6be2\") " pod="openshift-must-gather-tjsq4/crc-debug-xt4xs" Dec 10 12:22:03 crc kubenswrapper[4780]: I1210 12:22:03.829422 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gblm8\" (UniqueName: \"kubernetes.io/projected/a81db9b5-c517-47f7-b874-0814650d6be2-kube-api-access-gblm8\") pod \"crc-debug-xt4xs\" (UID: \"a81db9b5-c517-47f7-b874-0814650d6be2\") " pod="openshift-must-gather-tjsq4/crc-debug-xt4xs" Dec 10 12:22:03 crc kubenswrapper[4780]: I1210 12:22:03.897586 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-tjsq4/crc-debug-xt4xs" Dec 10 12:22:04 crc kubenswrapper[4780]: I1210 12:22:04.706797 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-tjsq4/crc-debug-xt4xs" event={"ID":"a81db9b5-c517-47f7-b874-0814650d6be2","Type":"ContainerStarted","Data":"7b994f4ad501baabf6023b3ad5348917860825a2d665618bb76992131a07560e"} Dec 10 12:22:05 crc kubenswrapper[4780]: E1210 12:22:05.983048 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 12:22:06 crc kubenswrapper[4780]: E1210 12:22:06.962055 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 12:22:18 crc kubenswrapper[4780]: E1210 12:22:18.963819 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 12:22:20 crc kubenswrapper[4780]: E1210 12:22:20.962692 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 12:22:21 crc kubenswrapper[4780]: E1210 12:22:21.581655 4780 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:6ab858aed98e4fe57e6b144da8e90ad5d6698bb4cc5521206f5c05809f0f9296" Dec 10 12:22:21 crc kubenswrapper[4780]: E1210 12:22:21.582100 4780 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:container-00,Image:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:6ab858aed98e4fe57e6b144da8e90ad5d6698bb4cc5521206f5c05809f0f9296,Command:[chroot /host bash -c echo 'TOOLBOX_NAME=toolbox-osp' > /root/.toolboxrc ; rm -rf \"/var/tmp/sos-osp\" && mkdir -p \"/var/tmp/sos-osp\" && sudo podman rm --force toolbox-osp; sudo --preserve-env podman pull --authfile /var/lib/kubelet/config.json registry.redhat.io/rhel9/support-tools && toolbox sos report --batch --all-logs --only-plugins block,cifs,crio,devicemapper,devices,firewall_tables,firewalld,iscsi,lvm2,memory,multipath,nfs,nis,nvme,podman,process,processor,selinux,scsi,udev,logs,crypto --tmp-dir=\"/var/tmp/sos-osp\" && if [[ \"$(ls /var/log/pods/*/{*.log.*,*/*.log.*} 2>/dev/null)\" != '' ]]; then tar --ignore-failed-read --warning=no-file-changed -cJf \"/var/tmp/sos-osp/podlogs.tar.xz\" --transform 's,^,podlogs/,' /var/log/pods/*/{*.log.*,*/*.log.*} || true; 
fi],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:TMOUT,Value:900,ValueFrom:nil,},EnvVar{Name:HOST,Value:/host,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:host,ReadOnly:false,MountPath:/host,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-gblm8,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:*true,SELinuxOptions:nil,RunAsUser:*0,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod crc-debug-xt4xs_openshift-must-gather-tjsq4(a81db9b5-c517-47f7-b874-0814650d6be2): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Dec 10 12:22:21 crc kubenswrapper[4780]: E1210 12:22:21.583269 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"container-00\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openshift-must-gather-tjsq4/crc-debug-xt4xs" podUID="a81db9b5-c517-47f7-b874-0814650d6be2" Dec 10 12:22:22 crc kubenswrapper[4780]: E1210 12:22:22.002733 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"container-00\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:6ab858aed98e4fe57e6b144da8e90ad5d6698bb4cc5521206f5c05809f0f9296\\\"\"" pod="openshift-must-gather-tjsq4/crc-debug-xt4xs" podUID="a81db9b5-c517-47f7-b874-0814650d6be2" Dec 10 12:22:32 crc kubenswrapper[4780]: I1210 12:22:32.963437 4780 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Dec 10 12:22:32 crc kubenswrapper[4780]: E1210 12:22:32.965236 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 12:22:33 crc kubenswrapper[4780]: E1210 12:22:33.046380 4780 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested" Dec 10 12:22:33 crc kubenswrapper[4780]: E1210 12:22:33.046495 4780 kuberuntime_image.go:55] "Failed to pull image" err="initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested" Dec 10 12:22:33 crc kubenswrapper[4780]: E1210 12:22:33.046738 4780 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:heat-db-sync,Image:quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested,Command:[/bin/bash],Args:[-c /usr/bin/heat-manage --config-dir /etc/heat/heat.conf.d db_sync],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:true,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/heat/heat.conf.d/00-default.conf,SubPath:00-default.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:false,MountPath:/etc/heat/heat.conf.d/01-custom.conf,SubPath:01-custom.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/my.cnf,SubPath:my.cnf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-fh6ms,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42418,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:*42418,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod heat-db-sync-nd4t7_openstack(4ba2892c-316e-4819-a33c-d7b2b6803553): ErrImagePull: initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine" logger="UnhandledError" Dec 10 12:22:33 crc kubenswrapper[4780]: E1210 12:22:33.047941 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ErrImagePull: \"initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 12:22:35 crc kubenswrapper[4780]: I1210 12:22:35.160866 4780 generic.go:334] "Generic (PLEG): container finished" podID="30f945df-c349-44c8-8276-542b90117955" containerID="a71e6103819457e17db16eb013919c953492cb78ae69857ca64ac0a2ea548d15" exitCode=2 Dec 10 12:22:35 crc kubenswrapper[4780]: I1210 12:22:35.161169 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-v4lhh" event={"ID":"30f945df-c349-44c8-8276-542b90117955","Type":"ContainerDied","Data":"a71e6103819457e17db16eb013919c953492cb78ae69857ca64ac0a2ea548d15"} Dec 10 12:22:36 crc kubenswrapper[4780]: I1210 12:22:36.890455 4780 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-v4lhh" Dec 10 12:22:37 crc kubenswrapper[4780]: I1210 12:22:37.087905 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/30f945df-c349-44c8-8276-542b90117955-inventory\") pod \"30f945df-c349-44c8-8276-542b90117955\" (UID: \"30f945df-c349-44c8-8276-542b90117955\") " Dec 10 12:22:37 crc kubenswrapper[4780]: I1210 12:22:37.088019 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rffkp\" (UniqueName: \"kubernetes.io/projected/30f945df-c349-44c8-8276-542b90117955-kube-api-access-rffkp\") pod \"30f945df-c349-44c8-8276-542b90117955\" (UID: \"30f945df-c349-44c8-8276-542b90117955\") " Dec 10 12:22:37 crc kubenswrapper[4780]: I1210 12:22:37.088062 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/30f945df-c349-44c8-8276-542b90117955-ssh-key\") pod \"30f945df-c349-44c8-8276-542b90117955\" (UID: \"30f945df-c349-44c8-8276-542b90117955\") " Dec 10 12:22:37 crc kubenswrapper[4780]: I1210 12:22:37.095774 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/30f945df-c349-44c8-8276-542b90117955-kube-api-access-rffkp" (OuterVolumeSpecName: "kube-api-access-rffkp") pod "30f945df-c349-44c8-8276-542b90117955" (UID: "30f945df-c349-44c8-8276-542b90117955"). InnerVolumeSpecName "kube-api-access-rffkp". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 12:22:37 crc kubenswrapper[4780]: I1210 12:22:37.129229 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/30f945df-c349-44c8-8276-542b90117955-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "30f945df-c349-44c8-8276-542b90117955" (UID: "30f945df-c349-44c8-8276-542b90117955"). InnerVolumeSpecName "ssh-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 12:22:37 crc kubenswrapper[4780]: I1210 12:22:37.138089 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/30f945df-c349-44c8-8276-542b90117955-inventory" (OuterVolumeSpecName: "inventory") pod "30f945df-c349-44c8-8276-542b90117955" (UID: "30f945df-c349-44c8-8276-542b90117955"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 12:22:37 crc kubenswrapper[4780]: I1210 12:22:37.192096 4780 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/30f945df-c349-44c8-8276-542b90117955-inventory\") on node \"crc\" DevicePath \"\"" Dec 10 12:22:37 crc kubenswrapper[4780]: I1210 12:22:37.192485 4780 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rffkp\" (UniqueName: \"kubernetes.io/projected/30f945df-c349-44c8-8276-542b90117955-kube-api-access-rffkp\") on node \"crc\" DevicePath \"\"" Dec 10 12:22:37 crc kubenswrapper[4780]: I1210 12:22:37.192505 4780 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/30f945df-c349-44c8-8276-542b90117955-ssh-key\") on node \"crc\" DevicePath \"\"" Dec 10 12:22:37 crc kubenswrapper[4780]: I1210 12:22:37.194752 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-v4lhh" event={"ID":"30f945df-c349-44c8-8276-542b90117955","Type":"ContainerDied","Data":"79e5b489874fa77463cbd04b0837f85eb4c9d9ad257e6fcec2b9aa90f844d89c"} Dec 10 12:22:37 crc kubenswrapper[4780]: I1210 12:22:37.194802 4780 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="79e5b489874fa77463cbd04b0837f85eb4c9d9ad257e6fcec2b9aa90f844d89c" Dec 10 12:22:37 crc kubenswrapper[4780]: I1210 12:22:37.194890 4780 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-v4lhh" Dec 10 12:22:38 crc kubenswrapper[4780]: I1210 12:22:38.214641 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-tjsq4/crc-debug-xt4xs" event={"ID":"a81db9b5-c517-47f7-b874-0814650d6be2","Type":"ContainerStarted","Data":"fe4dfda8399a1d560043d4ef0a7230a3ce9f66f5ee89ff9315f67e39ae4c5df5"} Dec 10 12:22:38 crc kubenswrapper[4780]: I1210 12:22:38.237974 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-must-gather-tjsq4/crc-debug-xt4xs" podStartSLOduration=1.725190536 podStartE2EDuration="35.237910338s" podCreationTimestamp="2025-12-10 12:22:03 +0000 UTC" firstStartedPulling="2025-12-10 12:22:03.970151172 +0000 UTC m=+5828.823544615" lastFinishedPulling="2025-12-10 12:22:37.482870974 +0000 UTC m=+5862.336264417" observedRunningTime="2025-12-10 12:22:38.234179474 +0000 UTC m=+5863.087572927" watchObservedRunningTime="2025-12-10 12:22:38.237910338 +0000 UTC m=+5863.091303781" Dec 10 12:22:44 crc kubenswrapper[4780]: E1210 12:22:44.964157 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 12:22:46 crc kubenswrapper[4780]: E1210 12:22:46.961634 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 12:22:58 crc kubenswrapper[4780]: E1210 12:22:58.965685 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 12:22:59 crc kubenswrapper[4780]: E1210 12:22:59.099617 4780 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested" Dec 10 12:22:59 crc kubenswrapper[4780]: E1210 12:22:59.099713 4780 kuberuntime_image.go:55] "Failed to pull image" err="initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested" Dec 10 12:22:59 crc kubenswrapper[4780]: E1210 12:22:59.099916 4780 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:ceilometer-central-agent,Image:quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n5d9hcfh66bh66bh89h5cdh97h57ch598h68h5b5h689h56chc5h96h58ch687h5dfh5ddh645h68bhcchcdh56ch56fh9fh654hd4h8dhb9h74h59cq,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/var/lib/openstack/bin,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/openstack/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:ceilometer-central-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-gf2w8,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[/usr/bin/python3 /var/lib/openstack/bin/centralhealth.py],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:300,TimeoutSeconds:5,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ceilometer-0_openstack(317b5b7c-bb08-4441-a2ef-8c2d7390ada6): ErrImagePull: initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" logger="UnhandledError" Dec 10 12:22:59 crc kubenswrapper[4780]: E1210 12:22:59.101325 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ErrImagePull: \"initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 12:23:07 crc kubenswrapper[4780]: I1210 12:23:07.646603 4780 generic.go:334] "Generic (PLEG): container finished" podID="a81db9b5-c517-47f7-b874-0814650d6be2" containerID="fe4dfda8399a1d560043d4ef0a7230a3ce9f66f5ee89ff9315f67e39ae4c5df5" exitCode=0 Dec 10 12:23:07 crc kubenswrapper[4780]: I1210 12:23:07.646658 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-tjsq4/crc-debug-xt4xs" event={"ID":"a81db9b5-c517-47f7-b874-0814650d6be2","Type":"ContainerDied","Data":"fe4dfda8399a1d560043d4ef0a7230a3ce9f66f5ee89ff9315f67e39ae4c5df5"} Dec 10 12:23:08 crc kubenswrapper[4780]: I1210 12:23:08.950731 4780 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-tjsq4/crc-debug-xt4xs" Dec 10 12:23:09 crc kubenswrapper[4780]: I1210 12:23:09.005336 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-tjsq4/crc-debug-xt4xs"] Dec 10 12:23:09 crc kubenswrapper[4780]: I1210 12:23:09.018555 4780 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-tjsq4/crc-debug-xt4xs"] Dec 10 12:23:09 crc kubenswrapper[4780]: I1210 12:23:09.038786 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/a81db9b5-c517-47f7-b874-0814650d6be2-host\") pod \"a81db9b5-c517-47f7-b874-0814650d6be2\" (UID: \"a81db9b5-c517-47f7-b874-0814650d6be2\") " Dec 10 12:23:09 crc kubenswrapper[4780]: I1210 12:23:09.038959 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/a81db9b5-c517-47f7-b874-0814650d6be2-host" (OuterVolumeSpecName: "host") pod "a81db9b5-c517-47f7-b874-0814650d6be2" (UID: "a81db9b5-c517-47f7-b874-0814650d6be2"). InnerVolumeSpecName "host". PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 10 12:23:09 crc kubenswrapper[4780]: I1210 12:23:09.039270 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gblm8\" (UniqueName: \"kubernetes.io/projected/a81db9b5-c517-47f7-b874-0814650d6be2-kube-api-access-gblm8\") pod \"a81db9b5-c517-47f7-b874-0814650d6be2\" (UID: \"a81db9b5-c517-47f7-b874-0814650d6be2\") " Dec 10 12:23:09 crc kubenswrapper[4780]: I1210 12:23:09.040561 4780 reconciler_common.go:293] "Volume detached for volume \"host\" (UniqueName: \"kubernetes.io/host-path/a81db9b5-c517-47f7-b874-0814650d6be2-host\") on node \"crc\" DevicePath \"\"" Dec 10 12:23:09 crc kubenswrapper[4780]: I1210 12:23:09.053452 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a81db9b5-c517-47f7-b874-0814650d6be2-kube-api-access-gblm8" (OuterVolumeSpecName: "kube-api-access-gblm8") pod "a81db9b5-c517-47f7-b874-0814650d6be2" (UID: "a81db9b5-c517-47f7-b874-0814650d6be2"). InnerVolumeSpecName "kube-api-access-gblm8". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 12:23:09 crc kubenswrapper[4780]: I1210 12:23:09.143462 4780 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gblm8\" (UniqueName: \"kubernetes.io/projected/a81db9b5-c517-47f7-b874-0814650d6be2-kube-api-access-gblm8\") on node \"crc\" DevicePath \"\"" Dec 10 12:23:09 crc kubenswrapper[4780]: I1210 12:23:09.694112 4780 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="7b994f4ad501baabf6023b3ad5348917860825a2d665618bb76992131a07560e" Dec 10 12:23:09 crc kubenswrapper[4780]: I1210 12:23:09.694209 4780 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-tjsq4/crc-debug-xt4xs" Dec 10 12:23:09 crc kubenswrapper[4780]: I1210 12:23:09.976156 4780 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a81db9b5-c517-47f7-b874-0814650d6be2" path="/var/lib/kubelet/pods/a81db9b5-c517-47f7-b874-0814650d6be2/volumes" Dec 10 12:23:10 crc kubenswrapper[4780]: I1210 12:23:10.279567 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-tjsq4/crc-debug-nsx7h"] Dec 10 12:23:10 crc kubenswrapper[4780]: E1210 12:23:10.280684 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="30f945df-c349-44c8-8276-542b90117955" containerName="download-cache-edpm-deployment-openstack-edpm-ipam" Dec 10 12:23:10 crc kubenswrapper[4780]: I1210 12:23:10.280721 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="30f945df-c349-44c8-8276-542b90117955" containerName="download-cache-edpm-deployment-openstack-edpm-ipam" Dec 10 12:23:10 crc kubenswrapper[4780]: E1210 12:23:10.280744 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a81db9b5-c517-47f7-b874-0814650d6be2" containerName="container-00" Dec 10 12:23:10 crc kubenswrapper[4780]: I1210 12:23:10.280755 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="a81db9b5-c517-47f7-b874-0814650d6be2" containerName="container-00" Dec 10 12:23:10 crc kubenswrapper[4780]: I1210 12:23:10.281791 4780 memory_manager.go:354] "RemoveStaleState removing state" podUID="a81db9b5-c517-47f7-b874-0814650d6be2" containerName="container-00" Dec 10 12:23:10 crc kubenswrapper[4780]: I1210 12:23:10.281860 4780 memory_manager.go:354] "RemoveStaleState removing state" podUID="30f945df-c349-44c8-8276-542b90117955" containerName="download-cache-edpm-deployment-openstack-edpm-ipam" Dec 10 12:23:10 crc kubenswrapper[4780]: I1210 12:23:10.283129 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-tjsq4/crc-debug-nsx7h" Dec 10 12:23:10 crc kubenswrapper[4780]: I1210 12:23:10.292934 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xz6wb\" (UniqueName: \"kubernetes.io/projected/0b8b367a-e554-4b39-a62f-8a3ddbbcbcb6-kube-api-access-xz6wb\") pod \"crc-debug-nsx7h\" (UID: \"0b8b367a-e554-4b39-a62f-8a3ddbbcbcb6\") " pod="openshift-must-gather-tjsq4/crc-debug-nsx7h" Dec 10 12:23:10 crc kubenswrapper[4780]: I1210 12:23:10.293321 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/0b8b367a-e554-4b39-a62f-8a3ddbbcbcb6-host\") pod \"crc-debug-nsx7h\" (UID: \"0b8b367a-e554-4b39-a62f-8a3ddbbcbcb6\") " pod="openshift-must-gather-tjsq4/crc-debug-nsx7h" Dec 10 12:23:10 crc kubenswrapper[4780]: I1210 12:23:10.397498 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xz6wb\" (UniqueName: \"kubernetes.io/projected/0b8b367a-e554-4b39-a62f-8a3ddbbcbcb6-kube-api-access-xz6wb\") pod \"crc-debug-nsx7h\" (UID: \"0b8b367a-e554-4b39-a62f-8a3ddbbcbcb6\") " pod="openshift-must-gather-tjsq4/crc-debug-nsx7h" Dec 10 12:23:10 crc kubenswrapper[4780]: I1210 12:23:10.398105 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/0b8b367a-e554-4b39-a62f-8a3ddbbcbcb6-host\") pod \"crc-debug-nsx7h\" (UID: \"0b8b367a-e554-4b39-a62f-8a3ddbbcbcb6\") " pod="openshift-must-gather-tjsq4/crc-debug-nsx7h" Dec 10 12:23:10 crc kubenswrapper[4780]: I1210 12:23:10.398191 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/0b8b367a-e554-4b39-a62f-8a3ddbbcbcb6-host\") pod \"crc-debug-nsx7h\" (UID: \"0b8b367a-e554-4b39-a62f-8a3ddbbcbcb6\") " pod="openshift-must-gather-tjsq4/crc-debug-nsx7h" Dec 10 12:23:10 crc kubenswrapper[4780]: I1210 12:23:10.428001 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xz6wb\" (UniqueName: \"kubernetes.io/projected/0b8b367a-e554-4b39-a62f-8a3ddbbcbcb6-kube-api-access-xz6wb\") pod \"crc-debug-nsx7h\" (UID: \"0b8b367a-e554-4b39-a62f-8a3ddbbcbcb6\") " pod="openshift-must-gather-tjsq4/crc-debug-nsx7h" Dec 10 12:23:10 crc kubenswrapper[4780]: I1210 12:23:10.606553 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-tjsq4/crc-debug-nsx7h" Dec 10 12:23:10 crc kubenswrapper[4780]: I1210 12:23:10.779045 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-tjsq4/crc-debug-nsx7h" event={"ID":"0b8b367a-e554-4b39-a62f-8a3ddbbcbcb6","Type":"ContainerStarted","Data":"694f1e2f1087aa37bd714636180a12337e1d1cce6034645c430e14ffe9af5f67"} Dec 10 12:23:10 crc kubenswrapper[4780]: E1210 12:23:10.961622 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 12:23:11 crc kubenswrapper[4780]: I1210 12:23:11.795758 4780 generic.go:334] "Generic (PLEG): container finished" podID="0b8b367a-e554-4b39-a62f-8a3ddbbcbcb6" containerID="150f2304f0bc012e76a6c2c4fb23450d408bf8aa6560945313da6015fbf64d1a" exitCode=1 Dec 10 12:23:11 crc kubenswrapper[4780]: I1210 12:23:11.795818 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-tjsq4/crc-debug-nsx7h" event={"ID":"0b8b367a-e554-4b39-a62f-8a3ddbbcbcb6","Type":"ContainerDied","Data":"150f2304f0bc012e76a6c2c4fb23450d408bf8aa6560945313da6015fbf64d1a"} Dec 10 12:23:11 crc kubenswrapper[4780]: I1210 12:23:11.850947 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-tjsq4/crc-debug-nsx7h"] Dec 10 12:23:11 crc kubenswrapper[4780]: I1210 12:23:11.867457 4780 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-tjsq4/crc-debug-nsx7h"] Dec 10 12:23:12 crc kubenswrapper[4780]: I1210 12:23:12.957054 4780 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-tjsq4/crc-debug-nsx7h" Dec 10 12:23:12 crc kubenswrapper[4780]: E1210 12:23:12.961409 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 12:23:13 crc kubenswrapper[4780]: I1210 12:23:13.091697 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xz6wb\" (UniqueName: \"kubernetes.io/projected/0b8b367a-e554-4b39-a62f-8a3ddbbcbcb6-kube-api-access-xz6wb\") pod \"0b8b367a-e554-4b39-a62f-8a3ddbbcbcb6\" (UID: \"0b8b367a-e554-4b39-a62f-8a3ddbbcbcb6\") " Dec 10 12:23:13 crc kubenswrapper[4780]: I1210 12:23:13.092076 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/0b8b367a-e554-4b39-a62f-8a3ddbbcbcb6-host\") pod \"0b8b367a-e554-4b39-a62f-8a3ddbbcbcb6\" (UID: \"0b8b367a-e554-4b39-a62f-8a3ddbbcbcb6\") " Dec 10 12:23:13 crc kubenswrapper[4780]: I1210 12:23:13.092390 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/0b8b367a-e554-4b39-a62f-8a3ddbbcbcb6-host" (OuterVolumeSpecName: "host") pod "0b8b367a-e554-4b39-a62f-8a3ddbbcbcb6" (UID: "0b8b367a-e554-4b39-a62f-8a3ddbbcbcb6"). InnerVolumeSpecName "host". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 10 12:23:13 crc kubenswrapper[4780]: I1210 12:23:13.093307 4780 reconciler_common.go:293] "Volume detached for volume \"host\" (UniqueName: \"kubernetes.io/host-path/0b8b367a-e554-4b39-a62f-8a3ddbbcbcb6-host\") on node \"crc\" DevicePath \"\"" Dec 10 12:23:13 crc kubenswrapper[4780]: I1210 12:23:13.108269 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0b8b367a-e554-4b39-a62f-8a3ddbbcbcb6-kube-api-access-xz6wb" (OuterVolumeSpecName: "kube-api-access-xz6wb") pod "0b8b367a-e554-4b39-a62f-8a3ddbbcbcb6" (UID: "0b8b367a-e554-4b39-a62f-8a3ddbbcbcb6"). InnerVolumeSpecName "kube-api-access-xz6wb". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 12:23:13 crc kubenswrapper[4780]: I1210 12:23:13.195767 4780 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xz6wb\" (UniqueName: \"kubernetes.io/projected/0b8b367a-e554-4b39-a62f-8a3ddbbcbcb6-kube-api-access-xz6wb\") on node \"crc\" DevicePath \"\"" Dec 10 12:23:13 crc kubenswrapper[4780]: I1210 12:23:13.829115 4780 scope.go:117] "RemoveContainer" containerID="150f2304f0bc012e76a6c2c4fb23450d408bf8aa6560945313da6015fbf64d1a" Dec 10 12:23:13 crc kubenswrapper[4780]: I1210 12:23:13.829227 4780 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-tjsq4/crc-debug-nsx7h" Dec 10 12:23:13 crc kubenswrapper[4780]: I1210 12:23:13.978871 4780 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0b8b367a-e554-4b39-a62f-8a3ddbbcbcb6" path="/var/lib/kubelet/pods/0b8b367a-e554-4b39-a62f-8a3ddbbcbcb6/volumes" Dec 10 12:23:22 crc kubenswrapper[4780]: E1210 12:23:22.979873 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 12:23:26 crc kubenswrapper[4780]: E1210 12:23:26.118964 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 12:23:27 crc kubenswrapper[4780]: I1210 12:23:27.475374 4780 patch_prober.go:28] interesting pod/machine-config-daemon-xhdr5 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 10 12:23:27 crc kubenswrapper[4780]: I1210 12:23:27.476185 4780 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" podUID="6bf1dca1-b191-4796-b326-baac53e84045" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 10 12:23:35 crc kubenswrapper[4780]: E1210 12:23:35.972944 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" 
pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 12:23:38 crc kubenswrapper[4780]: E1210 12:23:38.963076 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 12:23:47 crc kubenswrapper[4780]: E1210 12:23:47.961958 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 12:23:52 crc kubenswrapper[4780]: E1210 12:23:52.966006 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 12:23:57 crc kubenswrapper[4780]: I1210 12:23:57.475432 4780 patch_prober.go:28] interesting pod/machine-config-daemon-xhdr5 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 10 12:23:57 crc kubenswrapper[4780]: I1210 12:23:57.476201 4780 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" podUID="6bf1dca1-b191-4796-b326-baac53e84045" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 10 12:23:58 crc kubenswrapper[4780]: I1210 12:23:58.094590 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_aodh-0_ae414d98-62ae-4d0c-a76e-0f7af6e32080/aodh-api/0.log" Dec 10 12:23:58 crc kubenswrapper[4780]: I1210 12:23:58.329001 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_aodh-0_ae414d98-62ae-4d0c-a76e-0f7af6e32080/aodh-evaluator/0.log" Dec 10 12:23:58 crc kubenswrapper[4780]: I1210 12:23:58.388205 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_aodh-0_ae414d98-62ae-4d0c-a76e-0f7af6e32080/aodh-listener/0.log" Dec 10 12:23:58 crc kubenswrapper[4780]: I1210 12:23:58.405738 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_aodh-0_ae414d98-62ae-4d0c-a76e-0f7af6e32080/aodh-notifier/0.log" Dec 10 12:23:58 crc kubenswrapper[4780]: I1210 12:23:58.661088 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-api-684545bb8-fmwfm_2fcbf0b1-90a6-47f6-8bdf-f72f91cdc290/barbican-api/0.log" Dec 10 12:23:58 crc kubenswrapper[4780]: I1210 12:23:58.726241 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-api-684545bb8-fmwfm_2fcbf0b1-90a6-47f6-8bdf-f72f91cdc290/barbican-api-log/0.log" Dec 10 12:23:58 crc kubenswrapper[4780]: I1210 12:23:58.977931 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-keystone-listener-7c9948ddf4-nxp8k_26c869ea-7347-4b65-9a77-2995a7e574ce/barbican-keystone-listener/0.log" Dec 10 
12:23:58 crc kubenswrapper[4780]: I1210 12:23:58.996226 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-keystone-listener-7c9948ddf4-nxp8k_26c869ea-7347-4b65-9a77-2995a7e574ce/barbican-keystone-listener-log/0.log" Dec 10 12:23:59 crc kubenswrapper[4780]: I1210 12:23:59.093679 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-worker-57dc4b8bd7-6w5kj_70d1eef3-a181-4ada-897f-4b3b9620e4de/barbican-worker/0.log" Dec 10 12:23:59 crc kubenswrapper[4780]: I1210 12:23:59.242398 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-worker-57dc4b8bd7-6w5kj_70d1eef3-a181-4ada-897f-4b3b9620e4de/barbican-worker-log/0.log" Dec 10 12:23:59 crc kubenswrapper[4780]: I1210 12:23:59.341372 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_bootstrap-edpm-deployment-openstack-edpm-ipam-5jtvv_51663ab7-946b-4356-b694-5ba7132781f4/bootstrap-edpm-deployment-openstack-edpm-ipam/0.log" Dec 10 12:23:59 crc kubenswrapper[4780]: I1210 12:23:59.603805 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_317b5b7c-bb08-4441-a2ef-8c2d7390ada6/proxy-httpd/0.log" Dec 10 12:23:59 crc kubenswrapper[4780]: I1210 12:23:59.662447 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_317b5b7c-bb08-4441-a2ef-8c2d7390ada6/ceilometer-notification-agent/0.log" Dec 10 12:23:59 crc kubenswrapper[4780]: I1210 12:23:59.718265 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_317b5b7c-bb08-4441-a2ef-8c2d7390ada6/sg-core/0.log" Dec 10 12:23:59 crc kubenswrapper[4780]: I1210 12:23:59.926571 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-api-0_5a0987c6-976b-4d4e-9456-4516cdaf53a0/cinder-api/0.log" Dec 10 12:24:00 crc kubenswrapper[4780]: I1210 12:24:00.025683 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-api-0_5a0987c6-976b-4d4e-9456-4516cdaf53a0/cinder-api-log/0.log" Dec 10 12:24:00 crc kubenswrapper[4780]: I1210 12:24:00.233697 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-scheduler-0_274acc05-0f10-48e5-8fb8-44bc1ddca126/cinder-scheduler/0.log" Dec 10 12:24:00 crc kubenswrapper[4780]: I1210 12:24:00.275582 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-scheduler-0_274acc05-0f10-48e5-8fb8-44bc1ddca126/probe/0.log" Dec 10 12:24:00 crc kubenswrapper[4780]: I1210 12:24:00.484262 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_dnsmasq-dns-6f6df4f56c-pbpzf_c79c532d-1798-4422-be7c-1b212a5f6973/init/0.log" Dec 10 12:24:00 crc kubenswrapper[4780]: I1210 12:24:00.742071 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_dnsmasq-dns-6f6df4f56c-pbpzf_c79c532d-1798-4422-be7c-1b212a5f6973/init/0.log" Dec 10 12:24:00 crc kubenswrapper[4780]: I1210 12:24:00.759280 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_download-cache-edpm-deployment-openstack-edpm-ipam-2jkww_305fe5d9-cca1-44f3-9ec9-034bde2b5434/download-cache-edpm-deployment-openstack-edpm-ipam/0.log" Dec 10 12:24:00 crc kubenswrapper[4780]: I1210 12:24:00.795988 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_dnsmasq-dns-6f6df4f56c-pbpzf_c79c532d-1798-4422-be7c-1b212a5f6973/dnsmasq-dns/0.log" Dec 10 12:24:01 crc kubenswrapper[4780]: I1210 12:24:01.009198 4780 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack_download-cache-edpm-deployment-openstack-edpm-ipam-lmzcj_920ea895-bafa-45e4-9005-175e5114e673/download-cache-edpm-deployment-openstack-edpm-ipam/0.log" Dec 10 12:24:01 crc kubenswrapper[4780]: I1210 12:24:01.126422 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_download-cache-edpm-deployment-openstack-edpm-ipam-p6drx_fa1fb8b2-2d6f-489e-8d87-3c88d49262b9/download-cache-edpm-deployment-openstack-edpm-ipam/0.log" Dec 10 12:24:01 crc kubenswrapper[4780]: I1210 12:24:01.412414 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_download-cache-edpm-deployment-openstack-edpm-ipam-r9hfm_56ce8299-173b-429b-b042-f78fb64b6a74/download-cache-edpm-deployment-openstack-edpm-ipam/0.log" Dec 10 12:24:01 crc kubenswrapper[4780]: I1210 12:24:01.521809 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_download-cache-edpm-deployment-openstack-edpm-ipam-v4lhh_30f945df-c349-44c8-8276-542b90117955/download-cache-edpm-deployment-openstack-edpm-ipam/0.log" Dec 10 12:24:01 crc kubenswrapper[4780]: I1210 12:24:01.785846 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_download-cache-edpm-deployment-openstack-edpm-ipam-vfqk8_460516db-cb7d-4309-bbf8-1b4af468dac4/download-cache-edpm-deployment-openstack-edpm-ipam/0.log" Dec 10 12:24:01 crc kubenswrapper[4780]: I1210 12:24:01.855720 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_download-cache-edpm-deployment-openstack-edpm-ipam-wmlrk_eaf4555e-3d94-4509-b81c-2de2321cff58/download-cache-edpm-deployment-openstack-edpm-ipam/0.log" Dec 10 12:24:02 crc kubenswrapper[4780]: I1210 12:24:02.145254 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-external-api-0_19d9e474-ad81-4f65-aad2-de223d59c35f/glance-log/0.log" Dec 10 12:24:02 crc kubenswrapper[4780]: I1210 12:24:02.227130 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-external-api-0_19d9e474-ad81-4f65-aad2-de223d59c35f/glance-httpd/0.log" Dec 10 12:24:02 crc kubenswrapper[4780]: I1210 12:24:02.395503 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-internal-api-0_d8469667-f6d8-4f05-98b9-7e48fe11bbb9/glance-httpd/0.log" Dec 10 12:24:02 crc kubenswrapper[4780]: I1210 12:24:02.436644 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-internal-api-0_d8469667-f6d8-4f05-98b9-7e48fe11bbb9/glance-log/0.log" Dec 10 12:24:02 crc kubenswrapper[4780]: E1210 12:24:02.968835 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 12:24:03 crc kubenswrapper[4780]: I1210 12:24:03.326358 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_heat-engine-86d9dd567b-q6nsm_85332002-7a9a-4738-a4cd-5b66c34658b2/heat-engine/0.log" Dec 10 12:24:03 crc kubenswrapper[4780]: I1210 12:24:03.602968 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_keystone-cron-29422801-mp9nl_b1af2bed-664f-49ec-b2f3-7b835900ee5f/keystone-cron/0.log" Dec 10 12:24:03 crc kubenswrapper[4780]: I1210 12:24:03.628147 4780 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack_heat-api-5bffc7b484-bhjz4_43cf9913-8179-4d01-a9d8-40ae5078b366/heat-api/0.log" Dec 10 12:24:03 crc kubenswrapper[4780]: I1210 12:24:03.638540 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_keystone-8579f5f4d5-t2zl7_5e7c227f-4a69-4a34-a847-b9bf3e4ed937/keystone-api/0.log" Dec 10 12:24:03 crc kubenswrapper[4780]: I1210 12:24:03.690391 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_heat-cfnapi-5fbd6d5fcb-7dcpq_0e464f35-41ce-4f1e-b728-c12bfb04abb4/heat-cfnapi/0.log" Dec 10 12:24:03 crc kubenswrapper[4780]: E1210 12:24:03.964267 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 12:24:04 crc kubenswrapper[4780]: I1210 12:24:04.181753 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_kube-state-metrics-0_d8bd0c6d-1e03-435c-b09c-a4f7f4c942cd/kube-state-metrics/0.log" Dec 10 12:24:04 crc kubenswrapper[4780]: I1210 12:24:04.540675 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_mysqld-exporter-0_acedb1e6-5bdd-428b-8a8a-92b87a1ce4a4/mysqld-exporter/0.log" Dec 10 12:24:04 crc kubenswrapper[4780]: I1210 12:24:04.612669 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-5ccbff869-dj8wl_7dff6c31-a933-44b7-ad10-b06d1527c768/neutron-api/0.log" Dec 10 12:24:04 crc kubenswrapper[4780]: I1210 12:24:04.677720 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-5ccbff869-dj8wl_7dff6c31-a933-44b7-ad10-b06d1527c768/neutron-httpd/0.log" Dec 10 12:24:05 crc kubenswrapper[4780]: I1210 12:24:05.031307 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-api-0_de3b2cd2-6566-45ee-b000-47ebd28169a9/nova-api-log/0.log" Dec 10 12:24:05 crc kubenswrapper[4780]: I1210 12:24:05.316728 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-cell0-conductor-0_a6f72afd-0015-45dd-9f11-b777b4a99211/nova-cell0-conductor-conductor/0.log" Dec 10 12:24:05 crc kubenswrapper[4780]: I1210 12:24:05.420223 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-cell1-conductor-0_3d2551f7-35c2-4f3d-aa10-a4e87dc81310/nova-cell1-conductor-conductor/0.log" Dec 10 12:24:05 crc kubenswrapper[4780]: I1210 12:24:05.697118 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-api-0_de3b2cd2-6566-45ee-b000-47ebd28169a9/nova-api-api/0.log" Dec 10 12:24:06 crc kubenswrapper[4780]: I1210 12:24:06.476867 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-cell1-novncproxy-0_e89fd3a5-00af-4d5e-bcd3-246cff6f2d68/nova-cell1-novncproxy-novncproxy/0.log" Dec 10 12:24:06 crc kubenswrapper[4780]: I1210 12:24:06.524434 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-metadata-0_1949c654-c734-4a35-a616-4fd761289785/nova-metadata-log/0.log" Dec 10 12:24:06 crc kubenswrapper[4780]: I1210 12:24:06.989026 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-scheduler-0_90d3129e-d394-45fa-bc1e-d576fb9e1ba5/nova-scheduler-scheduler/0.log" Dec 10 12:24:07 crc kubenswrapper[4780]: I1210 12:24:07.027482 4780 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack_openstack-cell1-galera-0_ee80edf3-0250-44c9-acfb-e2f9a3ce4f4b/mysql-bootstrap/0.log" Dec 10 12:24:07 crc kubenswrapper[4780]: I1210 12:24:07.336445 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-cell1-galera-0_ee80edf3-0250-44c9-acfb-e2f9a3ce4f4b/mysql-bootstrap/0.log" Dec 10 12:24:07 crc kubenswrapper[4780]: I1210 12:24:07.469538 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-cell1-galera-0_ee80edf3-0250-44c9-acfb-e2f9a3ce4f4b/galera/0.log" Dec 10 12:24:07 crc kubenswrapper[4780]: I1210 12:24:07.674812 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-galera-0_c31145f5-6188-4934-8ceb-a86ac4a0e997/mysql-bootstrap/0.log" Dec 10 12:24:07 crc kubenswrapper[4780]: I1210 12:24:07.888062 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-galera-0_c31145f5-6188-4934-8ceb-a86ac4a0e997/mysql-bootstrap/0.log" Dec 10 12:24:08 crc kubenswrapper[4780]: I1210 12:24:08.014417 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-galera-0_c31145f5-6188-4934-8ceb-a86ac4a0e997/galera/0.log" Dec 10 12:24:08 crc kubenswrapper[4780]: I1210 12:24:08.224807 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstackclient_e74ddef3-dfb9-4409-9920-1cad0dc2492c/openstackclient/0.log" Dec 10 12:24:08 crc kubenswrapper[4780]: I1210 12:24:08.390563 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-metrics-jgcc9_72e2c2ed-0530-4846-9244-b93076ed5640/openstack-network-exporter/0.log" Dec 10 12:24:09 crc kubenswrapper[4780]: I1210 12:24:09.299559 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-metadata-0_1949c654-c734-4a35-a616-4fd761289785/nova-metadata-metadata/0.log" Dec 10 12:24:09 crc kubenswrapper[4780]: I1210 12:24:09.438555 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-hzgvf_1c086cc9-263e-4d8e-b3fb-a64fea7f179c/ovsdb-server-init/0.log" Dec 10 12:24:09 crc kubenswrapper[4780]: I1210 12:24:09.684636 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-hzgvf_1c086cc9-263e-4d8e-b3fb-a64fea7f179c/ovsdb-server-init/0.log" Dec 10 12:24:09 crc kubenswrapper[4780]: I1210 12:24:09.761979 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-hzgvf_1c086cc9-263e-4d8e-b3fb-a64fea7f179c/ovsdb-server/0.log" Dec 10 12:24:09 crc kubenswrapper[4780]: I1210 12:24:09.766730 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-hzgvf_1c086cc9-263e-4d8e-b3fb-a64fea7f179c/ovs-vswitchd/0.log" Dec 10 12:24:09 crc kubenswrapper[4780]: I1210 12:24:09.955625 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-wt5zb_6bd77f46-f3d3-45a7-bc8e-f3de677e1583/ovn-controller/0.log" Dec 10 12:24:10 crc kubenswrapper[4780]: I1210 12:24:10.029706 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-northd-0_7f260d05-cefc-4e35-a7f3-b1a656cdf9cd/openstack-network-exporter/0.log" Dec 10 12:24:10 crc kubenswrapper[4780]: I1210 12:24:10.086397 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-northd-0_7f260d05-cefc-4e35-a7f3-b1a656cdf9cd/ovn-northd/0.log" Dec 10 12:24:10 crc kubenswrapper[4780]: I1210 12:24:10.286318 4780 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack_ovsdbserver-nb-0_34111627-23c0-44bc-8b84-8cecac15cea1/openstack-network-exporter/0.log" Dec 10 12:24:10 crc kubenswrapper[4780]: I1210 12:24:10.341267 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-0_34111627-23c0-44bc-8b84-8cecac15cea1/ovsdbserver-nb/0.log" Dec 10 12:24:10 crc kubenswrapper[4780]: I1210 12:24:10.562674 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-0_1b0a6811-e7b0-4c35-b54f-6b7a457b68d1/openstack-network-exporter/0.log" Dec 10 12:24:10 crc kubenswrapper[4780]: I1210 12:24:10.620225 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-0_1b0a6811-e7b0-4c35-b54f-6b7a457b68d1/ovsdbserver-sb/0.log" Dec 10 12:24:10 crc kubenswrapper[4780]: I1210 12:24:10.816269 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_placement-75499b8cb8-8226n_2489cfe4-c4b5-4c9b-9977-02d103de7937/placement-api/0.log" Dec 10 12:24:10 crc kubenswrapper[4780]: I1210 12:24:10.933410 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_placement-75499b8cb8-8226n_2489cfe4-c4b5-4c9b-9977-02d103de7937/placement-log/0.log" Dec 10 12:24:10 crc kubenswrapper[4780]: I1210 12:24:10.992900 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_prometheus-metric-storage-0_a75f82ba-b1e5-45cc-8e35-dd8c75c21247/init-config-reloader/0.log" Dec 10 12:24:11 crc kubenswrapper[4780]: I1210 12:24:11.173638 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_prometheus-metric-storage-0_a75f82ba-b1e5-45cc-8e35-dd8c75c21247/init-config-reloader/0.log" Dec 10 12:24:11 crc kubenswrapper[4780]: I1210 12:24:11.232861 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_prometheus-metric-storage-0_a75f82ba-b1e5-45cc-8e35-dd8c75c21247/config-reloader/0.log" Dec 10 12:24:11 crc kubenswrapper[4780]: I1210 12:24:11.234582 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_prometheus-metric-storage-0_a75f82ba-b1e5-45cc-8e35-dd8c75c21247/prometheus/0.log" Dec 10 12:24:11 crc kubenswrapper[4780]: I1210 12:24:11.275021 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_prometheus-metric-storage-0_a75f82ba-b1e5-45cc-8e35-dd8c75c21247/thanos-sidecar/0.log" Dec 10 12:24:11 crc kubenswrapper[4780]: I1210 12:24:11.499665 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-cell1-server-0_16eb03dd-df0a-4623-a42a-25a086709c69/setup-container/0.log" Dec 10 12:24:11 crc kubenswrapper[4780]: I1210 12:24:11.782178 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-cell1-server-0_16eb03dd-df0a-4623-a42a-25a086709c69/setup-container/0.log" Dec 10 12:24:11 crc kubenswrapper[4780]: I1210 12:24:11.854266 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-0_f371fb3f-c503-4308-b0fc-1a180c7e131e/setup-container/0.log" Dec 10 12:24:11 crc kubenswrapper[4780]: I1210 12:24:11.866222 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-cell1-server-0_16eb03dd-df0a-4623-a42a-25a086709c69/rabbitmq/0.log" Dec 10 12:24:12 crc kubenswrapper[4780]: I1210 12:24:12.070627 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-0_f371fb3f-c503-4308-b0fc-1a180c7e131e/setup-container/0.log" Dec 10 12:24:12 crc kubenswrapper[4780]: I1210 12:24:12.197117 4780 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack_rabbitmq-server-0_f371fb3f-c503-4308-b0fc-1a180c7e131e/rabbitmq/0.log" Dec 10 12:24:12 crc kubenswrapper[4780]: I1210 12:24:12.287574 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_redhat-edpm-deployment-openstack-edpm-ipam-zcxsq_c9cea268-1292-4512-b22f-891a4c652dd0/redhat-edpm-deployment-openstack-edpm-ipam/0.log" Dec 10 12:24:12 crc kubenswrapper[4780]: I1210 12:24:12.531746 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_repo-setup-edpm-deployment-openstack-edpm-ipam-wx2z9_52c6020f-bcfe-437d-93cf-c88d8b77018c/repo-setup-edpm-deployment-openstack-edpm-ipam/0.log" Dec 10 12:24:12 crc kubenswrapper[4780]: I1210 12:24:12.811189 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-proxy-6579b964d7-f7kj9_73526536-c600-49b3-b73d-2897a05ce69e/proxy-server/0.log" Dec 10 12:24:12 crc kubenswrapper[4780]: I1210 12:24:12.863756 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-ring-rebalance-5zqjq_72627815-752d-44a8-96cc-428f0239411d/swift-ring-rebalance/0.log" Dec 10 12:24:12 crc kubenswrapper[4780]: I1210 12:24:12.894848 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-proxy-6579b964d7-f7kj9_73526536-c600-49b3-b73d-2897a05ce69e/proxy-httpd/0.log" Dec 10 12:24:13 crc kubenswrapper[4780]: I1210 12:24:13.088151 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_3e1a1225-bdae-4dcb-b10a-02504fe590cd/account-auditor/0.log" Dec 10 12:24:13 crc kubenswrapper[4780]: I1210 12:24:13.141348 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_3e1a1225-bdae-4dcb-b10a-02504fe590cd/account-reaper/0.log" Dec 10 12:24:13 crc kubenswrapper[4780]: I1210 12:24:13.313456 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_3e1a1225-bdae-4dcb-b10a-02504fe590cd/account-replicator/0.log" Dec 10 12:24:13 crc kubenswrapper[4780]: I1210 12:24:13.391725 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_3e1a1225-bdae-4dcb-b10a-02504fe590cd/account-server/0.log" Dec 10 12:24:13 crc kubenswrapper[4780]: I1210 12:24:13.488100 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_3e1a1225-bdae-4dcb-b10a-02504fe590cd/container-auditor/0.log" Dec 10 12:24:13 crc kubenswrapper[4780]: I1210 12:24:13.504710 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_3e1a1225-bdae-4dcb-b10a-02504fe590cd/container-replicator/0.log" Dec 10 12:24:13 crc kubenswrapper[4780]: I1210 12:24:13.598510 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_3e1a1225-bdae-4dcb-b10a-02504fe590cd/container-server/0.log" Dec 10 12:24:13 crc kubenswrapper[4780]: I1210 12:24:13.663218 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_3e1a1225-bdae-4dcb-b10a-02504fe590cd/container-updater/0.log" Dec 10 12:24:13 crc kubenswrapper[4780]: I1210 12:24:13.842106 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_3e1a1225-bdae-4dcb-b10a-02504fe590cd/object-auditor/0.log" Dec 10 12:24:14 crc kubenswrapper[4780]: I1210 12:24:14.148117 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_3e1a1225-bdae-4dcb-b10a-02504fe590cd/object-expirer/0.log" Dec 10 12:24:14 crc kubenswrapper[4780]: I1210 12:24:14.149210 4780 log.go:25] 
"Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_3e1a1225-bdae-4dcb-b10a-02504fe590cd/object-replicator/0.log" Dec 10 12:24:14 crc kubenswrapper[4780]: I1210 12:24:14.180572 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_3e1a1225-bdae-4dcb-b10a-02504fe590cd/object-server/0.log" Dec 10 12:24:14 crc kubenswrapper[4780]: I1210 12:24:14.364061 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_3e1a1225-bdae-4dcb-b10a-02504fe590cd/object-updater/0.log" Dec 10 12:24:14 crc kubenswrapper[4780]: I1210 12:24:14.400875 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_3e1a1225-bdae-4dcb-b10a-02504fe590cd/swift-recon-cron/0.log" Dec 10 12:24:14 crc kubenswrapper[4780]: I1210 12:24:14.441005 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_3e1a1225-bdae-4dcb-b10a-02504fe590cd/rsync/0.log" Dec 10 12:24:14 crc kubenswrapper[4780]: E1210 12:24:14.961477 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 12:24:17 crc kubenswrapper[4780]: E1210 12:24:17.963322 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 12:24:19 crc kubenswrapper[4780]: I1210 12:24:19.714862 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_memcached-0_fe36479c-f1fb-4928-b399-e56c8df9205c/memcached/0.log" Dec 10 12:24:26 crc kubenswrapper[4780]: E1210 12:24:26.961734 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 12:24:27 crc kubenswrapper[4780]: I1210 12:24:27.476042 4780 patch_prober.go:28] interesting pod/machine-config-daemon-xhdr5 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 10 12:24:27 crc kubenswrapper[4780]: I1210 12:24:27.476432 4780 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" podUID="6bf1dca1-b191-4796-b326-baac53e84045" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 10 12:24:27 crc kubenswrapper[4780]: I1210 12:24:27.476503 4780 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" Dec 10 12:24:27 crc kubenswrapper[4780]: I1210 12:24:27.478761 4780 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" 
containerStatusID={"Type":"cri-o","ID":"0c9e8a8764b3e192f255d986c37b68fa82367c56c8fc86fca7a6f21654558176"} pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Dec 10 12:24:27 crc kubenswrapper[4780]: I1210 12:24:27.479247 4780 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" podUID="6bf1dca1-b191-4796-b326-baac53e84045" containerName="machine-config-daemon" containerID="cri-o://0c9e8a8764b3e192f255d986c37b68fa82367c56c8fc86fca7a6f21654558176" gracePeriod=600 Dec 10 12:24:27 crc kubenswrapper[4780]: E1210 12:24:27.643023 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xhdr5_openshift-machine-config-operator(6bf1dca1-b191-4796-b326-baac53e84045)\"" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" podUID="6bf1dca1-b191-4796-b326-baac53e84045" Dec 10 12:24:28 crc kubenswrapper[4780]: I1210 12:24:28.272440 4780 generic.go:334] "Generic (PLEG): container finished" podID="6bf1dca1-b191-4796-b326-baac53e84045" containerID="0c9e8a8764b3e192f255d986c37b68fa82367c56c8fc86fca7a6f21654558176" exitCode=0 Dec 10 12:24:28 crc kubenswrapper[4780]: I1210 12:24:28.272498 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" event={"ID":"6bf1dca1-b191-4796-b326-baac53e84045","Type":"ContainerDied","Data":"0c9e8a8764b3e192f255d986c37b68fa82367c56c8fc86fca7a6f21654558176"} Dec 10 12:24:28 crc kubenswrapper[4780]: I1210 12:24:28.272542 4780 scope.go:117] "RemoveContainer" containerID="d9aa121d6117957daf94487eb5a96d86e219315a547b12e5697d28ed83b885b7" Dec 10 12:24:28 crc kubenswrapper[4780]: I1210 12:24:28.278153 4780 scope.go:117] "RemoveContainer" containerID="0c9e8a8764b3e192f255d986c37b68fa82367c56c8fc86fca7a6f21654558176" Dec 10 12:24:28 crc kubenswrapper[4780]: E1210 12:24:28.279535 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xhdr5_openshift-machine-config-operator(6bf1dca1-b191-4796-b326-baac53e84045)\"" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" podUID="6bf1dca1-b191-4796-b326-baac53e84045" Dec 10 12:24:31 crc kubenswrapper[4780]: E1210 12:24:31.965850 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 12:24:39 crc kubenswrapper[4780]: I1210 12:24:39.959737 4780 scope.go:117] "RemoveContainer" containerID="0c9e8a8764b3e192f255d986c37b68fa82367c56c8fc86fca7a6f21654558176" Dec 10 12:24:39 crc kubenswrapper[4780]: E1210 12:24:39.960722 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-xhdr5_openshift-machine-config-operator(6bf1dca1-b191-4796-b326-baac53e84045)\"" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" podUID="6bf1dca1-b191-4796-b326-baac53e84045" Dec 10 12:24:40 crc kubenswrapper[4780]: E1210 12:24:40.963310 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 12:24:42 crc kubenswrapper[4780]: E1210 12:24:42.962644 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 12:24:50 crc kubenswrapper[4780]: I1210 12:24:50.653117 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_0d3e900286c6ddceb3920f95bb3713bec0f74f6fff2c5179e680d841e29kh58_d1bfcf03-5ce1-40ab-b3bd-072420cc36f7/util/0.log" Dec 10 12:24:50 crc kubenswrapper[4780]: I1210 12:24:50.923086 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_0d3e900286c6ddceb3920f95bb3713bec0f74f6fff2c5179e680d841e29kh58_d1bfcf03-5ce1-40ab-b3bd-072420cc36f7/pull/0.log" Dec 10 12:24:50 crc kubenswrapper[4780]: I1210 12:24:50.959001 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_0d3e900286c6ddceb3920f95bb3713bec0f74f6fff2c5179e680d841e29kh58_d1bfcf03-5ce1-40ab-b3bd-072420cc36f7/util/0.log" Dec 10 12:24:50 crc kubenswrapper[4780]: I1210 12:24:50.983121 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_0d3e900286c6ddceb3920f95bb3713bec0f74f6fff2c5179e680d841e29kh58_d1bfcf03-5ce1-40ab-b3bd-072420cc36f7/pull/0.log" Dec 10 12:24:51 crc kubenswrapper[4780]: I1210 12:24:51.205821 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_0d3e900286c6ddceb3920f95bb3713bec0f74f6fff2c5179e680d841e29kh58_d1bfcf03-5ce1-40ab-b3bd-072420cc36f7/util/0.log" Dec 10 12:24:51 crc kubenswrapper[4780]: I1210 12:24:51.250283 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_0d3e900286c6ddceb3920f95bb3713bec0f74f6fff2c5179e680d841e29kh58_d1bfcf03-5ce1-40ab-b3bd-072420cc36f7/extract/0.log" Dec 10 12:24:51 crc kubenswrapper[4780]: I1210 12:24:51.302465 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_0d3e900286c6ddceb3920f95bb3713bec0f74f6fff2c5179e680d841e29kh58_d1bfcf03-5ce1-40ab-b3bd-072420cc36f7/pull/0.log" Dec 10 12:24:51 crc kubenswrapper[4780]: I1210 12:24:51.963492 4780 scope.go:117] "RemoveContainer" containerID="0c9e8a8764b3e192f255d986c37b68fa82367c56c8fc86fca7a6f21654558176" Dec 10 12:24:51 crc kubenswrapper[4780]: E1210 12:24:51.963944 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xhdr5_openshift-machine-config-operator(6bf1dca1-b191-4796-b326-baac53e84045)\"" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" podUID="6bf1dca1-b191-4796-b326-baac53e84045" Dec 10 12:24:52 crc 
kubenswrapper[4780]: I1210 12:24:52.667143 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_barbican-operator-controller-manager-7d9dfd778-rnfbd_6f18f8cf-e493-41bd-92e6-a7714992854d/kube-rbac-proxy/0.log" Dec 10 12:24:52 crc kubenswrapper[4780]: I1210 12:24:52.729723 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_barbican-operator-controller-manager-7d9dfd778-rnfbd_6f18f8cf-e493-41bd-92e6-a7714992854d/manager/0.log" Dec 10 12:24:52 crc kubenswrapper[4780]: I1210 12:24:52.773681 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_cinder-operator-controller-manager-6c677c69b-nkdgw_5a684cfd-18e4-4f16-a0dd-73f2238cce27/kube-rbac-proxy/0.log" Dec 10 12:24:52 crc kubenswrapper[4780]: I1210 12:24:52.984489 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_cinder-operator-controller-manager-6c677c69b-nkdgw_5a684cfd-18e4-4f16-a0dd-73f2238cce27/manager/0.log" Dec 10 12:24:53 crc kubenswrapper[4780]: I1210 12:24:53.047305 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_designate-operator-controller-manager-697fb699cf-h6ggx_77993ff6-b277-4ef9-a00d-08a47d02d483/kube-rbac-proxy/0.log" Dec 10 12:24:53 crc kubenswrapper[4780]: I1210 12:24:53.126536 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_designate-operator-controller-manager-697fb699cf-h6ggx_77993ff6-b277-4ef9-a00d-08a47d02d483/manager/0.log" Dec 10 12:24:53 crc kubenswrapper[4780]: I1210 12:24:53.305140 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_glance-operator-controller-manager-5697bb5779-lx86j_16c6406a-69c5-4365-81d9-8bf51365cd08/kube-rbac-proxy/0.log" Dec 10 12:24:53 crc kubenswrapper[4780]: I1210 12:24:53.396498 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_glance-operator-controller-manager-5697bb5779-lx86j_16c6406a-69c5-4365-81d9-8bf51365cd08/manager/0.log" Dec 10 12:24:53 crc kubenswrapper[4780]: I1210 12:24:53.589214 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_heat-operator-controller-manager-5f64f6f8bb-csmkt_d235302b-56b1-4515-9f26-4f0ea884aa87/kube-rbac-proxy/0.log" Dec 10 12:24:53 crc kubenswrapper[4780]: I1210 12:24:53.739438 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_heat-operator-controller-manager-5f64f6f8bb-csmkt_d235302b-56b1-4515-9f26-4f0ea884aa87/manager/0.log" Dec 10 12:24:53 crc kubenswrapper[4780]: I1210 12:24:53.846138 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_horizon-operator-controller-manager-68c6d99b8f-896x8_0dbb81a9-f820-447a-a475-911ae4a53034/kube-rbac-proxy/0.log" Dec 10 12:24:53 crc kubenswrapper[4780]: I1210 12:24:53.941649 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_horizon-operator-controller-manager-68c6d99b8f-896x8_0dbb81a9-f820-447a-a475-911ae4a53034/manager/0.log" Dec 10 12:24:54 crc kubenswrapper[4780]: I1210 12:24:54.071937 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_infra-operator-controller-manager-78d48bff9d-d2txd_e4a7d6c0-ca7b-4e3d-bff9-51d6c66810c0/kube-rbac-proxy/0.log" Dec 10 12:24:54 crc kubenswrapper[4780]: I1210 12:24:54.365766 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ironic-operator-controller-manager-967d97867-84x72_5e979946-7a11-46af-ab82-77bae1669169/manager/0.log" 
Dec 10 12:24:54 crc kubenswrapper[4780]: I1210 12:24:54.371580 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ironic-operator-controller-manager-967d97867-84x72_5e979946-7a11-46af-ab82-77bae1669169/kube-rbac-proxy/0.log" Dec 10 12:24:54 crc kubenswrapper[4780]: I1210 12:24:54.449656 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_infra-operator-controller-manager-78d48bff9d-d2txd_e4a7d6c0-ca7b-4e3d-bff9-51d6c66810c0/manager/0.log" Dec 10 12:24:54 crc kubenswrapper[4780]: I1210 12:24:54.659157 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_keystone-operator-controller-manager-7765d96ddf-v6fxt_9d665ff9-da6c-4ae6-82f1-0fa8d9a3fb10/kube-rbac-proxy/0.log" Dec 10 12:24:54 crc kubenswrapper[4780]: I1210 12:24:54.752587 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_keystone-operator-controller-manager-7765d96ddf-v6fxt_9d665ff9-da6c-4ae6-82f1-0fa8d9a3fb10/manager/0.log" Dec 10 12:24:54 crc kubenswrapper[4780]: I1210 12:24:54.796854 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_manila-operator-controller-manager-5b5fd79c9c-jb52h_0f7ed694-a606-482f-90ca-bbe99437b5f7/kube-rbac-proxy/0.log" Dec 10 12:24:55 crc kubenswrapper[4780]: I1210 12:24:55.023169 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_mariadb-operator-controller-manager-79c8c4686c-jxsxr_b1ef4b52-99f2-4257-97d4-bdb6f871f73f/manager/0.log" Dec 10 12:24:55 crc kubenswrapper[4780]: I1210 12:24:55.027297 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_mariadb-operator-controller-manager-79c8c4686c-jxsxr_b1ef4b52-99f2-4257-97d4-bdb6f871f73f/kube-rbac-proxy/0.log" Dec 10 12:24:55 crc kubenswrapper[4780]: I1210 12:24:55.133702 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_manila-operator-controller-manager-5b5fd79c9c-jb52h_0f7ed694-a606-482f-90ca-bbe99437b5f7/manager/0.log" Dec 10 12:24:55 crc kubenswrapper[4780]: I1210 12:24:55.288647 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_neutron-operator-controller-manager-5fdfd5b6b5-nlzr9_3a1cc2ce-3a32-447a-8824-7ec8c84b1613/kube-rbac-proxy/0.log" Dec 10 12:24:55 crc kubenswrapper[4780]: I1210 12:24:55.381074 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_nova-operator-controller-manager-697bc559fc-lgmzm_bbfacb89-13e0-45ef-853a-1faf76e014d7/kube-rbac-proxy/0.log" Dec 10 12:24:55 crc kubenswrapper[4780]: I1210 12:24:55.411658 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_neutron-operator-controller-manager-5fdfd5b6b5-nlzr9_3a1cc2ce-3a32-447a-8824-7ec8c84b1613/manager/0.log" Dec 10 12:24:55 crc kubenswrapper[4780]: I1210 12:24:55.709441 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_nova-operator-controller-manager-697bc559fc-lgmzm_bbfacb89-13e0-45ef-853a-1faf76e014d7/manager/0.log" Dec 10 12:24:55 crc kubenswrapper[4780]: I1210 12:24:55.828943 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_octavia-operator-controller-manager-998648c74-zs8hq_23cc63a0-e8c1-49bc-9762-daa6d315409e/kube-rbac-proxy/0.log" Dec 10 12:24:55 crc kubenswrapper[4780]: I1210 12:24:55.834733 4780 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack-operators_octavia-operator-controller-manager-998648c74-zs8hq_23cc63a0-e8c1-49bc-9762-daa6d315409e/manager/0.log" Dec 10 12:24:55 crc kubenswrapper[4780]: E1210 12:24:55.981160 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 12:24:56 crc kubenswrapper[4780]: I1210 12:24:56.032346 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-baremetal-operator-controller-manager-84b575879fkqjsw_7d98f853-1b52-438c-a5d1-6fe334794a35/kube-rbac-proxy/0.log" Dec 10 12:24:56 crc kubenswrapper[4780]: I1210 12:24:56.191163 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-baremetal-operator-controller-manager-84b575879fkqjsw_7d98f853-1b52-438c-a5d1-6fe334794a35/manager/0.log" Dec 10 12:24:56 crc kubenswrapper[4780]: I1210 12:24:56.786871 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-index-7lvcb_d8ff2ff1-ecd6-4464-8366-1579f94d99d8/registry-server/0.log" Dec 10 12:24:56 crc kubenswrapper[4780]: I1210 12:24:56.901393 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-operator-7b77d4dbbf-qd7x4_033217b3-d237-4cd5-8aae-f99edb1dec27/operator/0.log" Dec 10 12:24:56 crc kubenswrapper[4780]: E1210 12:24:56.965165 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 12:24:57 crc kubenswrapper[4780]: I1210 12:24:57.215742 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ovn-operator-controller-manager-b6456fdb6-jwjxx_b88956c1-b60b-4a6f-948a-de685134880f/kube-rbac-proxy/0.log" Dec 10 12:24:57 crc kubenswrapper[4780]: I1210 12:24:57.219825 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_placement-operator-controller-manager-78f8948974-tgbxl_42edfebc-9a83-460f-9bb9-50172b9763d3/kube-rbac-proxy/0.log" Dec 10 12:24:57 crc kubenswrapper[4780]: I1210 12:24:57.246210 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ovn-operator-controller-manager-b6456fdb6-jwjxx_b88956c1-b60b-4a6f-948a-de685134880f/manager/0.log" Dec 10 12:24:57 crc kubenswrapper[4780]: I1210 12:24:57.447514 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-manager-678c445b7b-6kmsn_79ed5cd6-1f3a-4e73-ae77-ce5565e5f7a3/manager/0.log" Dec 10 12:24:57 crc kubenswrapper[4780]: I1210 12:24:57.506829 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_placement-operator-controller-manager-78f8948974-tgbxl_42edfebc-9a83-460f-9bb9-50172b9763d3/manager/0.log" Dec 10 12:24:57 crc kubenswrapper[4780]: I1210 12:24:57.524744 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_rabbitmq-cluster-operator-manager-668c99d594-52xpp_3953ad68-9125-44a8-819f-0c48aafcfbf3/operator/0.log" Dec 10 12:24:57 crc kubenswrapper[4780]: I1210 
12:24:57.767855 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_swift-operator-controller-manager-9d58d64bc-t764b_47ef7d7b-4052-4068-adef-b6a94353f980/manager/0.log" Dec 10 12:24:57 crc kubenswrapper[4780]: I1210 12:24:57.804028 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_swift-operator-controller-manager-9d58d64bc-t764b_47ef7d7b-4052-4068-adef-b6a94353f980/kube-rbac-proxy/0.log" Dec 10 12:24:57 crc kubenswrapper[4780]: I1210 12:24:57.963092 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_telemetry-operator-controller-manager-54d54d59bc-c7gtb_194d6da3-e6b6-4330-9afa-d973a8bb03c2/kube-rbac-proxy/0.log" Dec 10 12:24:58 crc kubenswrapper[4780]: I1210 12:24:58.053471 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_test-operator-controller-manager-5854674fcc-4vbhw_de0be1fa-33a0-44ad-9aed-c791a447510a/kube-rbac-proxy/0.log" Dec 10 12:24:58 crc kubenswrapper[4780]: I1210 12:24:58.286694 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_test-operator-controller-manager-5854674fcc-4vbhw_de0be1fa-33a0-44ad-9aed-c791a447510a/manager/0.log" Dec 10 12:24:58 crc kubenswrapper[4780]: I1210 12:24:58.355788 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_watcher-operator-controller-manager-667bd8d554-9q8g7_a170f760-08d9-4fd6-b90d-46ef21e4691e/kube-rbac-proxy/0.log" Dec 10 12:24:58 crc kubenswrapper[4780]: I1210 12:24:58.666732 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_watcher-operator-controller-manager-667bd8d554-9q8g7_a170f760-08d9-4fd6-b90d-46ef21e4691e/manager/0.log" Dec 10 12:24:58 crc kubenswrapper[4780]: I1210 12:24:58.691849 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_telemetry-operator-controller-manager-54d54d59bc-c7gtb_194d6da3-e6b6-4330-9afa-d973a8bb03c2/manager/0.log" Dec 10 12:25:02 crc kubenswrapper[4780]: I1210 12:25:02.960285 4780 scope.go:117] "RemoveContainer" containerID="0c9e8a8764b3e192f255d986c37b68fa82367c56c8fc86fca7a6f21654558176" Dec 10 12:25:02 crc kubenswrapper[4780]: E1210 12:25:02.963051 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xhdr5_openshift-machine-config-operator(6bf1dca1-b191-4796-b326-baac53e84045)\"" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" podUID="6bf1dca1-b191-4796-b326-baac53e84045" Dec 10 12:25:09 crc kubenswrapper[4780]: E1210 12:25:09.962168 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 12:25:10 crc kubenswrapper[4780]: E1210 12:25:10.962517 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 12:25:16 crc kubenswrapper[4780]: I1210 12:25:16.960637 4780 scope.go:117] 
"RemoveContainer" containerID="0c9e8a8764b3e192f255d986c37b68fa82367c56c8fc86fca7a6f21654558176" Dec 10 12:25:16 crc kubenswrapper[4780]: E1210 12:25:16.963608 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xhdr5_openshift-machine-config-operator(6bf1dca1-b191-4796-b326-baac53e84045)\"" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" podUID="6bf1dca1-b191-4796-b326-baac53e84045" Dec 10 12:25:22 crc kubenswrapper[4780]: E1210 12:25:22.962460 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 12:25:26 crc kubenswrapper[4780]: E1210 12:25:26.703794 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 12:25:27 crc kubenswrapper[4780]: I1210 12:25:27.961188 4780 scope.go:117] "RemoveContainer" containerID="0c9e8a8764b3e192f255d986c37b68fa82367c56c8fc86fca7a6f21654558176" Dec 10 12:25:27 crc kubenswrapper[4780]: E1210 12:25:27.961988 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xhdr5_openshift-machine-config-operator(6bf1dca1-b191-4796-b326-baac53e84045)\"" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" podUID="6bf1dca1-b191-4796-b326-baac53e84045" Dec 10 12:25:31 crc kubenswrapper[4780]: I1210 12:25:31.244413 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_control-plane-machine-set-operator-78cbb6b69f-nmvlq_19f469f0-60aa-4251-88f4-96baafae3f21/control-plane-machine-set-operator/0.log" Dec 10 12:25:31 crc kubenswrapper[4780]: I1210 12:25:31.476128 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_machine-api-operator-5694c8668f-2mb96_428a3826-00fc-4452-8f22-61d02857b761/machine-api-operator/0.log" Dec 10 12:25:31 crc kubenswrapper[4780]: I1210 12:25:31.497724 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_machine-api-operator-5694c8668f-2mb96_428a3826-00fc-4452-8f22-61d02857b761/kube-rbac-proxy/0.log" Dec 10 12:25:33 crc kubenswrapper[4780]: E1210 12:25:33.962281 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 12:25:37 crc kubenswrapper[4780]: E1210 12:25:37.962537 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image 
\\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 12:25:41 crc kubenswrapper[4780]: I1210 12:25:41.960525 4780 scope.go:117] "RemoveContainer" containerID="0c9e8a8764b3e192f255d986c37b68fa82367c56c8fc86fca7a6f21654558176" Dec 10 12:25:41 crc kubenswrapper[4780]: E1210 12:25:41.961559 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xhdr5_openshift-machine-config-operator(6bf1dca1-b191-4796-b326-baac53e84045)\"" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" podUID="6bf1dca1-b191-4796-b326-baac53e84045" Dec 10 12:25:45 crc kubenswrapper[4780]: I1210 12:25:45.583838 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-lr8dk"] Dec 10 12:25:45 crc kubenswrapper[4780]: E1210 12:25:45.585857 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0b8b367a-e554-4b39-a62f-8a3ddbbcbcb6" containerName="container-00" Dec 10 12:25:45 crc kubenswrapper[4780]: I1210 12:25:45.585985 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="0b8b367a-e554-4b39-a62f-8a3ddbbcbcb6" containerName="container-00" Dec 10 12:25:45 crc kubenswrapper[4780]: I1210 12:25:45.586622 4780 memory_manager.go:354] "RemoveStaleState removing state" podUID="0b8b367a-e554-4b39-a62f-8a3ddbbcbcb6" containerName="container-00" Dec 10 12:25:45 crc kubenswrapper[4780]: I1210 12:25:45.590618 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-lr8dk" Dec 10 12:25:45 crc kubenswrapper[4780]: I1210 12:25:45.601636 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-lr8dk"] Dec 10 12:25:45 crc kubenswrapper[4780]: I1210 12:25:45.713294 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/de39bd30-86bf-4518-89af-c7fca485a15c-utilities\") pod \"redhat-marketplace-lr8dk\" (UID: \"de39bd30-86bf-4518-89af-c7fca485a15c\") " pod="openshift-marketplace/redhat-marketplace-lr8dk" Dec 10 12:25:45 crc kubenswrapper[4780]: I1210 12:25:45.713556 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-468d2\" (UniqueName: \"kubernetes.io/projected/de39bd30-86bf-4518-89af-c7fca485a15c-kube-api-access-468d2\") pod \"redhat-marketplace-lr8dk\" (UID: \"de39bd30-86bf-4518-89af-c7fca485a15c\") " pod="openshift-marketplace/redhat-marketplace-lr8dk" Dec 10 12:25:45 crc kubenswrapper[4780]: I1210 12:25:45.713729 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/de39bd30-86bf-4518-89af-c7fca485a15c-catalog-content\") pod \"redhat-marketplace-lr8dk\" (UID: \"de39bd30-86bf-4518-89af-c7fca485a15c\") " pod="openshift-marketplace/redhat-marketplace-lr8dk" Dec 10 12:25:45 crc kubenswrapper[4780]: I1210 12:25:45.816515 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/de39bd30-86bf-4518-89af-c7fca485a15c-utilities\") pod \"redhat-marketplace-lr8dk\" (UID: \"de39bd30-86bf-4518-89af-c7fca485a15c\") " 
pod="openshift-marketplace/redhat-marketplace-lr8dk" Dec 10 12:25:45 crc kubenswrapper[4780]: I1210 12:25:45.816791 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-468d2\" (UniqueName: \"kubernetes.io/projected/de39bd30-86bf-4518-89af-c7fca485a15c-kube-api-access-468d2\") pod \"redhat-marketplace-lr8dk\" (UID: \"de39bd30-86bf-4518-89af-c7fca485a15c\") " pod="openshift-marketplace/redhat-marketplace-lr8dk" Dec 10 12:25:45 crc kubenswrapper[4780]: I1210 12:25:45.817029 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/de39bd30-86bf-4518-89af-c7fca485a15c-catalog-content\") pod \"redhat-marketplace-lr8dk\" (UID: \"de39bd30-86bf-4518-89af-c7fca485a15c\") " pod="openshift-marketplace/redhat-marketplace-lr8dk" Dec 10 12:25:45 crc kubenswrapper[4780]: I1210 12:25:45.818090 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/de39bd30-86bf-4518-89af-c7fca485a15c-utilities\") pod \"redhat-marketplace-lr8dk\" (UID: \"de39bd30-86bf-4518-89af-c7fca485a15c\") " pod="openshift-marketplace/redhat-marketplace-lr8dk" Dec 10 12:25:45 crc kubenswrapper[4780]: I1210 12:25:45.818132 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/de39bd30-86bf-4518-89af-c7fca485a15c-catalog-content\") pod \"redhat-marketplace-lr8dk\" (UID: \"de39bd30-86bf-4518-89af-c7fca485a15c\") " pod="openshift-marketplace/redhat-marketplace-lr8dk" Dec 10 12:25:45 crc kubenswrapper[4780]: I1210 12:25:45.839892 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-468d2\" (UniqueName: \"kubernetes.io/projected/de39bd30-86bf-4518-89af-c7fca485a15c-kube-api-access-468d2\") pod \"redhat-marketplace-lr8dk\" (UID: \"de39bd30-86bf-4518-89af-c7fca485a15c\") " pod="openshift-marketplace/redhat-marketplace-lr8dk" Dec 10 12:25:45 crc kubenswrapper[4780]: I1210 12:25:45.922547 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-lr8dk" Dec 10 12:25:45 crc kubenswrapper[4780]: E1210 12:25:45.960567 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 12:25:46 crc kubenswrapper[4780]: I1210 12:25:46.615278 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-lr8dk"] Dec 10 12:25:46 crc kubenswrapper[4780]: W1210 12:25:46.620650 4780 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podde39bd30_86bf_4518_89af_c7fca485a15c.slice/crio-cd915fdf6c923a33cc4264c9bb32cafe1a8d1cf3ea47ed580617ff868bab9066 WatchSource:0}: Error finding container cd915fdf6c923a33cc4264c9bb32cafe1a8d1cf3ea47ed580617ff868bab9066: Status 404 returned error can't find the container with id cd915fdf6c923a33cc4264c9bb32cafe1a8d1cf3ea47ed580617ff868bab9066 Dec 10 12:25:47 crc kubenswrapper[4780]: I1210 12:25:47.046797 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-lr8dk" event={"ID":"de39bd30-86bf-4518-89af-c7fca485a15c","Type":"ContainerStarted","Data":"cd915fdf6c923a33cc4264c9bb32cafe1a8d1cf3ea47ed580617ff868bab9066"} Dec 10 12:25:48 crc kubenswrapper[4780]: I1210 12:25:48.062274 4780 generic.go:334] "Generic (PLEG): container finished" podID="de39bd30-86bf-4518-89af-c7fca485a15c" containerID="8fd38b6bea4f2cbff44e7c02db4795ce3ed9ad94a167aeb345502d4d60993448" exitCode=0 Dec 10 12:25:48 crc kubenswrapper[4780]: I1210 12:25:48.062409 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-lr8dk" event={"ID":"de39bd30-86bf-4518-89af-c7fca485a15c","Type":"ContainerDied","Data":"8fd38b6bea4f2cbff44e7c02db4795ce3ed9ad94a167aeb345502d4d60993448"} Dec 10 12:25:50 crc kubenswrapper[4780]: I1210 12:25:50.095777 4780 generic.go:334] "Generic (PLEG): container finished" podID="de39bd30-86bf-4518-89af-c7fca485a15c" containerID="3a89226f96fe93282936f7cfd58d50aabdb69a2a7b4d1f9994ce4284bf0e0ee1" exitCode=0 Dec 10 12:25:50 crc kubenswrapper[4780]: I1210 12:25:50.096000 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-lr8dk" event={"ID":"de39bd30-86bf-4518-89af-c7fca485a15c","Type":"ContainerDied","Data":"3a89226f96fe93282936f7cfd58d50aabdb69a2a7b4d1f9994ce4284bf0e0ee1"} Dec 10 12:25:51 crc kubenswrapper[4780]: I1210 12:25:51.313574 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-5b446d88c5-gzx8b_1d0900bc-ab2b-46a8-b940-4e9e8fd9dd81/cert-manager-controller/0.log" Dec 10 12:25:51 crc kubenswrapper[4780]: I1210 12:25:51.332396 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-cainjector-7f985d654d-qrnfx_25874150-d93c-4f21-a259-e4993d52a783/cert-manager-cainjector/0.log" Dec 10 12:25:51 crc kubenswrapper[4780]: I1210 12:25:51.667782 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-webhook-5655c58dd6-gv6zp_52db28df-3ad3-4c20-9c42-168bb32f6c08/cert-manager-webhook/0.log" Dec 10 12:25:51 crc kubenswrapper[4780]: E1210 12:25:51.961939 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for 
\"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 12:25:52 crc kubenswrapper[4780]: I1210 12:25:52.124462 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-lr8dk" event={"ID":"de39bd30-86bf-4518-89af-c7fca485a15c","Type":"ContainerStarted","Data":"80482c36edec6ea0b06ef73f06af4f81e0ef2ee0854b64164ba8444b3bcebfa6"} Dec 10 12:25:52 crc kubenswrapper[4780]: I1210 12:25:52.180204 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-lr8dk" podStartSLOduration=4.689853425 podStartE2EDuration="7.180176749s" podCreationTimestamp="2025-12-10 12:25:45 +0000 UTC" firstStartedPulling="2025-12-10 12:25:48.066346251 +0000 UTC m=+6052.919739694" lastFinishedPulling="2025-12-10 12:25:50.556669575 +0000 UTC m=+6055.410063018" observedRunningTime="2025-12-10 12:25:52.17077046 +0000 UTC m=+6057.024163913" watchObservedRunningTime="2025-12-10 12:25:52.180176749 +0000 UTC m=+6057.033570192" Dec 10 12:25:54 crc kubenswrapper[4780]: I1210 12:25:54.959818 4780 scope.go:117] "RemoveContainer" containerID="0c9e8a8764b3e192f255d986c37b68fa82367c56c8fc86fca7a6f21654558176" Dec 10 12:25:54 crc kubenswrapper[4780]: E1210 12:25:54.960561 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xhdr5_openshift-machine-config-operator(6bf1dca1-b191-4796-b326-baac53e84045)\"" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" podUID="6bf1dca1-b191-4796-b326-baac53e84045" Dec 10 12:25:55 crc kubenswrapper[4780]: I1210 12:25:55.923568 4780 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-lr8dk" Dec 10 12:25:55 crc kubenswrapper[4780]: I1210 12:25:55.923657 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-lr8dk" Dec 10 12:25:56 crc kubenswrapper[4780]: I1210 12:25:56.002384 4780 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-lr8dk" Dec 10 12:25:56 crc kubenswrapper[4780]: I1210 12:25:56.226132 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-lr8dk" Dec 10 12:25:56 crc kubenswrapper[4780]: I1210 12:25:56.292141 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-lr8dk"] Dec 10 12:25:58 crc kubenswrapper[4780]: I1210 12:25:58.190629 4780 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-lr8dk" podUID="de39bd30-86bf-4518-89af-c7fca485a15c" containerName="registry-server" containerID="cri-o://80482c36edec6ea0b06ef73f06af4f81e0ef2ee0854b64164ba8444b3bcebfa6" gracePeriod=2 Dec 10 12:25:59 crc kubenswrapper[4780]: I1210 12:25:59.214044 4780 generic.go:334] "Generic (PLEG): container finished" podID="de39bd30-86bf-4518-89af-c7fca485a15c" containerID="80482c36edec6ea0b06ef73f06af4f81e0ef2ee0854b64164ba8444b3bcebfa6" exitCode=0 Dec 10 12:25:59 crc kubenswrapper[4780]: I1210 12:25:59.214135 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-marketplace/redhat-marketplace-lr8dk" event={"ID":"de39bd30-86bf-4518-89af-c7fca485a15c","Type":"ContainerDied","Data":"80482c36edec6ea0b06ef73f06af4f81e0ef2ee0854b64164ba8444b3bcebfa6"} Dec 10 12:25:59 crc kubenswrapper[4780]: I1210 12:25:59.310325 4780 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-lr8dk" Dec 10 12:25:59 crc kubenswrapper[4780]: I1210 12:25:59.376417 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/de39bd30-86bf-4518-89af-c7fca485a15c-catalog-content\") pod \"de39bd30-86bf-4518-89af-c7fca485a15c\" (UID: \"de39bd30-86bf-4518-89af-c7fca485a15c\") " Dec 10 12:25:59 crc kubenswrapper[4780]: I1210 12:25:59.376500 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-468d2\" (UniqueName: \"kubernetes.io/projected/de39bd30-86bf-4518-89af-c7fca485a15c-kube-api-access-468d2\") pod \"de39bd30-86bf-4518-89af-c7fca485a15c\" (UID: \"de39bd30-86bf-4518-89af-c7fca485a15c\") " Dec 10 12:25:59 crc kubenswrapper[4780]: I1210 12:25:59.377138 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/de39bd30-86bf-4518-89af-c7fca485a15c-utilities\") pod \"de39bd30-86bf-4518-89af-c7fca485a15c\" (UID: \"de39bd30-86bf-4518-89af-c7fca485a15c\") " Dec 10 12:25:59 crc kubenswrapper[4780]: I1210 12:25:59.378743 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/de39bd30-86bf-4518-89af-c7fca485a15c-utilities" (OuterVolumeSpecName: "utilities") pod "de39bd30-86bf-4518-89af-c7fca485a15c" (UID: "de39bd30-86bf-4518-89af-c7fca485a15c"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 12:25:59 crc kubenswrapper[4780]: I1210 12:25:59.386404 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/de39bd30-86bf-4518-89af-c7fca485a15c-kube-api-access-468d2" (OuterVolumeSpecName: "kube-api-access-468d2") pod "de39bd30-86bf-4518-89af-c7fca485a15c" (UID: "de39bd30-86bf-4518-89af-c7fca485a15c"). InnerVolumeSpecName "kube-api-access-468d2". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 12:25:59 crc kubenswrapper[4780]: I1210 12:25:59.398095 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/de39bd30-86bf-4518-89af-c7fca485a15c-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "de39bd30-86bf-4518-89af-c7fca485a15c" (UID: "de39bd30-86bf-4518-89af-c7fca485a15c"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 12:25:59 crc kubenswrapper[4780]: I1210 12:25:59.479251 4780 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/de39bd30-86bf-4518-89af-c7fca485a15c-utilities\") on node \"crc\" DevicePath \"\"" Dec 10 12:25:59 crc kubenswrapper[4780]: I1210 12:25:59.479291 4780 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/de39bd30-86bf-4518-89af-c7fca485a15c-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 10 12:25:59 crc kubenswrapper[4780]: I1210 12:25:59.479306 4780 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-468d2\" (UniqueName: \"kubernetes.io/projected/de39bd30-86bf-4518-89af-c7fca485a15c-kube-api-access-468d2\") on node \"crc\" DevicePath \"\"" Dec 10 12:25:59 crc kubenswrapper[4780]: E1210 12:25:59.962935 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 12:26:00 crc kubenswrapper[4780]: I1210 12:26:00.236263 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-lr8dk" event={"ID":"de39bd30-86bf-4518-89af-c7fca485a15c","Type":"ContainerDied","Data":"cd915fdf6c923a33cc4264c9bb32cafe1a8d1cf3ea47ed580617ff868bab9066"} Dec 10 12:26:00 crc kubenswrapper[4780]: I1210 12:26:00.236338 4780 scope.go:117] "RemoveContainer" containerID="80482c36edec6ea0b06ef73f06af4f81e0ef2ee0854b64164ba8444b3bcebfa6" Dec 10 12:26:00 crc kubenswrapper[4780]: I1210 12:26:00.236523 4780 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-lr8dk" Dec 10 12:26:00 crc kubenswrapper[4780]: I1210 12:26:00.274097 4780 scope.go:117] "RemoveContainer" containerID="3a89226f96fe93282936f7cfd58d50aabdb69a2a7b4d1f9994ce4284bf0e0ee1" Dec 10 12:26:00 crc kubenswrapper[4780]: I1210 12:26:00.280139 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-lr8dk"] Dec 10 12:26:00 crc kubenswrapper[4780]: I1210 12:26:00.312219 4780 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-lr8dk"] Dec 10 12:26:00 crc kubenswrapper[4780]: I1210 12:26:00.323250 4780 scope.go:117] "RemoveContainer" containerID="8fd38b6bea4f2cbff44e7c02db4795ce3ed9ad94a167aeb345502d4d60993448" Dec 10 12:26:01 crc kubenswrapper[4780]: I1210 12:26:01.973271 4780 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="de39bd30-86bf-4518-89af-c7fca485a15c" path="/var/lib/kubelet/pods/de39bd30-86bf-4518-89af-c7fca485a15c/volumes" Dec 10 12:26:04 crc kubenswrapper[4780]: E1210 12:26:04.961451 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 12:26:07 crc kubenswrapper[4780]: I1210 12:26:07.962890 4780 scope.go:117] "RemoveContainer" containerID="0c9e8a8764b3e192f255d986c37b68fa82367c56c8fc86fca7a6f21654558176" Dec 10 12:26:07 crc kubenswrapper[4780]: E1210 12:26:07.963972 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xhdr5_openshift-machine-config-operator(6bf1dca1-b191-4796-b326-baac53e84045)\"" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" podUID="6bf1dca1-b191-4796-b326-baac53e84045" Dec 10 12:26:08 crc kubenswrapper[4780]: I1210 12:26:08.647279 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-console-plugin-7fbb5f6569-b9mk2_ea0c76a7-e4a2-479c-8aa3-76ec59ce572b/nmstate-console-plugin/0.log" Dec 10 12:26:08 crc kubenswrapper[4780]: I1210 12:26:08.867481 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-handler-jx5fq_e195bedf-9712-4f1f-a9f7-9f4dabdd710b/nmstate-handler/0.log" Dec 10 12:26:08 crc kubenswrapper[4780]: I1210 12:26:08.927858 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-metrics-7f946cbc9-7dzr2_280c4015-3911-4b5b-b794-23e319640bd0/kube-rbac-proxy/0.log" Dec 10 12:26:09 crc kubenswrapper[4780]: I1210 12:26:09.075333 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-metrics-7f946cbc9-7dzr2_280c4015-3911-4b5b-b794-23e319640bd0/nmstate-metrics/0.log" Dec 10 12:26:09 crc kubenswrapper[4780]: I1210 12:26:09.191802 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-operator-5b5b58f5c8-jchz6_3e4022f3-ad78-4495-beb0-74c4274026d5/nmstate-operator/0.log" Dec 10 12:26:09 crc kubenswrapper[4780]: I1210 12:26:09.339647 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-webhook-5f6d4c5ccb-vqlpg_52bc8018-7b47-4053-8bc1-3b686af14adf/nmstate-webhook/0.log" Dec 10 
12:26:11 crc kubenswrapper[4780]: E1210 12:26:11.962578 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 12:26:16 crc kubenswrapper[4780]: E1210 12:26:16.962569 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 12:26:20 crc kubenswrapper[4780]: I1210 12:26:20.960832 4780 scope.go:117] "RemoveContainer" containerID="0c9e8a8764b3e192f255d986c37b68fa82367c56c8fc86fca7a6f21654558176" Dec 10 12:26:20 crc kubenswrapper[4780]: E1210 12:26:20.961830 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xhdr5_openshift-machine-config-operator(6bf1dca1-b191-4796-b326-baac53e84045)\"" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" podUID="6bf1dca1-b191-4796-b326-baac53e84045" Dec 10 12:26:23 crc kubenswrapper[4780]: I1210 12:26:23.705943 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators-redhat_loki-operator-controller-manager-54599dc8c7-jsrb7_1c2ed53a-a11a-4eb0-b91e-cba4a0a510fa/kube-rbac-proxy/0.log" Dec 10 12:26:23 crc kubenswrapper[4780]: I1210 12:26:23.711703 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators-redhat_loki-operator-controller-manager-54599dc8c7-jsrb7_1c2ed53a-a11a-4eb0-b91e-cba4a0a510fa/manager/0.log" Dec 10 12:26:24 crc kubenswrapper[4780]: I1210 12:26:24.052573 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-29rwf"] Dec 10 12:26:24 crc kubenswrapper[4780]: E1210 12:26:24.053397 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="de39bd30-86bf-4518-89af-c7fca485a15c" containerName="registry-server" Dec 10 12:26:24 crc kubenswrapper[4780]: I1210 12:26:24.053420 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="de39bd30-86bf-4518-89af-c7fca485a15c" containerName="registry-server" Dec 10 12:26:24 crc kubenswrapper[4780]: E1210 12:26:24.053440 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="de39bd30-86bf-4518-89af-c7fca485a15c" containerName="extract-utilities" Dec 10 12:26:24 crc kubenswrapper[4780]: I1210 12:26:24.053451 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="de39bd30-86bf-4518-89af-c7fca485a15c" containerName="extract-utilities" Dec 10 12:26:24 crc kubenswrapper[4780]: E1210 12:26:24.053464 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="de39bd30-86bf-4518-89af-c7fca485a15c" containerName="extract-content" Dec 10 12:26:24 crc kubenswrapper[4780]: I1210 12:26:24.053471 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="de39bd30-86bf-4518-89af-c7fca485a15c" containerName="extract-content" Dec 10 12:26:24 crc kubenswrapper[4780]: I1210 12:26:24.053782 4780 memory_manager.go:354] "RemoveStaleState removing state" podUID="de39bd30-86bf-4518-89af-c7fca485a15c" containerName="registry-server" 
Dec 10 12:26:24 crc kubenswrapper[4780]: I1210 12:26:24.059302 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-29rwf" Dec 10 12:26:24 crc kubenswrapper[4780]: I1210 12:26:24.079802 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-29rwf"] Dec 10 12:26:24 crc kubenswrapper[4780]: I1210 12:26:24.239884 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c59f09c8-9aa9-47c8-be29-4f579a2c08f6-catalog-content\") pod \"redhat-operators-29rwf\" (UID: \"c59f09c8-9aa9-47c8-be29-4f579a2c08f6\") " pod="openshift-marketplace/redhat-operators-29rwf" Dec 10 12:26:24 crc kubenswrapper[4780]: I1210 12:26:24.240940 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vwvs5\" (UniqueName: \"kubernetes.io/projected/c59f09c8-9aa9-47c8-be29-4f579a2c08f6-kube-api-access-vwvs5\") pod \"redhat-operators-29rwf\" (UID: \"c59f09c8-9aa9-47c8-be29-4f579a2c08f6\") " pod="openshift-marketplace/redhat-operators-29rwf" Dec 10 12:26:24 crc kubenswrapper[4780]: I1210 12:26:24.241059 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c59f09c8-9aa9-47c8-be29-4f579a2c08f6-utilities\") pod \"redhat-operators-29rwf\" (UID: \"c59f09c8-9aa9-47c8-be29-4f579a2c08f6\") " pod="openshift-marketplace/redhat-operators-29rwf" Dec 10 12:26:24 crc kubenswrapper[4780]: I1210 12:26:24.343638 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c59f09c8-9aa9-47c8-be29-4f579a2c08f6-catalog-content\") pod \"redhat-operators-29rwf\" (UID: \"c59f09c8-9aa9-47c8-be29-4f579a2c08f6\") " pod="openshift-marketplace/redhat-operators-29rwf" Dec 10 12:26:24 crc kubenswrapper[4780]: I1210 12:26:24.343831 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vwvs5\" (UniqueName: \"kubernetes.io/projected/c59f09c8-9aa9-47c8-be29-4f579a2c08f6-kube-api-access-vwvs5\") pod \"redhat-operators-29rwf\" (UID: \"c59f09c8-9aa9-47c8-be29-4f579a2c08f6\") " pod="openshift-marketplace/redhat-operators-29rwf" Dec 10 12:26:24 crc kubenswrapper[4780]: I1210 12:26:24.343870 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c59f09c8-9aa9-47c8-be29-4f579a2c08f6-utilities\") pod \"redhat-operators-29rwf\" (UID: \"c59f09c8-9aa9-47c8-be29-4f579a2c08f6\") " pod="openshift-marketplace/redhat-operators-29rwf" Dec 10 12:26:24 crc kubenswrapper[4780]: I1210 12:26:24.345446 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c59f09c8-9aa9-47c8-be29-4f579a2c08f6-utilities\") pod \"redhat-operators-29rwf\" (UID: \"c59f09c8-9aa9-47c8-be29-4f579a2c08f6\") " pod="openshift-marketplace/redhat-operators-29rwf" Dec 10 12:26:24 crc kubenswrapper[4780]: I1210 12:26:24.345562 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c59f09c8-9aa9-47c8-be29-4f579a2c08f6-catalog-content\") pod \"redhat-operators-29rwf\" (UID: \"c59f09c8-9aa9-47c8-be29-4f579a2c08f6\") " pod="openshift-marketplace/redhat-operators-29rwf" Dec 10 12:26:24 crc 
kubenswrapper[4780]: I1210 12:26:24.374211 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vwvs5\" (UniqueName: \"kubernetes.io/projected/c59f09c8-9aa9-47c8-be29-4f579a2c08f6-kube-api-access-vwvs5\") pod \"redhat-operators-29rwf\" (UID: \"c59f09c8-9aa9-47c8-be29-4f579a2c08f6\") " pod="openshift-marketplace/redhat-operators-29rwf" Dec 10 12:26:24 crc kubenswrapper[4780]: I1210 12:26:24.394843 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-29rwf" Dec 10 12:26:24 crc kubenswrapper[4780]: E1210 12:26:24.960876 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 12:26:25 crc kubenswrapper[4780]: I1210 12:26:25.035583 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-29rwf"] Dec 10 12:26:25 crc kubenswrapper[4780]: I1210 12:26:25.665229 4780 generic.go:334] "Generic (PLEG): container finished" podID="c59f09c8-9aa9-47c8-be29-4f579a2c08f6" containerID="916f3b3b1250203070bda7e02813efc1a7563ccda5ad3af5ac71607d8950295f" exitCode=0 Dec 10 12:26:25 crc kubenswrapper[4780]: I1210 12:26:25.665330 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-29rwf" event={"ID":"c59f09c8-9aa9-47c8-be29-4f579a2c08f6","Type":"ContainerDied","Data":"916f3b3b1250203070bda7e02813efc1a7563ccda5ad3af5ac71607d8950295f"} Dec 10 12:26:25 crc kubenswrapper[4780]: I1210 12:26:25.665692 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-29rwf" event={"ID":"c59f09c8-9aa9-47c8-be29-4f579a2c08f6","Type":"ContainerStarted","Data":"d5b5340bd4ac1686aadda0423276b94e4099ce43a481820507c5d57464f7d239"} Dec 10 12:26:26 crc kubenswrapper[4780]: I1210 12:26:26.691460 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-29rwf" event={"ID":"c59f09c8-9aa9-47c8-be29-4f579a2c08f6","Type":"ContainerStarted","Data":"bac908c55deb780d336f73e77028cb6c163158f479ba589c153615d887b5f1a0"} Dec 10 12:26:27 crc kubenswrapper[4780]: E1210 12:26:27.963053 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 12:26:32 crc kubenswrapper[4780]: I1210 12:26:32.959699 4780 scope.go:117] "RemoveContainer" containerID="0c9e8a8764b3e192f255d986c37b68fa82367c56c8fc86fca7a6f21654558176" Dec 10 12:26:32 crc kubenswrapper[4780]: E1210 12:26:32.960791 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xhdr5_openshift-machine-config-operator(6bf1dca1-b191-4796-b326-baac53e84045)\"" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" podUID="6bf1dca1-b191-4796-b326-baac53e84045" Dec 10 12:26:34 crc kubenswrapper[4780]: E1210 12:26:34.698471 4780 cadvisor_stats_provider.go:516] "Partial failure issuing 
cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podc59f09c8_9aa9_47c8_be29_4f579a2c08f6.slice/crio-conmon-bac908c55deb780d336f73e77028cb6c163158f479ba589c153615d887b5f1a0.scope\": RecentStats: unable to find data in memory cache]" Dec 10 12:26:35 crc kubenswrapper[4780]: I1210 12:26:35.286417 4780 generic.go:334] "Generic (PLEG): container finished" podID="c59f09c8-9aa9-47c8-be29-4f579a2c08f6" containerID="bac908c55deb780d336f73e77028cb6c163158f479ba589c153615d887b5f1a0" exitCode=0 Dec 10 12:26:35 crc kubenswrapper[4780]: I1210 12:26:35.286481 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-29rwf" event={"ID":"c59f09c8-9aa9-47c8-be29-4f579a2c08f6","Type":"ContainerDied","Data":"bac908c55deb780d336f73e77028cb6c163158f479ba589c153615d887b5f1a0"} Dec 10 12:26:36 crc kubenswrapper[4780]: I1210 12:26:36.314429 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-29rwf" event={"ID":"c59f09c8-9aa9-47c8-be29-4f579a2c08f6","Type":"ContainerStarted","Data":"79fbda25921b4b262ab01a74a895034a54e6fa7b3cbecb1abfb440601fc72673"} Dec 10 12:26:36 crc kubenswrapper[4780]: I1210 12:26:36.341538 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-29rwf" podStartSLOduration=2.120146589 podStartE2EDuration="12.341505478s" podCreationTimestamp="2025-12-10 12:26:24 +0000 UTC" firstStartedPulling="2025-12-10 12:26:25.667724812 +0000 UTC m=+6090.521118255" lastFinishedPulling="2025-12-10 12:26:35.889083701 +0000 UTC m=+6100.742477144" observedRunningTime="2025-12-10 12:26:36.338554864 +0000 UTC m=+6101.191948307" watchObservedRunningTime="2025-12-10 12:26:36.341505478 +0000 UTC m=+6101.194898931" Dec 10 12:26:38 crc kubenswrapper[4780]: E1210 12:26:38.961898 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 12:26:40 crc kubenswrapper[4780]: E1210 12:26:40.964317 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 12:26:44 crc kubenswrapper[4780]: I1210 12:26:44.395865 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-29rwf" Dec 10 12:26:44 crc kubenswrapper[4780]: I1210 12:26:44.397076 4780 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-29rwf" Dec 10 12:26:44 crc kubenswrapper[4780]: I1210 12:26:44.473571 4780 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-29rwf" Dec 10 12:26:44 crc kubenswrapper[4780]: I1210 12:26:44.545521 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-29rwf" Dec 10 12:26:44 crc kubenswrapper[4780]: I1210 12:26:44.724071 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-29rwf"] Dec 10 
12:26:44 crc kubenswrapper[4780]: I1210 12:26:44.794773 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-logging_collector-mbb2k_8f1006aa-0bb6-451f-9096-75c6e760d2db/collector/0.log" Dec 10 12:26:44 crc kubenswrapper[4780]: I1210 12:26:44.796168 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-logging_cluster-logging-operator-ff9846bd-mswnk_358d8d56-efec-4f25-be4c-9552b279f46f/cluster-logging-operator/0.log" Dec 10 12:26:44 crc kubenswrapper[4780]: I1210 12:26:44.800072 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-logging_logging-loki-compactor-0_61a3cdd1-2b93-4f37-a58e-d3d0918b60eb/loki-compactor/0.log" Dec 10 12:26:45 crc kubenswrapper[4780]: I1210 12:26:45.059479 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-logging_logging-loki-distributor-76cc67bf56-j5hdz_a59d13f6-10bb-4e7d-96c1-46dff7fac4e2/loki-distributor/0.log" Dec 10 12:26:45 crc kubenswrapper[4780]: I1210 12:26:45.089770 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-logging_logging-loki-gateway-7d6b48847-8n96n_ec49e3ad-a0d4-4eaa-a8f6-6f76c1663546/gateway/0.log" Dec 10 12:26:45 crc kubenswrapper[4780]: I1210 12:26:45.463211 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-logging_logging-loki-gateway-7d6b48847-8n96n_ec49e3ad-a0d4-4eaa-a8f6-6f76c1663546/opa/0.log" Dec 10 12:26:45 crc kubenswrapper[4780]: I1210 12:26:45.523271 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-logging_logging-loki-gateway-7d6b48847-cgx9d_dc698901-e923-49fa-bc7f-f4e3f9f0a99b/gateway/0.log" Dec 10 12:26:45 crc kubenswrapper[4780]: I1210 12:26:45.629274 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-logging_logging-loki-gateway-7d6b48847-cgx9d_dc698901-e923-49fa-bc7f-f4e3f9f0a99b/opa/0.log" Dec 10 12:26:45 crc kubenswrapper[4780]: I1210 12:26:45.829497 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-logging_logging-loki-index-gateway-0_1c91b690-c1ad-4923-ad75-f7e5611441f9/loki-index-gateway/0.log" Dec 10 12:26:45 crc kubenswrapper[4780]: I1210 12:26:45.984009 4780 scope.go:117] "RemoveContainer" containerID="0c9e8a8764b3e192f255d986c37b68fa82367c56c8fc86fca7a6f21654558176" Dec 10 12:26:45 crc kubenswrapper[4780]: E1210 12:26:45.984524 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xhdr5_openshift-machine-config-operator(6bf1dca1-b191-4796-b326-baac53e84045)\"" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" podUID="6bf1dca1-b191-4796-b326-baac53e84045" Dec 10 12:26:46 crc kubenswrapper[4780]: I1210 12:26:46.006199 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-logging_logging-loki-ingester-0_2b4928b0-ec66-4cee-8fd3-2067b64c4144/loki-ingester/0.log" Dec 10 12:26:46 crc kubenswrapper[4780]: I1210 12:26:46.103945 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-logging_logging-loki-querier-5895d59bb8-h98vv_7c0379b8-51d2-4860-be84-18dfcb007969/loki-querier/0.log" Dec 10 12:26:46 crc kubenswrapper[4780]: I1210 12:26:46.300711 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-logging_logging-loki-query-frontend-84558f7c9f-tjd86_9906b064-40d7-43bc-bb9b-52863d99a2f4/loki-query-frontend/0.log" Dec 10 12:26:46 crc kubenswrapper[4780]: I1210 
12:26:46.477495 4780 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-29rwf" podUID="c59f09c8-9aa9-47c8-be29-4f579a2c08f6" containerName="registry-server" containerID="cri-o://79fbda25921b4b262ab01a74a895034a54e6fa7b3cbecb1abfb440601fc72673" gracePeriod=2 Dec 10 12:26:47 crc kubenswrapper[4780]: I1210 12:26:47.509369 4780 generic.go:334] "Generic (PLEG): container finished" podID="c59f09c8-9aa9-47c8-be29-4f579a2c08f6" containerID="79fbda25921b4b262ab01a74a895034a54e6fa7b3cbecb1abfb440601fc72673" exitCode=0 Dec 10 12:26:47 crc kubenswrapper[4780]: I1210 12:26:47.509543 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-29rwf" event={"ID":"c59f09c8-9aa9-47c8-be29-4f579a2c08f6","Type":"ContainerDied","Data":"79fbda25921b4b262ab01a74a895034a54e6fa7b3cbecb1abfb440601fc72673"} Dec 10 12:26:47 crc kubenswrapper[4780]: I1210 12:26:47.510037 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-29rwf" event={"ID":"c59f09c8-9aa9-47c8-be29-4f579a2c08f6","Type":"ContainerDied","Data":"d5b5340bd4ac1686aadda0423276b94e4099ce43a481820507c5d57464f7d239"} Dec 10 12:26:47 crc kubenswrapper[4780]: I1210 12:26:47.510050 4780 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="d5b5340bd4ac1686aadda0423276b94e4099ce43a481820507c5d57464f7d239" Dec 10 12:26:47 crc kubenswrapper[4780]: I1210 12:26:47.592753 4780 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-29rwf" Dec 10 12:26:47 crc kubenswrapper[4780]: I1210 12:26:47.625522 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c59f09c8-9aa9-47c8-be29-4f579a2c08f6-utilities\") pod \"c59f09c8-9aa9-47c8-be29-4f579a2c08f6\" (UID: \"c59f09c8-9aa9-47c8-be29-4f579a2c08f6\") " Dec 10 12:26:47 crc kubenswrapper[4780]: I1210 12:26:47.625966 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vwvs5\" (UniqueName: \"kubernetes.io/projected/c59f09c8-9aa9-47c8-be29-4f579a2c08f6-kube-api-access-vwvs5\") pod \"c59f09c8-9aa9-47c8-be29-4f579a2c08f6\" (UID: \"c59f09c8-9aa9-47c8-be29-4f579a2c08f6\") " Dec 10 12:26:47 crc kubenswrapper[4780]: I1210 12:26:47.626031 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c59f09c8-9aa9-47c8-be29-4f579a2c08f6-catalog-content\") pod \"c59f09c8-9aa9-47c8-be29-4f579a2c08f6\" (UID: \"c59f09c8-9aa9-47c8-be29-4f579a2c08f6\") " Dec 10 12:26:47 crc kubenswrapper[4780]: I1210 12:26:47.626889 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c59f09c8-9aa9-47c8-be29-4f579a2c08f6-utilities" (OuterVolumeSpecName: "utilities") pod "c59f09c8-9aa9-47c8-be29-4f579a2c08f6" (UID: "c59f09c8-9aa9-47c8-be29-4f579a2c08f6"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 12:26:47 crc kubenswrapper[4780]: I1210 12:26:47.658268 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c59f09c8-9aa9-47c8-be29-4f579a2c08f6-kube-api-access-vwvs5" (OuterVolumeSpecName: "kube-api-access-vwvs5") pod "c59f09c8-9aa9-47c8-be29-4f579a2c08f6" (UID: "c59f09c8-9aa9-47c8-be29-4f579a2c08f6"). InnerVolumeSpecName "kube-api-access-vwvs5". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 12:26:47 crc kubenswrapper[4780]: I1210 12:26:47.729756 4780 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vwvs5\" (UniqueName: \"kubernetes.io/projected/c59f09c8-9aa9-47c8-be29-4f579a2c08f6-kube-api-access-vwvs5\") on node \"crc\" DevicePath \"\"" Dec 10 12:26:47 crc kubenswrapper[4780]: I1210 12:26:47.729821 4780 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c59f09c8-9aa9-47c8-be29-4f579a2c08f6-utilities\") on node \"crc\" DevicePath \"\"" Dec 10 12:26:47 crc kubenswrapper[4780]: I1210 12:26:47.784642 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c59f09c8-9aa9-47c8-be29-4f579a2c08f6-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "c59f09c8-9aa9-47c8-be29-4f579a2c08f6" (UID: "c59f09c8-9aa9-47c8-be29-4f579a2c08f6"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 12:26:47 crc kubenswrapper[4780]: I1210 12:26:47.832429 4780 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c59f09c8-9aa9-47c8-be29-4f579a2c08f6-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 10 12:26:48 crc kubenswrapper[4780]: I1210 12:26:48.640518 4780 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-29rwf" Dec 10 12:26:48 crc kubenswrapper[4780]: I1210 12:26:48.689809 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-29rwf"] Dec 10 12:26:48 crc kubenswrapper[4780]: I1210 12:26:48.708599 4780 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-29rwf"] Dec 10 12:26:49 crc kubenswrapper[4780]: E1210 12:26:49.961779 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 12:26:49 crc kubenswrapper[4780]: I1210 12:26:49.979727 4780 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c59f09c8-9aa9-47c8-be29-4f579a2c08f6" path="/var/lib/kubelet/pods/c59f09c8-9aa9-47c8-be29-4f579a2c08f6/volumes" Dec 10 12:26:52 crc kubenswrapper[4780]: E1210 12:26:52.962621 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 12:26:58 crc kubenswrapper[4780]: I1210 12:26:58.960281 4780 scope.go:117] "RemoveContainer" containerID="0c9e8a8764b3e192f255d986c37b68fa82367c56c8fc86fca7a6f21654558176" Dec 10 12:26:58 crc kubenswrapper[4780]: E1210 12:26:58.961280 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xhdr5_openshift-machine-config-operator(6bf1dca1-b191-4796-b326-baac53e84045)\"" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" 
podUID="6bf1dca1-b191-4796-b326-baac53e84045" Dec 10 12:27:03 crc kubenswrapper[4780]: I1210 12:27:03.031014 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_controller-f8648f98b-xs4hm_563406a8-97e3-4591-8448-a666a0ccaacc/kube-rbac-proxy/0.log" Dec 10 12:27:03 crc kubenswrapper[4780]: I1210 12:27:03.376029 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_controller-f8648f98b-xs4hm_563406a8-97e3-4591-8448-a666a0ccaacc/controller/0.log" Dec 10 12:27:03 crc kubenswrapper[4780]: I1210 12:27:03.416545 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-jcfnl_6974b1ff-a49a-4211-90a1-802e36919842/cp-frr-files/0.log" Dec 10 12:27:03 crc kubenswrapper[4780]: I1210 12:27:03.609142 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-jcfnl_6974b1ff-a49a-4211-90a1-802e36919842/cp-frr-files/0.log" Dec 10 12:27:03 crc kubenswrapper[4780]: I1210 12:27:03.629398 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-jcfnl_6974b1ff-a49a-4211-90a1-802e36919842/cp-metrics/0.log" Dec 10 12:27:03 crc kubenswrapper[4780]: I1210 12:27:03.634435 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-jcfnl_6974b1ff-a49a-4211-90a1-802e36919842/cp-reloader/0.log" Dec 10 12:27:03 crc kubenswrapper[4780]: I1210 12:27:03.653807 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-jcfnl_6974b1ff-a49a-4211-90a1-802e36919842/cp-reloader/0.log" Dec 10 12:27:03 crc kubenswrapper[4780]: I1210 12:27:03.944056 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-jcfnl_6974b1ff-a49a-4211-90a1-802e36919842/cp-reloader/0.log" Dec 10 12:27:03 crc kubenswrapper[4780]: E1210 12:27:03.962200 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 12:27:04 crc kubenswrapper[4780]: I1210 12:27:04.007991 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-jcfnl_6974b1ff-a49a-4211-90a1-802e36919842/cp-metrics/0.log" Dec 10 12:27:04 crc kubenswrapper[4780]: I1210 12:27:04.011465 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-jcfnl_6974b1ff-a49a-4211-90a1-802e36919842/cp-metrics/0.log" Dec 10 12:27:04 crc kubenswrapper[4780]: I1210 12:27:04.034856 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-jcfnl_6974b1ff-a49a-4211-90a1-802e36919842/cp-frr-files/0.log" Dec 10 12:27:04 crc kubenswrapper[4780]: I1210 12:27:04.219517 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-jcfnl_6974b1ff-a49a-4211-90a1-802e36919842/cp-frr-files/0.log" Dec 10 12:27:04 crc kubenswrapper[4780]: I1210 12:27:04.259561 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-jcfnl_6974b1ff-a49a-4211-90a1-802e36919842/cp-metrics/0.log" Dec 10 12:27:04 crc kubenswrapper[4780]: I1210 12:27:04.286283 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-jcfnl_6974b1ff-a49a-4211-90a1-802e36919842/cp-reloader/0.log" Dec 10 12:27:04 crc kubenswrapper[4780]: I1210 12:27:04.340496 4780 log.go:25] "Finished parsing log 
file" path="/var/log/pods/metallb-system_frr-k8s-jcfnl_6974b1ff-a49a-4211-90a1-802e36919842/controller/0.log" Dec 10 12:27:04 crc kubenswrapper[4780]: I1210 12:27:04.541966 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-jcfnl_6974b1ff-a49a-4211-90a1-802e36919842/frr-metrics/0.log" Dec 10 12:27:04 crc kubenswrapper[4780]: I1210 12:27:04.624020 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-jcfnl_6974b1ff-a49a-4211-90a1-802e36919842/kube-rbac-proxy/0.log" Dec 10 12:27:04 crc kubenswrapper[4780]: I1210 12:27:04.653540 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-jcfnl_6974b1ff-a49a-4211-90a1-802e36919842/kube-rbac-proxy-frr/0.log" Dec 10 12:27:04 crc kubenswrapper[4780]: I1210 12:27:04.858034 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-jcfnl_6974b1ff-a49a-4211-90a1-802e36919842/reloader/0.log" Dec 10 12:27:04 crc kubenswrapper[4780]: E1210 12:27:04.961375 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 12:27:05 crc kubenswrapper[4780]: I1210 12:27:05.035142 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-webhook-server-7fcb986d4-cllk5_b32ab25c-5e31-4efb-b8d0-0ae92e4e0165/frr-k8s-webhook-server/0.log" Dec 10 12:27:05 crc kubenswrapper[4780]: I1210 12:27:05.332845 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_metallb-operator-controller-manager-6fb7f5d8bb-v59tk_d811e5e5-930d-403f-92a3-0fb26a063acc/manager/0.log" Dec 10 12:27:05 crc kubenswrapper[4780]: I1210 12:27:05.505656 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_metallb-operator-webhook-server-b44d54f55-86z5t_2a8c4e94-9c27-48fa-aec9-21a3cd0af0a7/webhook-server/0.log" Dec 10 12:27:05 crc kubenswrapper[4780]: I1210 12:27:05.641393 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_speaker-hb4wd_2b6296fd-1a9f-4737-b9dd-332a1db07171/kube-rbac-proxy/0.log" Dec 10 12:27:06 crc kubenswrapper[4780]: I1210 12:27:06.557844 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_speaker-hb4wd_2b6296fd-1a9f-4737-b9dd-332a1db07171/speaker/0.log" Dec 10 12:27:06 crc kubenswrapper[4780]: I1210 12:27:06.560846 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-jcfnl_6974b1ff-a49a-4211-90a1-802e36919842/frr/0.log" Dec 10 12:27:11 crc kubenswrapper[4780]: I1210 12:27:11.960660 4780 scope.go:117] "RemoveContainer" containerID="0c9e8a8764b3e192f255d986c37b68fa82367c56c8fc86fca7a6f21654558176" Dec 10 12:27:11 crc kubenswrapper[4780]: E1210 12:27:11.962258 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xhdr5_openshift-machine-config-operator(6bf1dca1-b191-4796-b326-baac53e84045)\"" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" podUID="6bf1dca1-b191-4796-b326-baac53e84045" Dec 10 12:27:18 crc kubenswrapper[4780]: E1210 12:27:18.963966 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to 
\"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 12:27:19 crc kubenswrapper[4780]: E1210 12:27:19.962598 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 12:27:20 crc kubenswrapper[4780]: I1210 12:27:20.803384 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8mgrc7_da7532fb-0c99-41ac-a1ba-49435b50929f/util/0.log" Dec 10 12:27:20 crc kubenswrapper[4780]: I1210 12:27:20.985898 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8mgrc7_da7532fb-0c99-41ac-a1ba-49435b50929f/util/0.log" Dec 10 12:27:21 crc kubenswrapper[4780]: I1210 12:27:21.016557 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8mgrc7_da7532fb-0c99-41ac-a1ba-49435b50929f/pull/0.log" Dec 10 12:27:21 crc kubenswrapper[4780]: I1210 12:27:21.016803 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8mgrc7_da7532fb-0c99-41ac-a1ba-49435b50929f/pull/0.log" Dec 10 12:27:21 crc kubenswrapper[4780]: I1210 12:27:21.284943 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8mgrc7_da7532fb-0c99-41ac-a1ba-49435b50929f/util/0.log" Dec 10 12:27:21 crc kubenswrapper[4780]: I1210 12:27:21.359469 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8mgrc7_da7532fb-0c99-41ac-a1ba-49435b50929f/pull/0.log" Dec 10 12:27:21 crc kubenswrapper[4780]: I1210 12:27:21.370737 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8mgrc7_da7532fb-0c99-41ac-a1ba-49435b50929f/extract/0.log" Dec 10 12:27:21 crc kubenswrapper[4780]: I1210 12:27:21.559305 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212ftm7kp_ed11f272-72d5-4d43-a169-f122ee540562/util/0.log" Dec 10 12:27:21 crc kubenswrapper[4780]: I1210 12:27:21.799597 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212ftm7kp_ed11f272-72d5-4d43-a169-f122ee540562/util/0.log" Dec 10 12:27:21 crc kubenswrapper[4780]: I1210 12:27:21.801596 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212ftm7kp_ed11f272-72d5-4d43-a169-f122ee540562/pull/0.log" Dec 10 12:27:21 crc kubenswrapper[4780]: I1210 12:27:21.830884 4780 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-marketplace_5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212ftm7kp_ed11f272-72d5-4d43-a169-f122ee540562/pull/0.log" Dec 10 12:27:22 crc kubenswrapper[4780]: I1210 12:27:22.090029 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212ftm7kp_ed11f272-72d5-4d43-a169-f122ee540562/pull/0.log" Dec 10 12:27:22 crc kubenswrapper[4780]: I1210 12:27:22.096677 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212ftm7kp_ed11f272-72d5-4d43-a169-f122ee540562/extract/0.log" Dec 10 12:27:22 crc kubenswrapper[4780]: I1210 12:27:22.142828 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212ftm7kp_ed11f272-72d5-4d43-a169-f122ee540562/util/0.log" Dec 10 12:27:22 crc kubenswrapper[4780]: I1210 12:27:22.581346 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210scvkp_8a9d08c5-4ca7-4ef1-b60e-60301f00728b/util/0.log" Dec 10 12:27:22 crc kubenswrapper[4780]: I1210 12:27:22.581514 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210scvkp_8a9d08c5-4ca7-4ef1-b60e-60301f00728b/util/0.log" Dec 10 12:27:22 crc kubenswrapper[4780]: I1210 12:27:22.582812 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210scvkp_8a9d08c5-4ca7-4ef1-b60e-60301f00728b/pull/0.log" Dec 10 12:27:22 crc kubenswrapper[4780]: I1210 12:27:22.583712 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210scvkp_8a9d08c5-4ca7-4ef1-b60e-60301f00728b/pull/0.log" Dec 10 12:27:22 crc kubenswrapper[4780]: I1210 12:27:22.839429 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210scvkp_8a9d08c5-4ca7-4ef1-b60e-60301f00728b/util/0.log" Dec 10 12:27:22 crc kubenswrapper[4780]: I1210 12:27:22.844963 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210scvkp_8a9d08c5-4ca7-4ef1-b60e-60301f00728b/pull/0.log" Dec 10 12:27:22 crc kubenswrapper[4780]: I1210 12:27:22.949188 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210scvkp_8a9d08c5-4ca7-4ef1-b60e-60301f00728b/extract/0.log" Dec 10 12:27:23 crc kubenswrapper[4780]: I1210 12:27:23.079480 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463fd7sck_35c9ca81-c52f-42cc-be90-e863fd7c6bc1/util/0.log" Dec 10 12:27:23 crc kubenswrapper[4780]: I1210 12:27:23.262037 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463fd7sck_35c9ca81-c52f-42cc-be90-e863fd7c6bc1/util/0.log" Dec 10 12:27:23 crc kubenswrapper[4780]: I1210 12:27:23.270415 4780 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-marketplace_a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463fd7sck_35c9ca81-c52f-42cc-be90-e863fd7c6bc1/pull/0.log" Dec 10 12:27:23 crc kubenswrapper[4780]: I1210 12:27:23.292149 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463fd7sck_35c9ca81-c52f-42cc-be90-e863fd7c6bc1/pull/0.log" Dec 10 12:27:23 crc kubenswrapper[4780]: I1210 12:27:23.579953 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463fd7sck_35c9ca81-c52f-42cc-be90-e863fd7c6bc1/pull/0.log" Dec 10 12:27:23 crc kubenswrapper[4780]: I1210 12:27:23.614978 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463fd7sck_35c9ca81-c52f-42cc-be90-e863fd7c6bc1/extract/0.log" Dec 10 12:27:23 crc kubenswrapper[4780]: I1210 12:27:23.628674 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463fd7sck_35c9ca81-c52f-42cc-be90-e863fd7c6bc1/util/0.log" Dec 10 12:27:23 crc kubenswrapper[4780]: I1210 12:27:23.827158 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83swhtx_a919dc65-d37b-4f09-b396-7dc2aa7ea03a/util/0.log" Dec 10 12:27:24 crc kubenswrapper[4780]: I1210 12:27:24.013183 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83swhtx_a919dc65-d37b-4f09-b396-7dc2aa7ea03a/util/0.log" Dec 10 12:27:24 crc kubenswrapper[4780]: I1210 12:27:24.018127 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83swhtx_a919dc65-d37b-4f09-b396-7dc2aa7ea03a/pull/0.log" Dec 10 12:27:24 crc kubenswrapper[4780]: I1210 12:27:24.060055 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83swhtx_a919dc65-d37b-4f09-b396-7dc2aa7ea03a/pull/0.log" Dec 10 12:27:24 crc kubenswrapper[4780]: I1210 12:27:24.210402 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83swhtx_a919dc65-d37b-4f09-b396-7dc2aa7ea03a/util/0.log" Dec 10 12:27:24 crc kubenswrapper[4780]: I1210 12:27:24.289982 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83swhtx_a919dc65-d37b-4f09-b396-7dc2aa7ea03a/pull/0.log" Dec 10 12:27:24 crc kubenswrapper[4780]: I1210 12:27:24.305900 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83swhtx_a919dc65-d37b-4f09-b396-7dc2aa7ea03a/extract/0.log" Dec 10 12:27:24 crc kubenswrapper[4780]: I1210 12:27:24.686062 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-r249g_440138f1-b946-4712-a553-fccc76e51bc2/extract-utilities/0.log" Dec 10 12:27:24 crc kubenswrapper[4780]: I1210 12:27:24.897201 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-r249g_440138f1-b946-4712-a553-fccc76e51bc2/extract-utilities/0.log" Dec 10 12:27:24 crc 
kubenswrapper[4780]: I1210 12:27:24.921294 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-r249g_440138f1-b946-4712-a553-fccc76e51bc2/extract-content/0.log" Dec 10 12:27:24 crc kubenswrapper[4780]: I1210 12:27:24.930529 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-r249g_440138f1-b946-4712-a553-fccc76e51bc2/extract-content/0.log" Dec 10 12:27:25 crc kubenswrapper[4780]: I1210 12:27:25.169760 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-r249g_440138f1-b946-4712-a553-fccc76e51bc2/extract-utilities/0.log" Dec 10 12:27:25 crc kubenswrapper[4780]: I1210 12:27:25.248406 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-r249g_440138f1-b946-4712-a553-fccc76e51bc2/extract-content/0.log" Dec 10 12:27:25 crc kubenswrapper[4780]: I1210 12:27:25.307632 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-vcdcr_de46d142-d66a-4f62-887e-c1a0ef0e8da5/extract-utilities/0.log" Dec 10 12:27:25 crc kubenswrapper[4780]: I1210 12:27:25.563467 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-vcdcr_de46d142-d66a-4f62-887e-c1a0ef0e8da5/extract-content/0.log" Dec 10 12:27:25 crc kubenswrapper[4780]: I1210 12:27:25.608175 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-vcdcr_de46d142-d66a-4f62-887e-c1a0ef0e8da5/extract-content/0.log" Dec 10 12:27:25 crc kubenswrapper[4780]: I1210 12:27:25.645784 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-vcdcr_de46d142-d66a-4f62-887e-c1a0ef0e8da5/extract-utilities/0.log" Dec 10 12:27:25 crc kubenswrapper[4780]: I1210 12:27:25.735003 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-r249g_440138f1-b946-4712-a553-fccc76e51bc2/registry-server/0.log" Dec 10 12:27:25 crc kubenswrapper[4780]: I1210 12:27:25.861866 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-vcdcr_de46d142-d66a-4f62-887e-c1a0ef0e8da5/extract-content/0.log" Dec 10 12:27:25 crc kubenswrapper[4780]: I1210 12:27:25.882649 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-vcdcr_de46d142-d66a-4f62-887e-c1a0ef0e8da5/extract-utilities/0.log" Dec 10 12:27:26 crc kubenswrapper[4780]: I1210 12:27:26.181946 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_marketplace-operator-79b997595-s6pjk_7f5730ec-f362-4d4f-a032-9965ace15473/marketplace-operator/0.log" Dec 10 12:27:26 crc kubenswrapper[4780]: I1210 12:27:26.367695 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-hsqfx_0f530e21-48e6-453e-bb57-7d4ff179e1fc/extract-utilities/0.log" Dec 10 12:27:26 crc kubenswrapper[4780]: I1210 12:27:26.711461 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-hsqfx_0f530e21-48e6-453e-bb57-7d4ff179e1fc/extract-content/0.log" Dec 10 12:27:26 crc kubenswrapper[4780]: I1210 12:27:26.717239 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-hsqfx_0f530e21-48e6-453e-bb57-7d4ff179e1fc/extract-utilities/0.log" Dec 10 12:27:26 crc 
kubenswrapper[4780]: I1210 12:27:26.730504 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-hsqfx_0f530e21-48e6-453e-bb57-7d4ff179e1fc/extract-content/0.log" Dec 10 12:27:26 crc kubenswrapper[4780]: I1210 12:27:26.962702 4780 scope.go:117] "RemoveContainer" containerID="0c9e8a8764b3e192f255d986c37b68fa82367c56c8fc86fca7a6f21654558176" Dec 10 12:27:26 crc kubenswrapper[4780]: E1210 12:27:26.963148 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xhdr5_openshift-machine-config-operator(6bf1dca1-b191-4796-b326-baac53e84045)\"" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" podUID="6bf1dca1-b191-4796-b326-baac53e84045" Dec 10 12:27:27 crc kubenswrapper[4780]: I1210 12:27:27.005029 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-hsqfx_0f530e21-48e6-453e-bb57-7d4ff179e1fc/extract-utilities/0.log" Dec 10 12:27:27 crc kubenswrapper[4780]: I1210 12:27:27.067160 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-hsqfx_0f530e21-48e6-453e-bb57-7d4ff179e1fc/extract-content/0.log" Dec 10 12:27:27 crc kubenswrapper[4780]: I1210 12:27:27.218147 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-vcdcr_de46d142-d66a-4f62-887e-c1a0ef0e8da5/registry-server/0.log" Dec 10 12:27:27 crc kubenswrapper[4780]: I1210 12:27:27.354136 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-mkxpc_f85d01b9-be91-4ff9-bf9d-886fa995d582/extract-utilities/0.log" Dec 10 12:27:27 crc kubenswrapper[4780]: I1210 12:27:27.405514 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-hsqfx_0f530e21-48e6-453e-bb57-7d4ff179e1fc/registry-server/0.log" Dec 10 12:27:27 crc kubenswrapper[4780]: I1210 12:27:27.526550 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-mkxpc_f85d01b9-be91-4ff9-bf9d-886fa995d582/extract-content/0.log" Dec 10 12:27:27 crc kubenswrapper[4780]: I1210 12:27:27.560434 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-mkxpc_f85d01b9-be91-4ff9-bf9d-886fa995d582/extract-content/0.log" Dec 10 12:27:27 crc kubenswrapper[4780]: I1210 12:27:27.582771 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-mkxpc_f85d01b9-be91-4ff9-bf9d-886fa995d582/extract-utilities/0.log" Dec 10 12:27:27 crc kubenswrapper[4780]: I1210 12:27:27.791691 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-mkxpc_f85d01b9-be91-4ff9-bf9d-886fa995d582/extract-utilities/0.log" Dec 10 12:27:27 crc kubenswrapper[4780]: I1210 12:27:27.797705 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-mkxpc_f85d01b9-be91-4ff9-bf9d-886fa995d582/extract-content/0.log" Dec 10 12:27:28 crc kubenswrapper[4780]: I1210 12:27:28.802228 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-mkxpc_f85d01b9-be91-4ff9-bf9d-886fa995d582/registry-server/0.log" Dec 10 12:27:32 crc kubenswrapper[4780]: E1210 12:27:32.963108 4780 pod_workers.go:1301] "Error 
syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 12:27:34 crc kubenswrapper[4780]: I1210 12:27:34.963834 4780 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Dec 10 12:27:35 crc kubenswrapper[4780]: E1210 12:27:35.087569 4780 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested" Dec 10 12:27:35 crc kubenswrapper[4780]: E1210 12:27:35.087654 4780 kuberuntime_image.go:55] "Failed to pull image" err="initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested" Dec 10 12:27:35 crc kubenswrapper[4780]: E1210 12:27:35.087822 4780 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:heat-db-sync,Image:quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested,Command:[/bin/bash],Args:[-c /usr/bin/heat-manage --config-dir /etc/heat/heat.conf.d db_sync],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:true,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/heat/heat.conf.d/00-default.conf,SubPath:00-default.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:false,MountPath:/etc/heat/heat.conf.d/01-custom.conf,SubPath:01-custom.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/my.cnf,SubPath:my.cnf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-fh6ms,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL 
MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42418,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:*42418,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod heat-db-sync-nd4t7_openstack(4ba2892c-316e-4819-a33c-d7b2b6803553): ErrImagePull: initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" logger="UnhandledError" Dec 10 12:27:35 crc kubenswrapper[4780]: E1210 12:27:35.089249 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ErrImagePull: \"initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 12:27:40 crc kubenswrapper[4780]: I1210 12:27:40.959863 4780 scope.go:117] "RemoveContainer" containerID="0c9e8a8764b3e192f255d986c37b68fa82367c56c8fc86fca7a6f21654558176" Dec 10 12:27:40 crc kubenswrapper[4780]: E1210 12:27:40.961362 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xhdr5_openshift-machine-config-operator(6bf1dca1-b191-4796-b326-baac53e84045)\"" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" podUID="6bf1dca1-b191-4796-b326-baac53e84045" Dec 10 12:27:43 crc kubenswrapper[4780]: I1210 12:27:43.848693 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_obo-prometheus-operator-668cf9dfbb-fqxm5_fd80e9af-20c4-4aaa-9f38-4f46c3b610fb/prometheus-operator/0.log" Dec 10 12:27:44 crc kubenswrapper[4780]: I1210 12:27:44.173756 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_obo-prometheus-operator-admission-webhook-7767cf9dd9-bbrkh_db680a43-b9fa-45d6-b751-f4467cfe5065/prometheus-operator-admission-webhook/0.log" Dec 10 12:27:44 crc kubenswrapper[4780]: I1210 12:27:44.185567 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_obo-prometheus-operator-admission-webhook-7767cf9dd9-fcj9f_42449c87-f0c6-4433-92cb-f89e51cb5a14/prometheus-operator-admission-webhook/0.log" Dec 10 12:27:44 crc kubenswrapper[4780]: I1210 12:27:44.399823 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_observability-ui-dashboards-7d5fb4cbfb-26rg4_f1e3b9ec-1036-4bee-bbc8-336293208b48/observability-ui-dashboards/0.log" Dec 10 12:27:44 crc kubenswrapper[4780]: I1210 12:27:44.453338 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_observability-operator-d8bb48f5d-kmhks_780b464e-7e56-49ba-b0af-fc0731e1290d/operator/0.log" Dec 10 12:27:44 crc kubenswrapper[4780]: I1210 12:27:44.615345 4780 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-operators_perses-operator-5446b9c989-m5pj4_fb9d5eb8-6ba0-4dea-8226-a3e362924f16/perses-operator/0.log" Dec 10 12:27:44 crc kubenswrapper[4780]: E1210 12:27:44.962906 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 12:27:48 crc kubenswrapper[4780]: E1210 12:27:48.962428 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 12:27:55 crc kubenswrapper[4780]: I1210 12:27:55.976045 4780 scope.go:117] "RemoveContainer" containerID="0c9e8a8764b3e192f255d986c37b68fa82367c56c8fc86fca7a6f21654558176" Dec 10 12:27:55 crc kubenswrapper[4780]: E1210 12:27:55.977091 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xhdr5_openshift-machine-config-operator(6bf1dca1-b191-4796-b326-baac53e84045)\"" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" podUID="6bf1dca1-b191-4796-b326-baac53e84045" Dec 10 12:27:55 crc kubenswrapper[4780]: E1210 12:27:55.985422 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 12:28:03 crc kubenswrapper[4780]: E1210 12:28:03.964048 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 12:28:04 crc kubenswrapper[4780]: I1210 12:28:04.025566 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators-redhat_loki-operator-controller-manager-54599dc8c7-jsrb7_1c2ed53a-a11a-4eb0-b91e-cba4a0a510fa/kube-rbac-proxy/0.log" Dec 10 12:28:04 crc kubenswrapper[4780]: I1210 12:28:04.131569 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators-redhat_loki-operator-controller-manager-54599dc8c7-jsrb7_1c2ed53a-a11a-4eb0-b91e-cba4a0a510fa/manager/0.log" Dec 10 12:28:06 crc kubenswrapper[4780]: I1210 12:28:06.961803 4780 scope.go:117] "RemoveContainer" containerID="0c9e8a8764b3e192f255d986c37b68fa82367c56c8fc86fca7a6f21654558176" Dec 10 12:28:06 crc kubenswrapper[4780]: E1210 12:28:06.962765 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xhdr5_openshift-machine-config-operator(6bf1dca1-b191-4796-b326-baac53e84045)\"" 
pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" podUID="6bf1dca1-b191-4796-b326-baac53e84045" Dec 10 12:28:11 crc kubenswrapper[4780]: E1210 12:28:11.086816 4780 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested" Dec 10 12:28:11 crc kubenswrapper[4780]: E1210 12:28:11.087361 4780 kuberuntime_image.go:55] "Failed to pull image" err="initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested" Dec 10 12:28:11 crc kubenswrapper[4780]: E1210 12:28:11.087553 4780 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:ceilometer-central-agent,Image:quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n5d9hcfh66bh66bh89h5cdh97h57ch598h68h5b5h689h56chc5h96h58ch687h5dfh5ddh645h68bhcchcdh56ch56fh9fh654hd4h8dhb9h74h59cq,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/var/lib/openstack/bin,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/openstack/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:ceilometer-central-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-gf2w8,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[/usr/bin/python3 
/var/lib/openstack/bin/centralhealth.py],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:300,TimeoutSeconds:5,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ceilometer-0_openstack(317b5b7c-bb08-4441-a2ef-8c2d7390ada6): ErrImagePull: initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" logger="UnhandledError" Dec 10 12:28:11 crc kubenswrapper[4780]: E1210 12:28:11.088649 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ErrImagePull: \"initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 12:28:18 crc kubenswrapper[4780]: E1210 12:28:18.961034 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 12:28:21 crc kubenswrapper[4780]: I1210 12:28:21.959607 4780 scope.go:117] "RemoveContainer" containerID="0c9e8a8764b3e192f255d986c37b68fa82367c56c8fc86fca7a6f21654558176" Dec 10 12:28:21 crc kubenswrapper[4780]: E1210 12:28:21.960521 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xhdr5_openshift-machine-config-operator(6bf1dca1-b191-4796-b326-baac53e84045)\"" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" podUID="6bf1dca1-b191-4796-b326-baac53e84045" Dec 10 12:28:21 crc kubenswrapper[4780]: E1210 12:28:21.964433 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 12:28:31 crc kubenswrapper[4780]: E1210 12:28:31.963845 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image 
\\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 12:28:33 crc kubenswrapper[4780]: I1210 12:28:33.962049 4780 scope.go:117] "RemoveContainer" containerID="0c9e8a8764b3e192f255d986c37b68fa82367c56c8fc86fca7a6f21654558176" Dec 10 12:28:33 crc kubenswrapper[4780]: E1210 12:28:33.962952 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xhdr5_openshift-machine-config-operator(6bf1dca1-b191-4796-b326-baac53e84045)\"" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" podUID="6bf1dca1-b191-4796-b326-baac53e84045" Dec 10 12:28:36 crc kubenswrapper[4780]: E1210 12:28:36.963152 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 12:28:43 crc kubenswrapper[4780]: E1210 12:28:43.963625 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 12:28:44 crc kubenswrapper[4780]: I1210 12:28:44.959502 4780 scope.go:117] "RemoveContainer" containerID="0c9e8a8764b3e192f255d986c37b68fa82367c56c8fc86fca7a6f21654558176" Dec 10 12:28:44 crc kubenswrapper[4780]: E1210 12:28:44.960417 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xhdr5_openshift-machine-config-operator(6bf1dca1-b191-4796-b326-baac53e84045)\"" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" podUID="6bf1dca1-b191-4796-b326-baac53e84045" Dec 10 12:28:50 crc kubenswrapper[4780]: E1210 12:28:50.963079 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 12:28:54 crc kubenswrapper[4780]: I1210 12:28:54.613195 4780 scope.go:117] "RemoveContainer" containerID="fe4dfda8399a1d560043d4ef0a7230a3ce9f66f5ee89ff9315f67e39ae4c5df5" Dec 10 12:28:56 crc kubenswrapper[4780]: I1210 12:28:56.959369 4780 scope.go:117] "RemoveContainer" containerID="0c9e8a8764b3e192f255d986c37b68fa82367c56c8fc86fca7a6f21654558176" Dec 10 12:28:56 crc kubenswrapper[4780]: E1210 12:28:56.960030 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xhdr5_openshift-machine-config-operator(6bf1dca1-b191-4796-b326-baac53e84045)\"" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" 
podUID="6bf1dca1-b191-4796-b326-baac53e84045" Dec 10 12:28:56 crc kubenswrapper[4780]: E1210 12:28:56.962369 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 12:29:02 crc kubenswrapper[4780]: E1210 12:29:02.962557 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 12:29:08 crc kubenswrapper[4780]: E1210 12:29:08.962795 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 12:29:10 crc kubenswrapper[4780]: I1210 12:29:10.958953 4780 scope.go:117] "RemoveContainer" containerID="0c9e8a8764b3e192f255d986c37b68fa82367c56c8fc86fca7a6f21654558176" Dec 10 12:29:10 crc kubenswrapper[4780]: E1210 12:29:10.959589 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xhdr5_openshift-machine-config-operator(6bf1dca1-b191-4796-b326-baac53e84045)\"" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" podUID="6bf1dca1-b191-4796-b326-baac53e84045" Dec 10 12:29:16 crc kubenswrapper[4780]: E1210 12:29:16.962998 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 12:29:20 crc kubenswrapper[4780]: E1210 12:29:20.960983 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 12:29:22 crc kubenswrapper[4780]: I1210 12:29:22.959101 4780 scope.go:117] "RemoveContainer" containerID="0c9e8a8764b3e192f255d986c37b68fa82367c56c8fc86fca7a6f21654558176" Dec 10 12:29:22 crc kubenswrapper[4780]: E1210 12:29:22.960124 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xhdr5_openshift-machine-config-operator(6bf1dca1-b191-4796-b326-baac53e84045)\"" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" podUID="6bf1dca1-b191-4796-b326-baac53e84045" Dec 10 12:29:29 crc kubenswrapper[4780]: E1210 12:29:29.963117 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for 
\"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 12:29:31 crc kubenswrapper[4780]: E1210 12:29:31.963093 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 12:29:36 crc kubenswrapper[4780]: I1210 12:29:36.959306 4780 scope.go:117] "RemoveContainer" containerID="0c9e8a8764b3e192f255d986c37b68fa82367c56c8fc86fca7a6f21654558176" Dec 10 12:29:37 crc kubenswrapper[4780]: I1210 12:29:37.867357 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" event={"ID":"6bf1dca1-b191-4796-b326-baac53e84045","Type":"ContainerStarted","Data":"ed80fb2da51647e32144356ea3421919f7b9dfad2d2a67b5013099ddcd2130d4"} Dec 10 12:29:41 crc kubenswrapper[4780]: E1210 12:29:41.963176 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 12:29:46 crc kubenswrapper[4780]: E1210 12:29:46.962808 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 12:29:48 crc kubenswrapper[4780]: I1210 12:29:48.709617 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-z8czf"] Dec 10 12:29:48 crc kubenswrapper[4780]: E1210 12:29:48.711021 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c59f09c8-9aa9-47c8-be29-4f579a2c08f6" containerName="extract-utilities" Dec 10 12:29:48 crc kubenswrapper[4780]: I1210 12:29:48.711042 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="c59f09c8-9aa9-47c8-be29-4f579a2c08f6" containerName="extract-utilities" Dec 10 12:29:48 crc kubenswrapper[4780]: E1210 12:29:48.711771 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c59f09c8-9aa9-47c8-be29-4f579a2c08f6" containerName="extract-content" Dec 10 12:29:48 crc kubenswrapper[4780]: I1210 12:29:48.711808 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="c59f09c8-9aa9-47c8-be29-4f579a2c08f6" containerName="extract-content" Dec 10 12:29:48 crc kubenswrapper[4780]: E1210 12:29:48.711832 4780 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c59f09c8-9aa9-47c8-be29-4f579a2c08f6" containerName="registry-server" Dec 10 12:29:48 crc kubenswrapper[4780]: I1210 12:29:48.711839 4780 state_mem.go:107] "Deleted CPUSet assignment" podUID="c59f09c8-9aa9-47c8-be29-4f579a2c08f6" containerName="registry-server" Dec 10 12:29:48 crc kubenswrapper[4780]: I1210 12:29:48.712207 4780 memory_manager.go:354] "RemoveStaleState removing state" podUID="c59f09c8-9aa9-47c8-be29-4f579a2c08f6" containerName="registry-server" Dec 10 12:29:48 
crc kubenswrapper[4780]: I1210 12:29:48.715146 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-z8czf" Dec 10 12:29:48 crc kubenswrapper[4780]: I1210 12:29:48.729754 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-z8czf"] Dec 10 12:29:48 crc kubenswrapper[4780]: I1210 12:29:48.863562 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tmwg5\" (UniqueName: \"kubernetes.io/projected/2d45881d-d4b0-4d1e-a372-8a10ab2d789b-kube-api-access-tmwg5\") pod \"community-operators-z8czf\" (UID: \"2d45881d-d4b0-4d1e-a372-8a10ab2d789b\") " pod="openshift-marketplace/community-operators-z8czf" Dec 10 12:29:48 crc kubenswrapper[4780]: I1210 12:29:48.863716 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2d45881d-d4b0-4d1e-a372-8a10ab2d789b-catalog-content\") pod \"community-operators-z8czf\" (UID: \"2d45881d-d4b0-4d1e-a372-8a10ab2d789b\") " pod="openshift-marketplace/community-operators-z8czf" Dec 10 12:29:48 crc kubenswrapper[4780]: I1210 12:29:48.863856 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2d45881d-d4b0-4d1e-a372-8a10ab2d789b-utilities\") pod \"community-operators-z8czf\" (UID: \"2d45881d-d4b0-4d1e-a372-8a10ab2d789b\") " pod="openshift-marketplace/community-operators-z8czf" Dec 10 12:29:48 crc kubenswrapper[4780]: I1210 12:29:48.966179 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2d45881d-d4b0-4d1e-a372-8a10ab2d789b-utilities\") pod \"community-operators-z8czf\" (UID: \"2d45881d-d4b0-4d1e-a372-8a10ab2d789b\") " pod="openshift-marketplace/community-operators-z8czf" Dec 10 12:29:48 crc kubenswrapper[4780]: I1210 12:29:48.966432 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tmwg5\" (UniqueName: \"kubernetes.io/projected/2d45881d-d4b0-4d1e-a372-8a10ab2d789b-kube-api-access-tmwg5\") pod \"community-operators-z8czf\" (UID: \"2d45881d-d4b0-4d1e-a372-8a10ab2d789b\") " pod="openshift-marketplace/community-operators-z8czf" Dec 10 12:29:48 crc kubenswrapper[4780]: I1210 12:29:48.966507 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2d45881d-d4b0-4d1e-a372-8a10ab2d789b-catalog-content\") pod \"community-operators-z8czf\" (UID: \"2d45881d-d4b0-4d1e-a372-8a10ab2d789b\") " pod="openshift-marketplace/community-operators-z8czf" Dec 10 12:29:48 crc kubenswrapper[4780]: I1210 12:29:48.967186 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2d45881d-d4b0-4d1e-a372-8a10ab2d789b-catalog-content\") pod \"community-operators-z8czf\" (UID: \"2d45881d-d4b0-4d1e-a372-8a10ab2d789b\") " pod="openshift-marketplace/community-operators-z8czf" Dec 10 12:29:48 crc kubenswrapper[4780]: I1210 12:29:48.967438 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2d45881d-d4b0-4d1e-a372-8a10ab2d789b-utilities\") pod \"community-operators-z8czf\" (UID: \"2d45881d-d4b0-4d1e-a372-8a10ab2d789b\") " pod="openshift-marketplace/community-operators-z8czf" 
Dec 10 12:29:49 crc kubenswrapper[4780]: I1210 12:29:49.004873 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tmwg5\" (UniqueName: \"kubernetes.io/projected/2d45881d-d4b0-4d1e-a372-8a10ab2d789b-kube-api-access-tmwg5\") pod \"community-operators-z8czf\" (UID: \"2d45881d-d4b0-4d1e-a372-8a10ab2d789b\") " pod="openshift-marketplace/community-operators-z8czf" Dec 10 12:29:49 crc kubenswrapper[4780]: I1210 12:29:49.056632 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-z8czf" Dec 10 12:29:49 crc kubenswrapper[4780]: I1210 12:29:49.854829 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-z8czf"] Dec 10 12:29:50 crc kubenswrapper[4780]: I1210 12:29:50.034880 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-z8czf" event={"ID":"2d45881d-d4b0-4d1e-a372-8a10ab2d789b","Type":"ContainerStarted","Data":"0369cd678ba327f3e554f3016ab6f2fd169a560a98dd381560c8697de4cb1050"} Dec 10 12:29:51 crc kubenswrapper[4780]: I1210 12:29:51.053286 4780 generic.go:334] "Generic (PLEG): container finished" podID="2d45881d-d4b0-4d1e-a372-8a10ab2d789b" containerID="ceb8e95987baf4d8f5cd372ce260c983527a4eaf35919d0b01dd524ca910055a" exitCode=0 Dec 10 12:29:51 crc kubenswrapper[4780]: I1210 12:29:51.053485 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-z8czf" event={"ID":"2d45881d-d4b0-4d1e-a372-8a10ab2d789b","Type":"ContainerDied","Data":"ceb8e95987baf4d8f5cd372ce260c983527a4eaf35919d0b01dd524ca910055a"} Dec 10 12:29:51 crc kubenswrapper[4780]: I1210 12:29:51.107432 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-tstql"] Dec 10 12:29:51 crc kubenswrapper[4780]: I1210 12:29:51.113254 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-tstql" Dec 10 12:29:51 crc kubenswrapper[4780]: I1210 12:29:51.143067 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-tstql"] Dec 10 12:29:51 crc kubenswrapper[4780]: I1210 12:29:51.246438 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/224783c9-bbf1-4645-9bdb-bee165514d6c-catalog-content\") pod \"certified-operators-tstql\" (UID: \"224783c9-bbf1-4645-9bdb-bee165514d6c\") " pod="openshift-marketplace/certified-operators-tstql" Dec 10 12:29:51 crc kubenswrapper[4780]: I1210 12:29:51.247044 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mwh6s\" (UniqueName: \"kubernetes.io/projected/224783c9-bbf1-4645-9bdb-bee165514d6c-kube-api-access-mwh6s\") pod \"certified-operators-tstql\" (UID: \"224783c9-bbf1-4645-9bdb-bee165514d6c\") " pod="openshift-marketplace/certified-operators-tstql" Dec 10 12:29:51 crc kubenswrapper[4780]: I1210 12:29:51.247665 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/224783c9-bbf1-4645-9bdb-bee165514d6c-utilities\") pod \"certified-operators-tstql\" (UID: \"224783c9-bbf1-4645-9bdb-bee165514d6c\") " pod="openshift-marketplace/certified-operators-tstql" Dec 10 12:29:51 crc kubenswrapper[4780]: I1210 12:29:51.350327 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/224783c9-bbf1-4645-9bdb-bee165514d6c-utilities\") pod \"certified-operators-tstql\" (UID: \"224783c9-bbf1-4645-9bdb-bee165514d6c\") " pod="openshift-marketplace/certified-operators-tstql" Dec 10 12:29:51 crc kubenswrapper[4780]: I1210 12:29:51.350546 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/224783c9-bbf1-4645-9bdb-bee165514d6c-catalog-content\") pod \"certified-operators-tstql\" (UID: \"224783c9-bbf1-4645-9bdb-bee165514d6c\") " pod="openshift-marketplace/certified-operators-tstql" Dec 10 12:29:51 crc kubenswrapper[4780]: I1210 12:29:51.350588 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mwh6s\" (UniqueName: \"kubernetes.io/projected/224783c9-bbf1-4645-9bdb-bee165514d6c-kube-api-access-mwh6s\") pod \"certified-operators-tstql\" (UID: \"224783c9-bbf1-4645-9bdb-bee165514d6c\") " pod="openshift-marketplace/certified-operators-tstql" Dec 10 12:29:51 crc kubenswrapper[4780]: I1210 12:29:51.351096 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/224783c9-bbf1-4645-9bdb-bee165514d6c-utilities\") pod \"certified-operators-tstql\" (UID: \"224783c9-bbf1-4645-9bdb-bee165514d6c\") " pod="openshift-marketplace/certified-operators-tstql" Dec 10 12:29:51 crc kubenswrapper[4780]: I1210 12:29:51.351393 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/224783c9-bbf1-4645-9bdb-bee165514d6c-catalog-content\") pod \"certified-operators-tstql\" (UID: \"224783c9-bbf1-4645-9bdb-bee165514d6c\") " pod="openshift-marketplace/certified-operators-tstql" Dec 10 12:29:51 crc kubenswrapper[4780]: I1210 12:29:51.382552 4780 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-mwh6s\" (UniqueName: \"kubernetes.io/projected/224783c9-bbf1-4645-9bdb-bee165514d6c-kube-api-access-mwh6s\") pod \"certified-operators-tstql\" (UID: \"224783c9-bbf1-4645-9bdb-bee165514d6c\") " pod="openshift-marketplace/certified-operators-tstql" Dec 10 12:29:51 crc kubenswrapper[4780]: I1210 12:29:51.447296 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-tstql" Dec 10 12:29:52 crc kubenswrapper[4780]: I1210 12:29:52.157143 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-tstql"] Dec 10 12:29:53 crc kubenswrapper[4780]: I1210 12:29:53.086503 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-z8czf" event={"ID":"2d45881d-d4b0-4d1e-a372-8a10ab2d789b","Type":"ContainerStarted","Data":"d14601abd546a9afb7fe2bee0f08f2a808beaeba01a695430808434fe27bcae8"} Dec 10 12:29:53 crc kubenswrapper[4780]: I1210 12:29:53.091678 4780 generic.go:334] "Generic (PLEG): container finished" podID="224783c9-bbf1-4645-9bdb-bee165514d6c" containerID="4e6d62541e2ddc2b0ed13cd6ce447aadad8e731383e8e286e065973674a18b29" exitCode=0 Dec 10 12:29:53 crc kubenswrapper[4780]: I1210 12:29:53.091721 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-tstql" event={"ID":"224783c9-bbf1-4645-9bdb-bee165514d6c","Type":"ContainerDied","Data":"4e6d62541e2ddc2b0ed13cd6ce447aadad8e731383e8e286e065973674a18b29"} Dec 10 12:29:53 crc kubenswrapper[4780]: I1210 12:29:53.091748 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-tstql" event={"ID":"224783c9-bbf1-4645-9bdb-bee165514d6c","Type":"ContainerStarted","Data":"710afe507e0ad9b79932edf0774d42e97d17cbc20a9deecce59dc45a2abb1e9d"} Dec 10 12:29:54 crc kubenswrapper[4780]: I1210 12:29:54.107865 4780 generic.go:334] "Generic (PLEG): container finished" podID="2d45881d-d4b0-4d1e-a372-8a10ab2d789b" containerID="d14601abd546a9afb7fe2bee0f08f2a808beaeba01a695430808434fe27bcae8" exitCode=0 Dec 10 12:29:54 crc kubenswrapper[4780]: I1210 12:29:54.107986 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-z8czf" event={"ID":"2d45881d-d4b0-4d1e-a372-8a10ab2d789b","Type":"ContainerDied","Data":"d14601abd546a9afb7fe2bee0f08f2a808beaeba01a695430808434fe27bcae8"} Dec 10 12:29:54 crc kubenswrapper[4780]: I1210 12:29:54.111779 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-tstql" event={"ID":"224783c9-bbf1-4645-9bdb-bee165514d6c","Type":"ContainerStarted","Data":"a14dd3334ef3c84c67933d8fd1d2779d8c44f718a83fe85e4a47ac3b95a7b640"} Dec 10 12:29:56 crc kubenswrapper[4780]: E1210 12:29:56.961597 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 12:29:57 crc kubenswrapper[4780]: I1210 12:29:57.179208 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-z8czf" event={"ID":"2d45881d-d4b0-4d1e-a372-8a10ab2d789b","Type":"ContainerStarted","Data":"1e91a470c6d7f0ffdfb81a145ffcc9fe141ab304d11c0b5bb0574e0b9ba5c4ad"} Dec 10 12:29:57 crc 
kubenswrapper[4780]: I1210 12:29:57.211585 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-z8czf" podStartSLOduration=3.68897369 podStartE2EDuration="9.211552573s" podCreationTimestamp="2025-12-10 12:29:48 +0000 UTC" firstStartedPulling="2025-12-10 12:29:51.055597273 +0000 UTC m=+6295.908990716" lastFinishedPulling="2025-12-10 12:29:56.578176156 +0000 UTC m=+6301.431569599" observedRunningTime="2025-12-10 12:29:57.20159388 +0000 UTC m=+6302.054987313" watchObservedRunningTime="2025-12-10 12:29:57.211552573 +0000 UTC m=+6302.064946016" Dec 10 12:29:57 crc kubenswrapper[4780]: E1210 12:29:57.983346 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 12:29:59 crc kubenswrapper[4780]: I1210 12:29:59.057240 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-z8czf" Dec 10 12:29:59 crc kubenswrapper[4780]: I1210 12:29:59.057973 4780 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-z8czf" Dec 10 12:29:59 crc kubenswrapper[4780]: I1210 12:29:59.210839 4780 generic.go:334] "Generic (PLEG): container finished" podID="224783c9-bbf1-4645-9bdb-bee165514d6c" containerID="a14dd3334ef3c84c67933d8fd1d2779d8c44f718a83fe85e4a47ac3b95a7b640" exitCode=0 Dec 10 12:29:59 crc kubenswrapper[4780]: I1210 12:29:59.210938 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-tstql" event={"ID":"224783c9-bbf1-4645-9bdb-bee165514d6c","Type":"ContainerDied","Data":"a14dd3334ef3c84c67933d8fd1d2779d8c44f718a83fe85e4a47ac3b95a7b640"} Dec 10 12:30:00 crc kubenswrapper[4780]: I1210 12:30:00.251333 4780 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/community-operators-z8czf" podUID="2d45881d-d4b0-4d1e-a372-8a10ab2d789b" containerName="registry-server" probeResult="failure" output=< Dec 10 12:30:00 crc kubenswrapper[4780]: timeout: failed to connect service ":50051" within 1s Dec 10 12:30:00 crc kubenswrapper[4780]: > Dec 10 12:30:00 crc kubenswrapper[4780]: I1210 12:30:00.290376 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-tstql" event={"ID":"224783c9-bbf1-4645-9bdb-bee165514d6c","Type":"ContainerStarted","Data":"07a2047ce77af1568319cc3ba5207ea18e581e7495071029d03a70071e7e2a27"} Dec 10 12:30:00 crc kubenswrapper[4780]: I1210 12:30:00.369386 4780 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29422830-p6shs"] Dec 10 12:30:00 crc kubenswrapper[4780]: I1210 12:30:00.373637 4780 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29422830-p6shs" Dec 10 12:30:00 crc kubenswrapper[4780]: I1210 12:30:00.379375 4780 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-tstql" podStartSLOduration=2.785493047 podStartE2EDuration="9.379345707s" podCreationTimestamp="2025-12-10 12:29:51 +0000 UTC" firstStartedPulling="2025-12-10 12:29:53.095048289 +0000 UTC m=+6297.948441752" lastFinishedPulling="2025-12-10 12:29:59.688900969 +0000 UTC m=+6304.542294412" observedRunningTime="2025-12-10 12:30:00.360099108 +0000 UTC m=+6305.213492551" watchObservedRunningTime="2025-12-10 12:30:00.379345707 +0000 UTC m=+6305.232739160" Dec 10 12:30:00 crc kubenswrapper[4780]: I1210 12:30:00.381612 4780 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Dec 10 12:30:00 crc kubenswrapper[4780]: I1210 12:30:00.381635 4780 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Dec 10 12:30:00 crc kubenswrapper[4780]: I1210 12:30:00.440810 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29422830-p6shs"] Dec 10 12:30:00 crc kubenswrapper[4780]: I1210 12:30:00.511277 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/b19147eb-c15c-46c9-bbcd-004776aa6223-secret-volume\") pod \"collect-profiles-29422830-p6shs\" (UID: \"b19147eb-c15c-46c9-bbcd-004776aa6223\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29422830-p6shs" Dec 10 12:30:00 crc kubenswrapper[4780]: I1210 12:30:00.511655 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/b19147eb-c15c-46c9-bbcd-004776aa6223-config-volume\") pod \"collect-profiles-29422830-p6shs\" (UID: \"b19147eb-c15c-46c9-bbcd-004776aa6223\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29422830-p6shs" Dec 10 12:30:00 crc kubenswrapper[4780]: I1210 12:30:00.512030 4780 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rnhld\" (UniqueName: \"kubernetes.io/projected/b19147eb-c15c-46c9-bbcd-004776aa6223-kube-api-access-rnhld\") pod \"collect-profiles-29422830-p6shs\" (UID: \"b19147eb-c15c-46c9-bbcd-004776aa6223\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29422830-p6shs" Dec 10 12:30:00 crc kubenswrapper[4780]: I1210 12:30:00.614906 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/b19147eb-c15c-46c9-bbcd-004776aa6223-config-volume\") pod \"collect-profiles-29422830-p6shs\" (UID: \"b19147eb-c15c-46c9-bbcd-004776aa6223\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29422830-p6shs" Dec 10 12:30:00 crc kubenswrapper[4780]: I1210 12:30:00.615099 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rnhld\" (UniqueName: \"kubernetes.io/projected/b19147eb-c15c-46c9-bbcd-004776aa6223-kube-api-access-rnhld\") pod \"collect-profiles-29422830-p6shs\" (UID: \"b19147eb-c15c-46c9-bbcd-004776aa6223\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29422830-p6shs" Dec 10 12:30:00 crc 
kubenswrapper[4780]: I1210 12:30:00.615176 4780 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/b19147eb-c15c-46c9-bbcd-004776aa6223-secret-volume\") pod \"collect-profiles-29422830-p6shs\" (UID: \"b19147eb-c15c-46c9-bbcd-004776aa6223\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29422830-p6shs" Dec 10 12:30:00 crc kubenswrapper[4780]: I1210 12:30:00.617501 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/b19147eb-c15c-46c9-bbcd-004776aa6223-config-volume\") pod \"collect-profiles-29422830-p6shs\" (UID: \"b19147eb-c15c-46c9-bbcd-004776aa6223\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29422830-p6shs" Dec 10 12:30:00 crc kubenswrapper[4780]: I1210 12:30:00.626495 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/b19147eb-c15c-46c9-bbcd-004776aa6223-secret-volume\") pod \"collect-profiles-29422830-p6shs\" (UID: \"b19147eb-c15c-46c9-bbcd-004776aa6223\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29422830-p6shs" Dec 10 12:30:00 crc kubenswrapper[4780]: I1210 12:30:00.642288 4780 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rnhld\" (UniqueName: \"kubernetes.io/projected/b19147eb-c15c-46c9-bbcd-004776aa6223-kube-api-access-rnhld\") pod \"collect-profiles-29422830-p6shs\" (UID: \"b19147eb-c15c-46c9-bbcd-004776aa6223\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29422830-p6shs" Dec 10 12:30:00 crc kubenswrapper[4780]: I1210 12:30:00.706004 4780 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29422830-p6shs" Dec 10 12:30:01 crc kubenswrapper[4780]: I1210 12:30:01.448062 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-tstql" Dec 10 12:30:01 crc kubenswrapper[4780]: I1210 12:30:01.448489 4780 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-tstql" Dec 10 12:30:02 crc kubenswrapper[4780]: I1210 12:30:02.406510 4780 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29422830-p6shs"] Dec 10 12:30:02 crc kubenswrapper[4780]: W1210 12:30:02.408381 4780 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb19147eb_c15c_46c9_bbcd_004776aa6223.slice/crio-3d5a11154ca8a3b4f6c2825de8dbfca68f7ce62c6b4a602180c57cbd77e8dfa1 WatchSource:0}: Error finding container 3d5a11154ca8a3b4f6c2825de8dbfca68f7ce62c6b4a602180c57cbd77e8dfa1: Status 404 returned error can't find the container with id 3d5a11154ca8a3b4f6c2825de8dbfca68f7ce62c6b4a602180c57cbd77e8dfa1 Dec 10 12:30:02 crc kubenswrapper[4780]: I1210 12:30:02.695831 4780 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/certified-operators-tstql" podUID="224783c9-bbf1-4645-9bdb-bee165514d6c" containerName="registry-server" probeResult="failure" output=< Dec 10 12:30:02 crc kubenswrapper[4780]: timeout: failed to connect service ":50051" within 1s Dec 10 12:30:02 crc kubenswrapper[4780]: > Dec 10 12:30:03 crc kubenswrapper[4780]: I1210 12:30:03.337089 4780 generic.go:334] "Generic (PLEG): container finished" 
podID="b19147eb-c15c-46c9-bbcd-004776aa6223" containerID="ea8018dc9430808058fd223d0085ab7ee971f6a26d296d96c2180bb7b32e70b2" exitCode=0 Dec 10 12:30:03 crc kubenswrapper[4780]: I1210 12:30:03.337255 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29422830-p6shs" event={"ID":"b19147eb-c15c-46c9-bbcd-004776aa6223","Type":"ContainerDied","Data":"ea8018dc9430808058fd223d0085ab7ee971f6a26d296d96c2180bb7b32e70b2"} Dec 10 12:30:03 crc kubenswrapper[4780]: I1210 12:30:03.337426 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29422830-p6shs" event={"ID":"b19147eb-c15c-46c9-bbcd-004776aa6223","Type":"ContainerStarted","Data":"3d5a11154ca8a3b4f6c2825de8dbfca68f7ce62c6b4a602180c57cbd77e8dfa1"} Dec 10 12:30:04 crc kubenswrapper[4780]: I1210 12:30:04.896627 4780 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29422830-p6shs" Dec 10 12:30:04 crc kubenswrapper[4780]: I1210 12:30:04.998327 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/b19147eb-c15c-46c9-bbcd-004776aa6223-config-volume\") pod \"b19147eb-c15c-46c9-bbcd-004776aa6223\" (UID: \"b19147eb-c15c-46c9-bbcd-004776aa6223\") " Dec 10 12:30:04 crc kubenswrapper[4780]: I1210 12:30:04.998536 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rnhld\" (UniqueName: \"kubernetes.io/projected/b19147eb-c15c-46c9-bbcd-004776aa6223-kube-api-access-rnhld\") pod \"b19147eb-c15c-46c9-bbcd-004776aa6223\" (UID: \"b19147eb-c15c-46c9-bbcd-004776aa6223\") " Dec 10 12:30:04 crc kubenswrapper[4780]: I1210 12:30:04.998959 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/b19147eb-c15c-46c9-bbcd-004776aa6223-secret-volume\") pod \"b19147eb-c15c-46c9-bbcd-004776aa6223\" (UID: \"b19147eb-c15c-46c9-bbcd-004776aa6223\") " Dec 10 12:30:05 crc kubenswrapper[4780]: I1210 12:30:04.999906 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b19147eb-c15c-46c9-bbcd-004776aa6223-config-volume" (OuterVolumeSpecName: "config-volume") pod "b19147eb-c15c-46c9-bbcd-004776aa6223" (UID: "b19147eb-c15c-46c9-bbcd-004776aa6223"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 12:30:05 crc kubenswrapper[4780]: I1210 12:30:05.044882 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b19147eb-c15c-46c9-bbcd-004776aa6223-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "b19147eb-c15c-46c9-bbcd-004776aa6223" (UID: "b19147eb-c15c-46c9-bbcd-004776aa6223"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 12:30:05 crc kubenswrapper[4780]: I1210 12:30:05.073242 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b19147eb-c15c-46c9-bbcd-004776aa6223-kube-api-access-rnhld" (OuterVolumeSpecName: "kube-api-access-rnhld") pod "b19147eb-c15c-46c9-bbcd-004776aa6223" (UID: "b19147eb-c15c-46c9-bbcd-004776aa6223"). InnerVolumeSpecName "kube-api-access-rnhld". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 12:30:05 crc kubenswrapper[4780]: I1210 12:30:05.107365 4780 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/b19147eb-c15c-46c9-bbcd-004776aa6223-secret-volume\") on node \"crc\" DevicePath \"\"" Dec 10 12:30:05 crc kubenswrapper[4780]: I1210 12:30:05.107415 4780 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/b19147eb-c15c-46c9-bbcd-004776aa6223-config-volume\") on node \"crc\" DevicePath \"\"" Dec 10 12:30:05 crc kubenswrapper[4780]: I1210 12:30:05.107431 4780 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rnhld\" (UniqueName: \"kubernetes.io/projected/b19147eb-c15c-46c9-bbcd-004776aa6223-kube-api-access-rnhld\") on node \"crc\" DevicePath \"\"" Dec 10 12:30:05 crc kubenswrapper[4780]: I1210 12:30:05.364914 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29422830-p6shs" event={"ID":"b19147eb-c15c-46c9-bbcd-004776aa6223","Type":"ContainerDied","Data":"3d5a11154ca8a3b4f6c2825de8dbfca68f7ce62c6b4a602180c57cbd77e8dfa1"} Dec 10 12:30:05 crc kubenswrapper[4780]: I1210 12:30:05.365002 4780 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="3d5a11154ca8a3b4f6c2825de8dbfca68f7ce62c6b4a602180c57cbd77e8dfa1" Dec 10 12:30:05 crc kubenswrapper[4780]: I1210 12:30:05.365036 4780 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29422830-p6shs" Dec 10 12:30:06 crc kubenswrapper[4780]: I1210 12:30:06.041468 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29422785-56b4c"] Dec 10 12:30:06 crc kubenswrapper[4780]: I1210 12:30:06.060832 4780 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29422785-56b4c"] Dec 10 12:30:08 crc kubenswrapper[4780]: I1210 12:30:07.977732 4780 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3245693f-ba76-410c-a3c7-4d6b6de92ed5" path="/var/lib/kubelet/pods/3245693f-ba76-410c-a3c7-4d6b6de92ed5/volumes" Dec 10 12:30:09 crc kubenswrapper[4780]: I1210 12:30:09.127046 4780 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-z8czf" Dec 10 12:30:09 crc kubenswrapper[4780]: I1210 12:30:09.192114 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-z8czf" Dec 10 12:30:09 crc kubenswrapper[4780]: E1210 12:30:09.966194 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 12:30:10 crc kubenswrapper[4780]: I1210 12:30:10.296588 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-z8czf"] Dec 10 12:30:10 crc kubenswrapper[4780]: I1210 12:30:10.442539 4780 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-z8czf" podUID="2d45881d-d4b0-4d1e-a372-8a10ab2d789b" containerName="registry-server" 
containerID="cri-o://1e91a470c6d7f0ffdfb81a145ffcc9fe141ab304d11c0b5bb0574e0b9ba5c4ad" gracePeriod=2 Dec 10 12:30:11 crc kubenswrapper[4780]: I1210 12:30:11.039235 4780 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-z8czf" Dec 10 12:30:11 crc kubenswrapper[4780]: I1210 12:30:11.123538 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2d45881d-d4b0-4d1e-a372-8a10ab2d789b-utilities\") pod \"2d45881d-d4b0-4d1e-a372-8a10ab2d789b\" (UID: \"2d45881d-d4b0-4d1e-a372-8a10ab2d789b\") " Dec 10 12:30:11 crc kubenswrapper[4780]: I1210 12:30:11.123602 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tmwg5\" (UniqueName: \"kubernetes.io/projected/2d45881d-d4b0-4d1e-a372-8a10ab2d789b-kube-api-access-tmwg5\") pod \"2d45881d-d4b0-4d1e-a372-8a10ab2d789b\" (UID: \"2d45881d-d4b0-4d1e-a372-8a10ab2d789b\") " Dec 10 12:30:11 crc kubenswrapper[4780]: I1210 12:30:11.123638 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2d45881d-d4b0-4d1e-a372-8a10ab2d789b-catalog-content\") pod \"2d45881d-d4b0-4d1e-a372-8a10ab2d789b\" (UID: \"2d45881d-d4b0-4d1e-a372-8a10ab2d789b\") " Dec 10 12:30:11 crc kubenswrapper[4780]: I1210 12:30:11.125834 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2d45881d-d4b0-4d1e-a372-8a10ab2d789b-utilities" (OuterVolumeSpecName: "utilities") pod "2d45881d-d4b0-4d1e-a372-8a10ab2d789b" (UID: "2d45881d-d4b0-4d1e-a372-8a10ab2d789b"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 12:30:11 crc kubenswrapper[4780]: I1210 12:30:11.134566 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2d45881d-d4b0-4d1e-a372-8a10ab2d789b-kube-api-access-tmwg5" (OuterVolumeSpecName: "kube-api-access-tmwg5") pod "2d45881d-d4b0-4d1e-a372-8a10ab2d789b" (UID: "2d45881d-d4b0-4d1e-a372-8a10ab2d789b"). InnerVolumeSpecName "kube-api-access-tmwg5". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 12:30:11 crc kubenswrapper[4780]: I1210 12:30:11.195850 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2d45881d-d4b0-4d1e-a372-8a10ab2d789b-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "2d45881d-d4b0-4d1e-a372-8a10ab2d789b" (UID: "2d45881d-d4b0-4d1e-a372-8a10ab2d789b"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 12:30:11 crc kubenswrapper[4780]: I1210 12:30:11.228010 4780 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2d45881d-d4b0-4d1e-a372-8a10ab2d789b-utilities\") on node \"crc\" DevicePath \"\"" Dec 10 12:30:11 crc kubenswrapper[4780]: I1210 12:30:11.228077 4780 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tmwg5\" (UniqueName: \"kubernetes.io/projected/2d45881d-d4b0-4d1e-a372-8a10ab2d789b-kube-api-access-tmwg5\") on node \"crc\" DevicePath \"\"" Dec 10 12:30:11 crc kubenswrapper[4780]: I1210 12:30:11.228098 4780 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2d45881d-d4b0-4d1e-a372-8a10ab2d789b-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 10 12:30:11 crc kubenswrapper[4780]: I1210 12:30:11.463906 4780 generic.go:334] "Generic (PLEG): container finished" podID="2d45881d-d4b0-4d1e-a372-8a10ab2d789b" containerID="1e91a470c6d7f0ffdfb81a145ffcc9fe141ab304d11c0b5bb0574e0b9ba5c4ad" exitCode=0 Dec 10 12:30:11 crc kubenswrapper[4780]: I1210 12:30:11.464008 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-z8czf" event={"ID":"2d45881d-d4b0-4d1e-a372-8a10ab2d789b","Type":"ContainerDied","Data":"1e91a470c6d7f0ffdfb81a145ffcc9fe141ab304d11c0b5bb0574e0b9ba5c4ad"} Dec 10 12:30:11 crc kubenswrapper[4780]: I1210 12:30:11.464060 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-z8czf" event={"ID":"2d45881d-d4b0-4d1e-a372-8a10ab2d789b","Type":"ContainerDied","Data":"0369cd678ba327f3e554f3016ab6f2fd169a560a98dd381560c8697de4cb1050"} Dec 10 12:30:11 crc kubenswrapper[4780]: I1210 12:30:11.464092 4780 scope.go:117] "RemoveContainer" containerID="1e91a470c6d7f0ffdfb81a145ffcc9fe141ab304d11c0b5bb0574e0b9ba5c4ad" Dec 10 12:30:11 crc kubenswrapper[4780]: I1210 12:30:11.464321 4780 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-z8czf" Dec 10 12:30:11 crc kubenswrapper[4780]: I1210 12:30:11.536653 4780 scope.go:117] "RemoveContainer" containerID="d14601abd546a9afb7fe2bee0f08f2a808beaeba01a695430808434fe27bcae8" Dec 10 12:30:11 crc kubenswrapper[4780]: I1210 12:30:11.552870 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-z8czf"] Dec 10 12:30:11 crc kubenswrapper[4780]: I1210 12:30:11.564104 4780 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-z8czf"] Dec 10 12:30:11 crc kubenswrapper[4780]: I1210 12:30:11.576816 4780 scope.go:117] "RemoveContainer" containerID="ceb8e95987baf4d8f5cd372ce260c983527a4eaf35919d0b01dd524ca910055a" Dec 10 12:30:11 crc kubenswrapper[4780]: I1210 12:30:11.577550 4780 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-tstql" Dec 10 12:30:11 crc kubenswrapper[4780]: I1210 12:30:11.636119 4780 scope.go:117] "RemoveContainer" containerID="1e91a470c6d7f0ffdfb81a145ffcc9fe141ab304d11c0b5bb0574e0b9ba5c4ad" Dec 10 12:30:11 crc kubenswrapper[4780]: E1210 12:30:11.637818 4780 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1e91a470c6d7f0ffdfb81a145ffcc9fe141ab304d11c0b5bb0574e0b9ba5c4ad\": container with ID starting with 1e91a470c6d7f0ffdfb81a145ffcc9fe141ab304d11c0b5bb0574e0b9ba5c4ad not found: ID does not exist" containerID="1e91a470c6d7f0ffdfb81a145ffcc9fe141ab304d11c0b5bb0574e0b9ba5c4ad" Dec 10 12:30:11 crc kubenswrapper[4780]: I1210 12:30:11.637906 4780 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1e91a470c6d7f0ffdfb81a145ffcc9fe141ab304d11c0b5bb0574e0b9ba5c4ad"} err="failed to get container status \"1e91a470c6d7f0ffdfb81a145ffcc9fe141ab304d11c0b5bb0574e0b9ba5c4ad\": rpc error: code = NotFound desc = could not find container \"1e91a470c6d7f0ffdfb81a145ffcc9fe141ab304d11c0b5bb0574e0b9ba5c4ad\": container with ID starting with 1e91a470c6d7f0ffdfb81a145ffcc9fe141ab304d11c0b5bb0574e0b9ba5c4ad not found: ID does not exist" Dec 10 12:30:11 crc kubenswrapper[4780]: I1210 12:30:11.637969 4780 scope.go:117] "RemoveContainer" containerID="d14601abd546a9afb7fe2bee0f08f2a808beaeba01a695430808434fe27bcae8" Dec 10 12:30:11 crc kubenswrapper[4780]: E1210 12:30:11.638416 4780 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d14601abd546a9afb7fe2bee0f08f2a808beaeba01a695430808434fe27bcae8\": container with ID starting with d14601abd546a9afb7fe2bee0f08f2a808beaeba01a695430808434fe27bcae8 not found: ID does not exist" containerID="d14601abd546a9afb7fe2bee0f08f2a808beaeba01a695430808434fe27bcae8" Dec 10 12:30:11 crc kubenswrapper[4780]: I1210 12:30:11.638446 4780 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d14601abd546a9afb7fe2bee0f08f2a808beaeba01a695430808434fe27bcae8"} err="failed to get container status \"d14601abd546a9afb7fe2bee0f08f2a808beaeba01a695430808434fe27bcae8\": rpc error: code = NotFound desc = could not find container \"d14601abd546a9afb7fe2bee0f08f2a808beaeba01a695430808434fe27bcae8\": container with ID starting with d14601abd546a9afb7fe2bee0f08f2a808beaeba01a695430808434fe27bcae8 not found: ID does not exist" Dec 10 12:30:11 crc kubenswrapper[4780]: I1210 12:30:11.638467 4780 scope.go:117] "RemoveContainer" 
containerID="ceb8e95987baf4d8f5cd372ce260c983527a4eaf35919d0b01dd524ca910055a" Dec 10 12:30:11 crc kubenswrapper[4780]: E1210 12:30:11.640851 4780 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ceb8e95987baf4d8f5cd372ce260c983527a4eaf35919d0b01dd524ca910055a\": container with ID starting with ceb8e95987baf4d8f5cd372ce260c983527a4eaf35919d0b01dd524ca910055a not found: ID does not exist" containerID="ceb8e95987baf4d8f5cd372ce260c983527a4eaf35919d0b01dd524ca910055a" Dec 10 12:30:11 crc kubenswrapper[4780]: I1210 12:30:11.640905 4780 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ceb8e95987baf4d8f5cd372ce260c983527a4eaf35919d0b01dd524ca910055a"} err="failed to get container status \"ceb8e95987baf4d8f5cd372ce260c983527a4eaf35919d0b01dd524ca910055a\": rpc error: code = NotFound desc = could not find container \"ceb8e95987baf4d8f5cd372ce260c983527a4eaf35919d0b01dd524ca910055a\": container with ID starting with ceb8e95987baf4d8f5cd372ce260c983527a4eaf35919d0b01dd524ca910055a not found: ID does not exist" Dec 10 12:30:11 crc kubenswrapper[4780]: I1210 12:30:11.665596 4780 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-tstql" Dec 10 12:30:11 crc kubenswrapper[4780]: E1210 12:30:11.969501 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 12:30:11 crc kubenswrapper[4780]: I1210 12:30:11.979279 4780 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2d45881d-d4b0-4d1e-a372-8a10ab2d789b" path="/var/lib/kubelet/pods/2d45881d-d4b0-4d1e-a372-8a10ab2d789b/volumes" Dec 10 12:30:13 crc kubenswrapper[4780]: I1210 12:30:13.904671 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-tstql"] Dec 10 12:30:13 crc kubenswrapper[4780]: I1210 12:30:13.905279 4780 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-tstql" podUID="224783c9-bbf1-4645-9bdb-bee165514d6c" containerName="registry-server" containerID="cri-o://07a2047ce77af1568319cc3ba5207ea18e581e7495071029d03a70071e7e2a27" gracePeriod=2 Dec 10 12:30:14 crc kubenswrapper[4780]: I1210 12:30:14.587654 4780 generic.go:334] "Generic (PLEG): container finished" podID="224783c9-bbf1-4645-9bdb-bee165514d6c" containerID="07a2047ce77af1568319cc3ba5207ea18e581e7495071029d03a70071e7e2a27" exitCode=0 Dec 10 12:30:14 crc kubenswrapper[4780]: I1210 12:30:14.587747 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-tstql" event={"ID":"224783c9-bbf1-4645-9bdb-bee165514d6c","Type":"ContainerDied","Data":"07a2047ce77af1568319cc3ba5207ea18e581e7495071029d03a70071e7e2a27"} Dec 10 12:30:14 crc kubenswrapper[4780]: I1210 12:30:14.588501 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-tstql" event={"ID":"224783c9-bbf1-4645-9bdb-bee165514d6c","Type":"ContainerDied","Data":"710afe507e0ad9b79932edf0774d42e97d17cbc20a9deecce59dc45a2abb1e9d"} Dec 10 12:30:14 crc kubenswrapper[4780]: I1210 12:30:14.588530 4780 pod_container_deletor.go:80] "Container not found in pod's containers" 
containerID="710afe507e0ad9b79932edf0774d42e97d17cbc20a9deecce59dc45a2abb1e9d" Dec 10 12:30:14 crc kubenswrapper[4780]: I1210 12:30:14.637906 4780 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-tstql" Dec 10 12:30:14 crc kubenswrapper[4780]: I1210 12:30:14.752571 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/224783c9-bbf1-4645-9bdb-bee165514d6c-utilities\") pod \"224783c9-bbf1-4645-9bdb-bee165514d6c\" (UID: \"224783c9-bbf1-4645-9bdb-bee165514d6c\") " Dec 10 12:30:14 crc kubenswrapper[4780]: I1210 12:30:14.752788 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/224783c9-bbf1-4645-9bdb-bee165514d6c-catalog-content\") pod \"224783c9-bbf1-4645-9bdb-bee165514d6c\" (UID: \"224783c9-bbf1-4645-9bdb-bee165514d6c\") " Dec 10 12:30:14 crc kubenswrapper[4780]: I1210 12:30:14.753030 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mwh6s\" (UniqueName: \"kubernetes.io/projected/224783c9-bbf1-4645-9bdb-bee165514d6c-kube-api-access-mwh6s\") pod \"224783c9-bbf1-4645-9bdb-bee165514d6c\" (UID: \"224783c9-bbf1-4645-9bdb-bee165514d6c\") " Dec 10 12:30:14 crc kubenswrapper[4780]: I1210 12:30:14.754010 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/224783c9-bbf1-4645-9bdb-bee165514d6c-utilities" (OuterVolumeSpecName: "utilities") pod "224783c9-bbf1-4645-9bdb-bee165514d6c" (UID: "224783c9-bbf1-4645-9bdb-bee165514d6c"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 12:30:14 crc kubenswrapper[4780]: I1210 12:30:14.760207 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/224783c9-bbf1-4645-9bdb-bee165514d6c-kube-api-access-mwh6s" (OuterVolumeSpecName: "kube-api-access-mwh6s") pod "224783c9-bbf1-4645-9bdb-bee165514d6c" (UID: "224783c9-bbf1-4645-9bdb-bee165514d6c"). InnerVolumeSpecName "kube-api-access-mwh6s". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 12:30:14 crc kubenswrapper[4780]: I1210 12:30:14.832244 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/224783c9-bbf1-4645-9bdb-bee165514d6c-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "224783c9-bbf1-4645-9bdb-bee165514d6c" (UID: "224783c9-bbf1-4645-9bdb-bee165514d6c"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 12:30:14 crc kubenswrapper[4780]: I1210 12:30:14.857061 4780 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/224783c9-bbf1-4645-9bdb-bee165514d6c-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 10 12:30:14 crc kubenswrapper[4780]: I1210 12:30:14.857447 4780 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mwh6s\" (UniqueName: \"kubernetes.io/projected/224783c9-bbf1-4645-9bdb-bee165514d6c-kube-api-access-mwh6s\") on node \"crc\" DevicePath \"\"" Dec 10 12:30:14 crc kubenswrapper[4780]: I1210 12:30:14.857540 4780 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/224783c9-bbf1-4645-9bdb-bee165514d6c-utilities\") on node \"crc\" DevicePath \"\"" Dec 10 12:30:15 crc kubenswrapper[4780]: I1210 12:30:15.604667 4780 generic.go:334] "Generic (PLEG): container finished" podID="981adbf3-17b2-4e35-a59b-1f7ffd7b8bd9" containerID="98a930fdba8a79e7c49aaf5cd7d49cb1217dd0fa947fc63c58fb7ad4a805a31a" exitCode=0 Dec 10 12:30:15 crc kubenswrapper[4780]: I1210 12:30:15.604782 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-tjsq4/must-gather-d5bhw" event={"ID":"981adbf3-17b2-4e35-a59b-1f7ffd7b8bd9","Type":"ContainerDied","Data":"98a930fdba8a79e7c49aaf5cd7d49cb1217dd0fa947fc63c58fb7ad4a805a31a"} Dec 10 12:30:15 crc kubenswrapper[4780]: I1210 12:30:15.605098 4780 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-tstql" Dec 10 12:30:15 crc kubenswrapper[4780]: I1210 12:30:15.606027 4780 scope.go:117] "RemoveContainer" containerID="98a930fdba8a79e7c49aaf5cd7d49cb1217dd0fa947fc63c58fb7ad4a805a31a" Dec 10 12:30:15 crc kubenswrapper[4780]: I1210 12:30:15.660227 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-tstql"] Dec 10 12:30:15 crc kubenswrapper[4780]: I1210 12:30:15.675003 4780 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-tstql"] Dec 10 12:30:15 crc kubenswrapper[4780]: I1210 12:30:15.694621 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-tjsq4_must-gather-d5bhw_981adbf3-17b2-4e35-a59b-1f7ffd7b8bd9/gather/0.log" Dec 10 12:30:16 crc kubenswrapper[4780]: I1210 12:30:16.088745 4780 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="224783c9-bbf1-4645-9bdb-bee165514d6c" path="/var/lib/kubelet/pods/224783c9-bbf1-4645-9bdb-bee165514d6c/volumes" Dec 10 12:30:21 crc kubenswrapper[4780]: E1210 12:30:21.967602 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 12:30:24 crc kubenswrapper[4780]: I1210 12:30:24.831777 4780 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-tjsq4/must-gather-d5bhw"] Dec 10 12:30:24 crc kubenswrapper[4780]: I1210 12:30:24.832931 4780 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-must-gather-tjsq4/must-gather-d5bhw" podUID="981adbf3-17b2-4e35-a59b-1f7ffd7b8bd9" containerName="copy" 
containerID="cri-o://3f40242e21888b3af54cab5c11d7153ef7dd9a71241bbfb7ceb91f2a3f4affce" gracePeriod=2 Dec 10 12:30:24 crc kubenswrapper[4780]: I1210 12:30:24.852570 4780 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-tjsq4/must-gather-d5bhw"] Dec 10 12:30:24 crc kubenswrapper[4780]: E1210 12:30:24.968708 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 12:30:25 crc kubenswrapper[4780]: I1210 12:30:25.852294 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-tjsq4_must-gather-d5bhw_981adbf3-17b2-4e35-a59b-1f7ffd7b8bd9/copy/0.log" Dec 10 12:30:25 crc kubenswrapper[4780]: I1210 12:30:25.853874 4780 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-tjsq4/must-gather-d5bhw" Dec 10 12:30:25 crc kubenswrapper[4780]: I1210 12:30:25.932152 4780 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-tjsq4_must-gather-d5bhw_981adbf3-17b2-4e35-a59b-1f7ffd7b8bd9/copy/0.log" Dec 10 12:30:25 crc kubenswrapper[4780]: I1210 12:30:25.933044 4780 generic.go:334] "Generic (PLEG): container finished" podID="981adbf3-17b2-4e35-a59b-1f7ffd7b8bd9" containerID="3f40242e21888b3af54cab5c11d7153ef7dd9a71241bbfb7ceb91f2a3f4affce" exitCode=143 Dec 10 12:30:25 crc kubenswrapper[4780]: I1210 12:30:25.933129 4780 scope.go:117] "RemoveContainer" containerID="3f40242e21888b3af54cab5c11d7153ef7dd9a71241bbfb7ceb91f2a3f4affce" Dec 10 12:30:25 crc kubenswrapper[4780]: I1210 12:30:25.933306 4780 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-tjsq4/must-gather-d5bhw" Dec 10 12:30:25 crc kubenswrapper[4780]: I1210 12:30:25.957843 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-psn2m\" (UniqueName: \"kubernetes.io/projected/981adbf3-17b2-4e35-a59b-1f7ffd7b8bd9-kube-api-access-psn2m\") pod \"981adbf3-17b2-4e35-a59b-1f7ffd7b8bd9\" (UID: \"981adbf3-17b2-4e35-a59b-1f7ffd7b8bd9\") " Dec 10 12:30:25 crc kubenswrapper[4780]: I1210 12:30:25.958083 4780 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/981adbf3-17b2-4e35-a59b-1f7ffd7b8bd9-must-gather-output\") pod \"981adbf3-17b2-4e35-a59b-1f7ffd7b8bd9\" (UID: \"981adbf3-17b2-4e35-a59b-1f7ffd7b8bd9\") " Dec 10 12:30:25 crc kubenswrapper[4780]: I1210 12:30:25.973658 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/981adbf3-17b2-4e35-a59b-1f7ffd7b8bd9-kube-api-access-psn2m" (OuterVolumeSpecName: "kube-api-access-psn2m") pod "981adbf3-17b2-4e35-a59b-1f7ffd7b8bd9" (UID: "981adbf3-17b2-4e35-a59b-1f7ffd7b8bd9"). InnerVolumeSpecName "kube-api-access-psn2m". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 12:30:26 crc kubenswrapper[4780]: I1210 12:30:26.005712 4780 scope.go:117] "RemoveContainer" containerID="98a930fdba8a79e7c49aaf5cd7d49cb1217dd0fa947fc63c58fb7ad4a805a31a" Dec 10 12:30:26 crc kubenswrapper[4780]: I1210 12:30:26.063644 4780 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-psn2m\" (UniqueName: \"kubernetes.io/projected/981adbf3-17b2-4e35-a59b-1f7ffd7b8bd9-kube-api-access-psn2m\") on node \"crc\" DevicePath \"\"" Dec 10 12:30:26 crc kubenswrapper[4780]: I1210 12:30:26.235115 4780 scope.go:117] "RemoveContainer" containerID="3f40242e21888b3af54cab5c11d7153ef7dd9a71241bbfb7ceb91f2a3f4affce" Dec 10 12:30:26 crc kubenswrapper[4780]: E1210 12:30:26.236087 4780 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3f40242e21888b3af54cab5c11d7153ef7dd9a71241bbfb7ceb91f2a3f4affce\": container with ID starting with 3f40242e21888b3af54cab5c11d7153ef7dd9a71241bbfb7ceb91f2a3f4affce not found: ID does not exist" containerID="3f40242e21888b3af54cab5c11d7153ef7dd9a71241bbfb7ceb91f2a3f4affce" Dec 10 12:30:26 crc kubenswrapper[4780]: I1210 12:30:26.236248 4780 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3f40242e21888b3af54cab5c11d7153ef7dd9a71241bbfb7ceb91f2a3f4affce"} err="failed to get container status \"3f40242e21888b3af54cab5c11d7153ef7dd9a71241bbfb7ceb91f2a3f4affce\": rpc error: code = NotFound desc = could not find container \"3f40242e21888b3af54cab5c11d7153ef7dd9a71241bbfb7ceb91f2a3f4affce\": container with ID starting with 3f40242e21888b3af54cab5c11d7153ef7dd9a71241bbfb7ceb91f2a3f4affce not found: ID does not exist" Dec 10 12:30:26 crc kubenswrapper[4780]: I1210 12:30:26.236490 4780 scope.go:117] "RemoveContainer" containerID="98a930fdba8a79e7c49aaf5cd7d49cb1217dd0fa947fc63c58fb7ad4a805a31a" Dec 10 12:30:26 crc kubenswrapper[4780]: E1210 12:30:26.238561 4780 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"98a930fdba8a79e7c49aaf5cd7d49cb1217dd0fa947fc63c58fb7ad4a805a31a\": container with ID starting with 98a930fdba8a79e7c49aaf5cd7d49cb1217dd0fa947fc63c58fb7ad4a805a31a not found: ID does not exist" containerID="98a930fdba8a79e7c49aaf5cd7d49cb1217dd0fa947fc63c58fb7ad4a805a31a" Dec 10 12:30:26 crc kubenswrapper[4780]: I1210 12:30:26.238595 4780 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"98a930fdba8a79e7c49aaf5cd7d49cb1217dd0fa947fc63c58fb7ad4a805a31a"} err="failed to get container status \"98a930fdba8a79e7c49aaf5cd7d49cb1217dd0fa947fc63c58fb7ad4a805a31a\": rpc error: code = NotFound desc = could not find container \"98a930fdba8a79e7c49aaf5cd7d49cb1217dd0fa947fc63c58fb7ad4a805a31a\": container with ID starting with 98a930fdba8a79e7c49aaf5cd7d49cb1217dd0fa947fc63c58fb7ad4a805a31a not found: ID does not exist" Dec 10 12:30:26 crc kubenswrapper[4780]: I1210 12:30:26.353289 4780 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/981adbf3-17b2-4e35-a59b-1f7ffd7b8bd9-must-gather-output" (OuterVolumeSpecName: "must-gather-output") pod "981adbf3-17b2-4e35-a59b-1f7ffd7b8bd9" (UID: "981adbf3-17b2-4e35-a59b-1f7ffd7b8bd9"). InnerVolumeSpecName "must-gather-output". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 12:30:26 crc kubenswrapper[4780]: I1210 12:30:26.391801 4780 reconciler_common.go:293] "Volume detached for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/981adbf3-17b2-4e35-a59b-1f7ffd7b8bd9-must-gather-output\") on node \"crc\" DevicePath \"\"" Dec 10 12:30:27 crc kubenswrapper[4780]: I1210 12:30:27.973545 4780 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="981adbf3-17b2-4e35-a59b-1f7ffd7b8bd9" path="/var/lib/kubelet/pods/981adbf3-17b2-4e35-a59b-1f7ffd7b8bd9/volumes" Dec 10 12:30:32 crc kubenswrapper[4780]: E1210 12:30:32.961087 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 12:30:35 crc kubenswrapper[4780]: E1210 12:30:35.969637 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 12:30:44 crc kubenswrapper[4780]: E1210 12:30:44.961047 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 12:30:50 crc kubenswrapper[4780]: E1210 12:30:50.962810 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 12:30:54 crc kubenswrapper[4780]: I1210 12:30:54.858094 4780 scope.go:117] "RemoveContainer" containerID="8587bf4f4a392d814fa8cadfb6dca551ae040ac5eca1cc89b8e0919f9de55f9d" Dec 10 12:30:57 crc kubenswrapper[4780]: E1210 12:30:57.964402 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 12:31:06 crc kubenswrapper[4780]: E1210 12:31:06.220497 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 12:31:12 crc kubenswrapper[4780]: E1210 12:31:12.962824 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" 
podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 12:31:17 crc kubenswrapper[4780]: E1210 12:31:17.962394 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 12:31:26 crc kubenswrapper[4780]: E1210 12:31:26.963622 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 12:31:29 crc kubenswrapper[4780]: I1210 12:31:29.921583 4780 trace.go:236] Trace[1309275370]: "Calculate volume metrics of storage for pod openshift-logging/logging-loki-index-gateway-0" (10-Dec-2025 12:31:26.386) (total time: 3534ms): Dec 10 12:31:29 crc kubenswrapper[4780]: Trace[1309275370]: [3.534945855s] [3.534945855s] END Dec 10 12:31:30 crc kubenswrapper[4780]: E1210 12:31:30.963699 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 12:31:39 crc kubenswrapper[4780]: E1210 12:31:39.965767 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 12:31:43 crc kubenswrapper[4780]: E1210 12:31:43.963358 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 12:31:53 crc kubenswrapper[4780]: E1210 12:31:53.963934 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 12:31:57 crc kubenswrapper[4780]: I1210 12:31:57.475943 4780 patch_prober.go:28] interesting pod/machine-config-daemon-xhdr5 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 10 12:31:57 crc kubenswrapper[4780]: I1210 12:31:57.476986 4780 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" podUID="6bf1dca1-b191-4796-b326-baac53e84045" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 10 
12:31:58 crc kubenswrapper[4780]: E1210 12:31:58.961391 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 12:32:04 crc kubenswrapper[4780]: E1210 12:32:04.962073 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 12:32:09 crc kubenswrapper[4780]: E1210 12:32:09.962071 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 12:32:19 crc kubenswrapper[4780]: E1210 12:32:19.963638 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 12:32:24 crc kubenswrapper[4780]: E1210 12:32:24.160071 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 12:32:27 crc kubenswrapper[4780]: I1210 12:32:27.525788 4780 patch_prober.go:28] interesting pod/machine-config-daemon-xhdr5 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 10 12:32:27 crc kubenswrapper[4780]: I1210 12:32:27.526365 4780 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" podUID="6bf1dca1-b191-4796-b326-baac53e84045" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 10 12:32:30 crc kubenswrapper[4780]: E1210 12:32:30.962979 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 12:32:39 crc kubenswrapper[4780]: I1210 12:32:39.962365 4780 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Dec 10 12:32:40 crc kubenswrapper[4780]: E1210 12:32:40.088539 4780 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = initializing source 
docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested" Dec 10 12:32:40 crc kubenswrapper[4780]: E1210 12:32:40.088644 4780 kuberuntime_image.go:55] "Failed to pull image" err="initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested" Dec 10 12:32:40 crc kubenswrapper[4780]: E1210 12:32:40.088829 4780 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:heat-db-sync,Image:quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested,Command:[/bin/bash],Args:[-c /usr/bin/heat-manage --config-dir /etc/heat/heat.conf.d db_sync],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:true,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/heat/heat.conf.d/00-default.conf,SubPath:00-default.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:false,MountPath:/etc/heat/heat.conf.d/01-custom.conf,SubPath:01-custom.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/my.cnf,SubPath:my.cnf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-fh6ms,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42418,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:*42418,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod heat-db-sync-nd4t7_openstack(4ba2892c-316e-4819-a33c-d7b2b6803553): ErrImagePull: initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine" logger="UnhandledError" Dec 10 12:32:40 crc kubenswrapper[4780]: E1210 12:32:40.090069 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ErrImagePull: \"initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 12:32:44 crc kubenswrapper[4780]: E1210 12:32:44.961561 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 12:32:50 crc kubenswrapper[4780]: E1210 12:32:50.964281 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 12:32:54 crc kubenswrapper[4780]: I1210 12:32:54.985303 4780 scope.go:117] "RemoveContainer" containerID="79fbda25921b4b262ab01a74a895034a54e6fa7b3cbecb1abfb440601fc72673" Dec 10 12:32:57 crc kubenswrapper[4780]: I1210 12:32:57.475979 4780 patch_prober.go:28] interesting pod/machine-config-daemon-xhdr5 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 10 12:32:57 crc kubenswrapper[4780]: I1210 12:32:57.476596 4780 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" podUID="6bf1dca1-b191-4796-b326-baac53e84045" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 10 12:32:57 crc kubenswrapper[4780]: I1210 12:32:57.476678 4780 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" Dec 10 12:32:57 crc kubenswrapper[4780]: I1210 12:32:57.478066 4780 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"ed80fb2da51647e32144356ea3421919f7b9dfad2d2a67b5013099ddcd2130d4"} pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Dec 10 12:32:57 crc kubenswrapper[4780]: I1210 12:32:57.478139 4780 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" podUID="6bf1dca1-b191-4796-b326-baac53e84045" containerName="machine-config-daemon" containerID="cri-o://ed80fb2da51647e32144356ea3421919f7b9dfad2d2a67b5013099ddcd2130d4" gracePeriod=600 Dec 10 12:32:57 crc kubenswrapper[4780]: E1210 12:32:57.961575 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to 
\"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 12:33:03 crc kubenswrapper[4780]: E1210 12:33:03.961158 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 12:33:08 crc kubenswrapper[4780]: E1210 12:33:08.961686 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 12:33:14 crc kubenswrapper[4780]: I1210 12:33:14.129487 4780 trace.go:236] Trace[1140802826]: "iptables ChainExists" (10-Dec-2025 12:31:55.963) (total time: 78165ms): Dec 10 12:33:14 crc kubenswrapper[4780]: Trace[1140802826]: [1m18.165500866s] [1m18.165500866s] END Dec 10 12:33:14 crc kubenswrapper[4780]: I1210 12:33:14.141863 4780 trace.go:236] Trace[579960482]: "iptables ChainExists" (10-Dec-2025 12:31:55.965) (total time: 78175ms): Dec 10 12:33:14 crc kubenswrapper[4780]: Trace[579960482]: [1m18.175369367s] [1m18.175369367s] END Dec 10 12:33:14 crc kubenswrapper[4780]: I1210 12:33:14.175486 4780 scope.go:117] "RemoveContainer" containerID="916f3b3b1250203070bda7e02813efc1a7563ccda5ad3af5ac71607d8950295f" Dec 10 12:33:14 crc kubenswrapper[4780]: I1210 12:33:14.307483 4780 generic.go:334] "Generic (PLEG): container finished" podID="6bf1dca1-b191-4796-b326-baac53e84045" containerID="ed80fb2da51647e32144356ea3421919f7b9dfad2d2a67b5013099ddcd2130d4" exitCode=0 Dec 10 12:33:14 crc kubenswrapper[4780]: I1210 12:33:14.307571 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" event={"ID":"6bf1dca1-b191-4796-b326-baac53e84045","Type":"ContainerDied","Data":"ed80fb2da51647e32144356ea3421919f7b9dfad2d2a67b5013099ddcd2130d4"} Dec 10 12:33:14 crc kubenswrapper[4780]: I1210 12:33:14.307648 4780 scope.go:117] "RemoveContainer" containerID="0c9e8a8764b3e192f255d986c37b68fa82367c56c8fc86fca7a6f21654558176" Dec 10 12:33:15 crc kubenswrapper[4780]: I1210 12:33:15.705829 4780 scope.go:117] "RemoveContainer" containerID="bac908c55deb780d336f73e77028cb6c163158f479ba589c153615d887b5f1a0" Dec 10 12:33:16 crc kubenswrapper[4780]: I1210 12:33:16.359109 4780 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-xhdr5" event={"ID":"6bf1dca1-b191-4796-b326-baac53e84045","Type":"ContainerStarted","Data":"5e9400db2f8021f68c69689ff212b1a96004c0153f8b5a9cdd21c3e1c98ba236"} Dec 10 12:33:17 crc kubenswrapper[4780]: E1210 12:33:17.963519 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 12:33:24 crc kubenswrapper[4780]: E1210 12:33:24.100603 
4780 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested" Dec 10 12:33:24 crc kubenswrapper[4780]: E1210 12:33:24.101306 4780 kuberuntime_image.go:55] "Failed to pull image" err="initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested" Dec 10 12:33:24 crc kubenswrapper[4780]: E1210 12:33:24.101551 4780 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:ceilometer-central-agent,Image:quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n5d9hcfh66bh66bh89h5cdh97h57ch598h68h5b5h689h56chc5h96h58ch687h5dfh5ddh645h68bhcchcdh56ch56fh9fh654hd4h8dhb9h74h59cq,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/var/lib/openstack/bin,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/openstack/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:ceilometer-central-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-gf2w8,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[/usr/bin/python3 /var/lib/openstack/bin/centralhealth.py],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:300,TimeoutSeconds:5,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod 
ceilometer-0_openstack(317b5b7c-bb08-4441-a2ef-8c2d7390ada6): ErrImagePull: initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" logger="UnhandledError" Dec 10 12:33:24 crc kubenswrapper[4780]: E1210 12:33:24.102798 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ErrImagePull: \"initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 12:33:28 crc kubenswrapper[4780]: E1210 12:33:28.964171 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 12:33:38 crc kubenswrapper[4780]: E1210 12:33:38.962000 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 12:33:43 crc kubenswrapper[4780]: E1210 12:33:43.961509 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 12:33:52 crc kubenswrapper[4780]: E1210 12:33:52.960730 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 12:33:58 crc kubenswrapper[4780]: E1210 12:33:58.964237 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 12:34:05 crc kubenswrapper[4780]: E1210 12:34:05.962213 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 12:34:12 crc kubenswrapper[4780]: E1210 12:34:12.961050 4780 pod_workers.go:1301] "Error syncing pod, 
skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 12:34:17 crc kubenswrapper[4780]: E1210 12:34:17.962351 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" Dec 10 12:34:25 crc kubenswrapper[4780]: E1210 12:34:25.996548 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-nd4t7" podUID="4ba2892c-316e-4819-a33c-d7b2b6803553" Dec 10 12:34:30 crc kubenswrapper[4780]: E1210 12:34:30.963388 4780 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="317b5b7c-bb08-4441-a2ef-8c2d7390ada6" var/home/core/zuul-output/logs/crc-cloud-workdir-crc-all-logs.tar.gz0000644000175000000000000000005515116264342024451 0ustar coreroot‹íÁ  ÷Om7 €7šÞ'(var/home/core/zuul-output/logs/crc-cloud/0000755000175000000000000000000015116264343017367 5ustar corerootvar/home/core/zuul-output/artifacts/0000755000175000017500000000000015116247132016507 5ustar corecorevar/home/core/zuul-output/docs/0000755000175000017500000000000015116247132015457 5ustar corecore